diff --git a/.github/labeler-issue-triage.yml b/.github/labeler-issue-triage.yml new file mode 100644 index 000000000000..d1a51fda447f --- /dev/null +++ b/.github/labeler-issue-triage.yml @@ -0,0 +1,4 @@ +bug: + - 'panic:' +crash: + - 'panic:' diff --git a/.github/labeler-pull-request-triage.yml b/.github/labeler-pull-request-triage.yml new file mode 100644 index 000000000000..102bd53e27b1 --- /dev/null +++ b/.github/labeler-pull-request-triage.yml @@ -0,0 +1,6 @@ +dependencies: + - go.mod + - go.sum + - vendor/**/* +documentation: + - website/**/* diff --git a/.github/workflows/depscheck.yaml b/.github/workflows/depscheck.yaml index 4112b48165f8..a00c4fc46b05 100644 --- a/.github/workflows/depscheck.yaml +++ b/.github/workflows/depscheck.yaml @@ -17,7 +17,7 @@ jobs: - uses: actions/checkout@v2 - uses: actions/setup-go@v2 with: - go-version: '1.16.3' + go-version: '1.16.6' - run: bash scripts/gogetcookie.sh - run: make tools - run: make depscheck diff --git a/.github/workflows/gencheck.yaml b/.github/workflows/gencheck.yaml index 30751a2f6c42..912ef4b2c472 100644 --- a/.github/workflows/gencheck.yaml +++ b/.github/workflows/gencheck.yaml @@ -15,7 +15,7 @@ jobs: - uses: actions/checkout@v2 - uses: actions/setup-go@v2 with: - go-version: '1.16.3' + go-version: '1.16.6' - run: bash scripts/gogetcookie.sh - run: make tools - run: make gencheck diff --git a/.github/workflows/golint.yaml b/.github/workflows/golint.yaml index 0f3f2d111b8d..2b0e29087345 100644 --- a/.github/workflows/golint.yaml +++ b/.github/workflows/golint.yaml @@ -17,8 +17,8 @@ jobs: - uses: actions/checkout@v2 - uses: actions/setup-go@v2 with: - go-version: '1.16.3' + go-version: '1.16.6' - uses: golangci/golangci-lint-action@v2 with: version: 'v1.32' - args: --timeout=30m0s + args: -v diff --git a/.github/workflows/gradually-deprecated.yaml b/.github/workflows/gradually-deprecated.yaml index 49f14c234fc2..fe5ce940323b 100644 --- a/.github/workflows/gradually-deprecated.yaml +++ 
b/.github/workflows/gradually-deprecated.yaml @@ -17,5 +17,5 @@ jobs: fetch-depth: 0 - uses: actions/setup-go@v2 with: - go-version: '1.16.3' + go-version: '1.16.6' - run: ./scripts/run-gradually-deprecated.sh diff --git a/.github/workflows/issue-comment-created.yaml b/.github/workflows/issue-comment-created.yaml new file mode 100644 index 000000000000..c5e499b51140 --- /dev/null +++ b/.github/workflows/issue-comment-created.yaml @@ -0,0 +1,18 @@ +name: Issue Comment Created Triage + +on: + issue_comment: + types: [created] + +jobs: + issue_comment_triage: + runs-on: ubuntu-latest + steps: + - uses: actions-ecosystem/action-remove-labels@v1 + with: + github_token: "${{ secrets.GITHUB_TOKEN }}" + labels: stale + - uses: actions-ecosystem/action-remove-labels@v1 + with: + github_token: "${{ secrets.GITHUB_TOKEN }}" + labels: waiting-response diff --git a/.github/workflows/issue-opened.yaml b/.github/workflows/issue-opened.yaml new file mode 100644 index 000000000000..c3a4c065d878 --- /dev/null +++ b/.github/workflows/issue-opened.yaml @@ -0,0 +1,16 @@ +name: Issue Opened Triage + +on: + issues: + types: [opened] + +jobs: + issue_triage: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: github/issue-labeler@v2.4 + with: + repo-token: "${{ secrets.GITHUB_TOKEN }}" + configuration-path: .github/labeler-issue-triage.yml + enable-versioned-regex: 0 diff --git a/.github/workflows/milestone-closed.yaml b/.github/workflows/milestone-closed.yaml new file mode 100644 index 000000000000..abb8b656eb85 --- /dev/null +++ b/.github/workflows/milestone-closed.yaml @@ -0,0 +1,20 @@ +name: Closed Milestones + +on: + milestone: + types: [closed] + +permissions: + issues: write + pull-requests: write + +jobs: + Comment: + runs-on: ubuntu-latest + steps: + - uses: bflad/action-milestone-comment@v1 + with: + body: | + This functionality has been released in [${{ github.event.milestone.title }} of the Terraform Provider](https://github.com/${{ github.repository 
}}/blob/${{ github.event.milestone.title }}/CHANGELOG.md). Please see the [Terraform documentation on provider versioning](https://www.terraform.io/docs/configuration/providers.html#provider-versions) or reach out if you need any assistance upgrading. + + For further feature requests or bug reports with this functionality, please create a [new GitHub issue](https://github.com/${{ github.repository }}/issues/new/choose) following the template. Thank you! diff --git a/.github/workflows/pull-request.yaml b/.github/workflows/pull-request.yaml new file mode 100644 index 000000000000..9c58e3d541c8 --- /dev/null +++ b/.github/workflows/pull-request.yaml @@ -0,0 +1,26 @@ +name: "Pull Request Triage" + +on: [pull_request_target] + +jobs: + triage: + runs-on: ubuntu-latest + steps: + - uses: actions/labeler@v3 + with: + configuration-path: .github/labeler-pull-request-triage.yml + repo-token: "${{ secrets.GITHUB_TOKEN }}" + # See also: https://github.com/CodelyTV/pr-size-labeler/pull/26 + - uses: bflad/pr-size-labeler@7df62b12a176513631973abfe151d2b6213c3f12 + with: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + xs_label: 'size/XS' + xs_max_size: '30' + s_label: 'size/S' + s_max_size: '60' + m_label: 'size/M' + m_max_size: '150' + l_label: 'size/L' + l_max_size: '300' + xl_label: 'size/XL' + message_if_xl: '' diff --git a/.github/workflows/tflint.yaml b/.github/workflows/tflint.yaml index d44abf1faa3d..cf30cce63676 100644 --- a/.github/workflows/tflint.yaml +++ b/.github/workflows/tflint.yaml @@ -17,7 +17,7 @@ jobs: - uses: actions/checkout@v2 - uses: actions/setup-go@v2 with: - go-version: '1.16.3' + go-version: '1.16.6' - run: bash scripts/gogetcookie.sh - run: make tools - run: make tflint diff --git a/.github/workflows/thirty-two-bit.yaml b/.github/workflows/thirty-two-bit.yaml index ac35726eb8f2..910f77da271f 100644 --- a/.github/workflows/thirty-two-bit.yaml +++ b/.github/workflows/thirty-two-bit.yaml @@ -9,7 +9,7 @@ on: - '.github/workflows/**' jobs: - 
compatability-32bit-test: + compatibility-32bit-test: runs-on: ubuntu-latest strategy: fail-fast: true @@ -17,7 +17,7 @@ jobs: - uses: actions/checkout@v2 - uses: actions/setup-go@v2 with: - go-version: '1.16.3' + go-version: '1.16.6' - run: bash scripts/gogetcookie.sh - run: make tools - run: GOARCH=386 GOOS=linux go build -o 32bitbuild . diff --git a/.github/workflows/unit-test.yaml b/.github/workflows/unit-test.yaml index 86c39e650303..088ecf84c1f1 100644 --- a/.github/workflows/unit-test.yaml +++ b/.github/workflows/unit-test.yaml @@ -17,7 +17,7 @@ jobs: - uses: actions/checkout@v2 - uses: actions/setup-go@v2 with: - go-version: '1.16.3' + go-version: '1.16.6' - run: bash scripts/gogetcookie.sh - run: make test env: diff --git a/.github/workflows/website-lint.yaml b/.github/workflows/website-lint.yaml index 9c8a1876f154..9b20aff50573 100644 --- a/.github/workflows/website-lint.yaml +++ b/.github/workflows/website-lint.yaml @@ -16,7 +16,7 @@ jobs: - uses: actions/checkout@v2 - uses: actions/setup-go@v2 with: - go-version: '1.16.3' + go-version: '1.16.6' - run: bash scripts/gogetcookie.sh - run: make tools - run: make website-lint diff --git a/.go-version b/.go-version index c807441cfed7..de646d2fc11c 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.16.3 +1.16.6 diff --git a/.golangci.yml b/.golangci.yml index e0f08aad21dc..2aef124d72c4 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,8 +1,8 @@ run: - deadline: 12m10s + timeout: 30m modules-download-mode: vendor skip-dirs: - - vendor + - internal/services/*/sdk # Excluding sdk folders as these are externally generated issues: max-per-linter: 0 @@ -32,12 +32,14 @@ linters: - varcheck - vet - vetshadow - - whitespace +# - whitespace # Disabled for performance reasons linters-settings: errcheck: - ignore: github.com/hashicorp/terraform-plugin-sdk/helper/schema:ForceNew|Set,fmt:.*,io:Close + ignore: github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema:ForceNew|Set,fmt:.*,io:Close misspell: 
ignore-words: - hdinsight - exportfs + nakedret: + max-func-lines: 40 diff --git a/.hashibot.hcl b/.hashibot.hcl deleted file mode 100644 index 28b50e4bc257..000000000000 --- a/.hashibot.hcl +++ /dev/null @@ -1,13 +0,0 @@ -queued_behavior "release_commenter" "releases" { - repo_prefix = "terraform-provider-" - - message = <<-EOF - This has been released in [version ${var.release_version} of the provider](${var.changelog_link}). Please see the [Terraform documentation on provider versioning](https://www.terraform.io/docs/configuration/providers.html#provider-versions) or reach out if you need any assistance upgrading. As an example: - ```hcl - provider "${var.project_name}" { - version = "~> ${var.release_version}" - } - # ... other configuration ... - ``` - EOF -} diff --git a/.teamcity/components/build_components.kt b/.teamcity/components/build_components.kt index 2c4813ba4357..77c8feb5ef03 100644 --- a/.teamcity/components/build_components.kt +++ b/.teamcity/components/build_components.kt @@ -127,14 +127,17 @@ fun ParametrizedWithType.hiddenPasswordVariable(name: String, value: String, des password(name, value, "", description, ParameterDisplay.HIDDEN) } -fun Triggers.RunNightly(nightlyTestsEnabled: Boolean, startHour: Int) { +fun Triggers.RunNightly(nightlyTestsEnabled: Boolean, startHour: Int, daysOfWeek: String, daysOfMonth: String) { schedule{ enabled = nightlyTestsEnabled branchFilter = "+:refs/heads/master" - schedulingPolicy = daily { - hour = startHour + schedulingPolicy = cron { + hours = startHour.toString() timezone = "SERVER" + + dayOfWeek = daysOfWeek + dayOfMonth = daysOfMonth } } } diff --git a/.teamcity/components/build_config_service.kt b/.teamcity/components/build_config_service.kt index 6d0d72bfd0f1..593559a8fde9 100644 --- a/.teamcity/components/build_config_service.kt +++ b/.teamcity/components/build_config_service.kt @@ -5,7 +5,7 @@ class serviceDetails(name: String, displayName: String, environment: String) { val displayName = displayName 
val environment = environment - fun buildConfiguration(providerName : String, nightlyTestsEnabled: Boolean, startHour: Int, parallelism: Int) : BuildType { + fun buildConfiguration(providerName : String, nightlyTestsEnabled: Boolean, startHour: Int, parallelism: Int, daysOfWeek: String, daysOfMonth: String) : BuildType { return BuildType { // TC needs a consistent ID for dynamically generated packages id(uniqueID(providerName)) @@ -41,7 +41,7 @@ class serviceDetails(name: String, displayName: String, environment: String) { } triggers { - RunNightly(nightlyTestsEnabled, startHour) + RunNightly(nightlyTestsEnabled, startHour, daysOfWeek, daysOfMonth) } } } diff --git a/.teamcity/components/generated/services.kt b/.teamcity/components/generated/services.kt index f552260490e9..72e26cb37bce 100644 --- a/.teamcity/components/generated/services.kt +++ b/.teamcity/components/generated/services.kt @@ -28,11 +28,13 @@ var services = mapOf( "datalake" to "Data Lake", "datashare" to "Data Share", "databricks" to "DataBricks", + "dataprotection" to "DataProtection", "databasemigration" to "Database Migration", "databoxedge" to "Databox Edge", "desktopvirtualization" to "Desktop Virtualization", "devtestlabs" to "Dev Test", "digitaltwins" to "Digital Twins", + "domainservices" to "DomainServices", "eventgrid" to "EventGrid", "eventhub" to "EventHub", "firewall" to "Firewall", diff --git a/.teamcity/components/project.kt b/.teamcity/components/project.kt index 2c90be65d509..ee052834a376 100644 --- a/.teamcity/components/project.kt +++ b/.teamcity/components/project.kt @@ -23,12 +23,12 @@ fun buildConfigurationsForServices(services: Map, providerName : services.forEach { (serviceName, displayName) -> // TODO: overriding locations - var defaultTestConfig = testConfiguration(defaultParallelism, defaultStartHour) + var defaultTestConfig = testConfiguration() var testConfig = serviceTestConfigurationOverrides.getOrDefault(serviceName, defaultTestConfig) var runNightly = 
runNightly.getOrDefault(environment, false) var service = serviceDetails(serviceName, displayName, environment) - var buildConfig = service.buildConfiguration(providerName, runNightly, testConfig.startHour, testConfig.parallelism) + var buildConfig = service.buildConfiguration(providerName, runNightly, testConfig.startHour, testConfig.parallelism, testConfig.daysOfWeek, testConfig.daysOfMonth) buildConfig.params.ConfigureAzureSpecificTestParameters(environment, config, locationsForEnv) @@ -46,7 +46,9 @@ fun pullRequestBuildConfiguration(environment: String, configuration: ClientConf return buildConfiguration } -class testConfiguration(parallelism: Int, startHour: Int) { +class testConfiguration(parallelism: Int = defaultParallelism, startHour: Int = defaultStartHour, daysOfWeek: String = defaultDaysOfWeek, daysOfMonth: String = defaultDaysOfMonth) { var parallelism = parallelism var startHour = startHour + var daysOfWeek = daysOfWeek + var daysOfMonth = daysOfMonth } \ No newline at end of file diff --git a/.teamcity/components/settings.kt b/.teamcity/components/settings.kt index 905ced5470ee..51bebfa84dcd 100644 --- a/.teamcity/components/settings.kt +++ b/.teamcity/components/settings.kt @@ -5,7 +5,13 @@ var defaultStartHour = 0 var defaultParallelism = 20 // specifies the default version of Terraform Core which should be used for testing -var defaultTerraformCoreVersion = "0.15.3" +var defaultTerraformCoreVersion = "1.0.1" + +// This represents a cron view of days of the week, Monday - Friday. 
+const val defaultDaysOfWeek = "2,3,4,5,6" + +// Cron value for any day of month +const val defaultDaysOfMonth = "*" var locations = mapOf( "public" to LocationConfiguration("westeurope", "eastus2", "francecentral", false), @@ -20,40 +26,49 @@ var runNightly = mapOf( // specifies a list of services which should be run with a custom test configuration var serviceTestConfigurationOverrides = mapOf( // these tests all conflict with one another - "authorization" to testConfiguration(1, defaultStartHour), + "authorization" to testConfiguration(parallelism = 1), //Blueprints are constrained on the number of targets available - these execute quickly, so can be serialised - "blueprints" to testConfiguration(1, defaultStartHour), + "blueprints" to testConfiguration(parallelism = 1), + + // "cognitive" is expensive - Monday, Wednesday, Friday + "cognitive" to testConfiguration(daysOfWeek = "2,4,6"), // The AKS API has a low rate limit - "containers" to testConfiguration(5, defaultStartHour), + "containers" to testConfiguration(parallelism = 5), // Data Lake has a low quota - "datalake" to testConfiguration(2, defaultStartHour), + "datalake" to testConfiguration(parallelism = 2), + + // "hdinsight" is super expensive + "hdinsight" to testConfiguration(daysOfWeek = "2,4,6"), // HPC Cache has a 4 instance per subscription quota as of early 2021 - "hpccache" to testConfiguration(3, defaultStartHour), + "hpccache" to testConfiguration(parallelism = 3, daysOfWeek = "2,4,6"), - // HSM has low quota and potentially slow recycle time - "hsm" to testConfiguration(1, defaultStartHour), + // HSM has low quota and potentially slow recycle time, Only run on Mondays + "hsm" to testConfiguration(parallelism = 1, daysOfWeek = "1"), // Log Analytics Clusters have a max deployments of 2 - parallelism set to 1 or `importTest` fails - "loganalytics" to testConfiguration(1, defaultStartHour), + "loganalytics" to testConfiguration(parallelism = 1), + + // netapp has a max of 20 accounts per 
subscription so lets limit it to 10 to account for broken ones, run Monday, Wednesday, Friday + "netapp" to testConfiguration(parallelism = 10, daysOfWeek = "2,4,6"), - // netapp has a max of 20 accounts per subscription so lets limit it to 10 to account for broken ones - "netapp" to testConfiguration(10, defaultStartHour), + // redisenterprise is costly - Monday, Wednesday, Friday + "redisenterprise" to testConfiguration(daysOfWeek = "2,4,6"), // servicebus quotas are limited and we experience failures if tests // execute too quickly as we run out of namespaces in the sub - "servicebus" to testConfiguration(10, defaultStartHour), + "servicebus" to testConfiguration(parallelism = 10), // SignalR only allows provisioning one "Free" instance at a time, // which is used in multiple tests - "signalr" to testConfiguration(1, defaultStartHour), + "signalr" to testConfiguration(parallelism = 1), // Spring Cloud only allows a max of 10 provisioned - "springcloud" to testConfiguration(5, defaultStartHour), + "springcloud" to testConfiguration(parallelism = 5), // Currently have a quota of 10 nodes, 3 nodes required per test so lets limit it to 3 - "vmware" to testConfiguration(3, defaultStartHour) + "vmware" to testConfiguration(parallelism = 3) ) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5f0c2a7fcad7..5d487eb0a042 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,22 +1,320 @@ -## 2.61.0 (Unreleased) +## 2.70.0 (Unreleased) + +ENHANCEMENTS: + +* maps: refactoring to use an Embedded SDK [GH-12716] +* msi: refactoring to use an Embedded SDK [GH-12715] + +## 2.69.0 (July 23, 2021) + +FEATURES: + +* **New Data Source** `azurerm_active_directory_domain_service` ([#10782](https://github.com/terraform-providers/terraform-provider-azurerm/issues/10782)) +* **New Resource** `azurerm_active_directory_domain_service` ([#10782](https://github.com/terraform-providers/terraform-provider-azurerm/issues/10782)) +* **New Resource** `azurerm_active_directory_domain_service_replica_set` 
([#10782](https://github.com/terraform-providers/terraform-provider-azurerm/issues/10782)) +* **New Resource** `azurerm_api_management_gateway_api` ([#12398](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12398)) +* **New Resource** `azurerm_batch_job` ([#12573](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12573)) +* **New Resource** `azurerm_bot_channel_web_chat` ([#12672](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12672)) +* **New Resource** `azurerm_data_factory_managed_private_endpoint` ([#12618](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12618)) +* **New Resource** `azurerm_data_protection_backup_policy_blob_storage` ([#12362](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12362)) +* **New Resource** `azurerm_signalr_service_network_acl` ([#12434](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12434)) +* **New Resource** `azurerm_virtual_network_dns_servers` ([#10782](https://github.com/terraform-providers/terraform-provider-azurerm/issues/10782)) + +ENHANCEMENTS: + +* dependencies: Upgrading to `v55.6.0` of `github.com/Azure/azure-sdk-for-go` ([#12565](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12565)) +* `azurerm_api_management_named_value` - the field `secret_id` can now be set to a versionless Key Vault Key ([#12641](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12641)) +* `azurerm_data_factory_integration_runtime_azure_ssis` - support for the `public_ips`, `express_custom_setup`, `package_store`, and `proxy` blocks ([#12545](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12545)) +* `azurerm_data_factory_integration_runtime_azure_ssis` - support for the `key_vault_password`, and `key_vault_license` blocks ([#12659](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12659)) +* 
`azurerm_bot_channels_registration` - support for the `cmk_key_vault_url`, `description`, `icon_url`, and `isolated_network_enabled` ([#12560](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12560)) +* `azurerm_data_factory_integration_runtime_azure` - support for the `virtual_network_enabled` property ([#12619](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12619)) +* `azurerm_eventgrid_event_subscription` - support for the `advanced_filtering_on_arrays_enabled` property ([#12609](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12609)) +* `azurerm_eventgrid_system_topic_event_subscription` - support for the `advanced_filtering_on_arrays_enabled` property ([#12609](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12609)) +* `azurerm_eventhub_namespace` - support for Azure Event Hubs Namespace Premium tier ([#12695](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12695)) +* `azurerm_kubernetes_cluster` - support for downgrading `sku_tier` from `Paid` to `Free` without recreating the Cluster ([#12651](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12651)) +* `azurerm_kusto_eventgrid_data_connection` - Add supported `data_format` APACHEAVRO, ORC, PARQUET, TSVE and W3CLOGFILE to validation function. 
([#12687](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12687)) +* `azurerm_postgresql_flexible_server` - support for the `high_availability` block ([#12587](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12587)) + +BUG FIXES: + +* `data.azurerm_redis_cache` - fix a bug that caused the data source to raise an error ([#12666](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12666)) +* `azurerm_application_gateway` - return an error when ssl policy is not properly configured ([#12647](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12647)) +* `azurerm_data_factory_linked_custom_service` - fix a bug causing `additional_properties` to be read incorrectly into state ([#12664](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12664)) +* `azurerm_eventhub_authorization_rule` - fixing the error "empty non-retryable error received" ([#12642](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12642)) +* `azurerm_machine_learning_compute_cluster` - fix a crash when creating a cluster without specifying `subnet_resource_id` ([#12658](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12658)) +* `azurerm_storage_account` - fixed account_replication_type validation ([#12645](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12645)) + +## 2.68.0 (July 16, 2021) + +FEATURES: + +* **New Data Source** `azurerm_local_network_gateway` ([#12579](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12579)) +* **New Resource** `azurerm_api_management_api_release` ([#12562](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12562)) +* **New Resource** `azurerm_data_protection_backup_policy_disk` ([#12361](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12361)) +* **New Resource** `azurerm_data_factory_custom_dataset` 
([#12484](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12484)) +* **New Resource** `azurerm_data_factory_dataset_binary` ([#12369](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12369)) +* **New Resource** `azurerm_maintenance_assignment_virtual_machine_scale_set` ([#12273](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12273)) +* **New Resource** `azurerm_postgresql_flexible_server_configuration` ([#12294](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12294)) +* **New Resource** `azurerm_synapse_private_link_hub` ([#12495](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12495)) + +ENHANCEMENTS: + +* dependencies: upgrading to `v55.5.0` of `github.com/Azure/azure-sdk-for-go` ([#12435](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12435)) +* dependencies: updating `bot` to use API Version `2021-03-01` ([#12449](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12449)) +* dependencies: updating `maintenance` to use API Version `2021-05-01` ([#12273](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12273)) +* `azurerm_api_management_named_value` - support for the `value_from_key_vault` block ([#12309](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12309)) +* `azurerm_api_management_api_diagnostic` - support for the `data_masking`1 property ([#12419](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12419)) +* `azurerm_cognitive_account` - support for the `identity`, `storage`, `disable_local_auth`, `fqdns`, `public_network_access_enabled`, and `restrict_outbound_network_access` properties ([#12469](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12469)) +* `azurerm_cognitive_account` - the `virtual_network_subnet_ids` property has been deprecated in favour of `virtual_network_rules` block to 
support the `ignore_missing_vnet_service_endpoint` property ([#12600](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12600)) +* `azurerm_container_registry` - now exports the `principal_id` and `tenant_id` attributes in the `identity` block ([#12378](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12378)) +* `azurerm_data_factory` - support for the `managed_virtual_network_enabled` property ([#12343](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12343)) +* `azurerm_linux_virtual_machine_scale_set` - Fix un-necessary VMSS instance rolling request ([#12590](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12590)) +* `azurerm_maintenance_configuration` - support for the `window`, `visibility`, and `properties` blocks ([#12273](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12273)) +* `azurerm_powerbi_embedded` - support for the `mode` property ([#12394](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12394)) +* `azurerm_redis_cache` - support for the `maintenance_window` property in the `patch_schedule` block ([#12472](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12472)) +* `azurerm_storage_account_customer_managed_key` - support for the `user_assigned_identity_id` property ([#12516](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12516)) + +BUG FIXES: + +* `azurerm_api_management` - no longer forces a new resource when changing the `subnet_id` property ([#12611](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12611)) +* `azurerm_function_app` - set a default value for `os_type` and allow a blank string to be specified as per documentation ([#12482](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12482)) +* `azurerm_key_vault_access_policy` - prevent a possible panic on delete 
([#12616](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12616)) +* `azurerm_postgresql_flexible_server` - add new computed property `private_dns_zone_id` to work around an upcoming breaking change in the API ([#12288](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12288)) +* `machine_learning_compute_cluster` - make the `subnet_resource_id` property actually optional ([#12558](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12558)) +* `azurerm_mssql_database` - don't allow license_type to be set for serverless SQL databases ([#12555](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12555)) +* `azurerm_subnet_network_security_group_association` - prevent potential deadlocks when using multiple association resources ([#12267](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12267)) + +## 2.67.0 (July 09, 2021) + +FEATURES: + +* **New Data Source** `azurerm_api_management_gateway` ([#12297](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12297)) +* **New Resource** `azurerm_api_management_gateway` ([#12297](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12297)) +* **New Resource** `azurerm_databricks_workspace_customer_managed_key`([#12331](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12331)) + +ENHANCEMENTS: + +* dependencies: updating `postgresqlflexibleservers` to use API Version `2021-06-01` ([#12405](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12405)) +* `azurerm_databricks_workspace` - add support for `machine_learning_workspace_id`, `customer_managed_key_enabled`, `infrastructure_encryption_enabled` and `storage_account_identity` ([#12331](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12331)) +* `azurerm_security_center_assessment_policy` - support for the `categories` property 
([#12383](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12383)) + +BUG FIXES: + +* `azurerm_api_management` - fix an issue where changing the location of an `additional_location` would force a new resource ([#12468](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12468)) +* `azurerm_app_service` - fix crash when resource group or ASE is missing. ([#12518](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12518)) +* `azurerm_automation_variable_int` - fixed value parsing order causing `1` to be considered a bool ([#12511](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12511)) +* `azurerm_automation_variable_bool` - fixed value parsing order causing `1` to be considered a bool ([#12511](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12511)) +* `azurerm_data_factory_dataset_parquet` - the `azure_blob_storage_location.filename` property is now optional ([#12414](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12414)) +* `azurerm_kusto_eventhub_data_connection` - `APACHEAVRO` can now be used as a `data_format` option ([#12480](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12480)) +* `azurerm_site_recovery_replicated_vm ` - Fix potential crash in reading `managed_disk` properties ([#12509](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12509)) +* `azurerm_storage_account` - `account_replication_type` can now be updated ([#12479](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12479)) +* `azurerm_storage_management_policy` - fix crash in read of properties ([#12487](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12487)) +* `azurerm_storage_share_directory` now allows underscore in property `name` [[#12454](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12454)] +* 
`azurerm_security_center_subscription_pricing` - removed Owner permission note from documentation ([#12481](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12481)) + +DEPRECATIONS: + +* `azurerm_postgresql_flexible_server` - the `cmk_enabled` property has been deprecated as it has been removed from the API ([#12405](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12405)) +* `azurerm_virtual_machine_configuration_policy_assignment` - has been deprecated and renamed to `azurerm_policy_virtual_machine_configuration_assignment` ([#12497](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12497)) + +## 2.66.0 (July 02, 2021) + +FEATURES: + +* **New Resource** `azurerm_api_management_api_operation_tag` ([#12384](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12384)) +* **New Resource** `azurerm_data_factory_linked_custom_service` ([#12224](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12224)) +* **New Resource** `azurerm_data_factory_trigger_blob_event` ([#12330](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12330)) +* **New Resource** `azurerm_express_route_connection` ([#11320](https://github.com/terraform-providers/terraform-provider-azurerm/issues/11320)) +* **New Resource** `azurerm_express_route_circuit_connection` ([#11303](https://github.com/terraform-providers/terraform-provider-azurerm/issues/11303)) +* **New Resource** `azurerm_management_group_policy_assignment` ([#12349](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12349)) +* **New Resource** `azurerm_resource_group_policy_assignment` ([#12349](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12349)) +* **New Resource** `azurerm_resource_policy_assignment` ([#12349](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12349)) +* **New Resource** `azurerm_subscription_policy_assignment` 
([#12349](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12349)) +* **New resource** `azurerm_tenant_configuration` ([#11697](https://github.com/terraform-providers/terraform-provider-azurerm/issues/11697)) +* Cognitive Service now supports purging soft delete accounts ([#12281](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12281)) + +ENHANCEMENTS: + +* dependencies: updating `cognitive` to use API Version `2021-03-01` ([#12281](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12281)) +* dependencies: updating `trafficmanager` to use API Version `2018-08-01` ([#12400](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12400)) +* `azurerm_api_management_backend` - support for the `client_certificate_id` property ([#12402](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12402)) +* `azurerm_api_management_api` - support for the `revision_description`, `version_description`, and `source_api_id` properties ([#12266](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12266)) +* `azurerm_batch_account` - support for the `public_network_access_enabled` property ([#12401](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12401)) +* `azurerm_eventgrid_event_subscription` - support for additional advanced filters `string_not_begins_with`, `string_not_ends_with`, `string_not_contains`, `is_not_null`, `is_null_or_undefined`, `number_in_range` and `number_not_in_range` ([#12167](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12167)) +* `azurerm_eventgrid_system_topic_event_subscription` - support for additional advanced filters `string_not_begins_with`, `string_not_ends_with`, `string_not_contains`, `is_not_null`, `is_null_or_undefined`, `number_in_range` and `number_not_in_range` ([#12167](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12167)) +* 
`azurerm_kubernetes_cluster` - support for the `fips_enabled`, `kubelet_disk_type`, and `license` properties ([#11835](https://github.com/terraform-providers/terraform-provider-azurerm/issues/11835)) +* `azurerm_kubernetes_cluster_node_pool` - support for the `fips_enabled`, and `kubelet_disk_type` properties ([#11835](https://github.com/terraform-providers/terraform-provider-azurerm/issues/11835)) +* `azurerm_lighthouse_definition` - support for the `plan` block ([#12360](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12360)) +* `azurerm_site_recovery_replicated_vm` - Add support for `target_disk_encryption_set_id` in `managed_disk` ([#12374](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12374)) +* `azurerm_traffic_manager_endpoint` - supports for the `minimum_required_child_endpoints_ipv4` and `minimum_required_child_endpoints_ipv6` ([#12400](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12400)) + +BUG FIXES: + +* `azurerm_app_service` - fix app_setting and SCM setting ordering ([#12280](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12280)) +* `azurerm_hdinsight_kafka_cluster` - will no longer panic from an empty `component_version` property ([#12261](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12261)) +* `azurerm_spatial_anchors_account` - the `tags` property can now be updated without creating a new resource ([#11985](https://github.com/terraform-providers/terraform-provider-azurerm/issues/11985)) +* **Data Source** `azurerm_app_service_environment_v3` - fix id processing for Read ([#12436](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12436)) + + +## 2.65.0 (June 25, 2021) + +FEATURES: + +* **New Resource** `azurerm_data_protection_backup_instance_postgresql` ([#12220](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12220)) +* **New Resource** 
`azurerm_hpc_cache_blob_nfs_target` ([#11671](https://github.com/terraform-providers/terraform-provider-azurerm/issues/11671)) +* **New Resource** `azurerm_nat_gateway_public_ip_prefix_association` ([#12353](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12353)) + +ENHANCEMENTS: + +* dependencies: updating to `v2.6.1` of `github.com/hashicorp/terraform-plugin-sdk` ([#12209](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12209)) +* dependencies: upgrading to `v55.3.0` of `github.com/Azure/azure-sdk-for-go` ([#12263](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12263)) +* dependencies: updating to `v0.11.19` of `github.com/Azure/go-autorest/autorest` ([#12209](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12209)) +* dependencies: updating to `v0.9.14` of `github.com/Azure/go-autorest/autorest/adal` ([#12209](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12209)) +* dependencies: updating the embedded SDK for Eventhub Namespaces to use API Version `2021-01-01-preview` ([#12290](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12290)) +* `azurerm_express_route_circuit_peering` - support for the `bandwidth_in_gbps` and `express_route_port_id` properties ([#12289](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12289)) +* `azurerm_kusto_iothub_data_connection` - support for the `data_format`, `mapping_rule_name` and `table_name` properties ([#12293](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12293)) +* `azurerm_linux_virtual_machine` - updating `proximity_placement_group_id` will no longer create a new resource ([#11790](https://github.com/terraform-providers/terraform-provider-azurerm/issues/11790)) +* `azurerm_security_center_assessment_metadata` - support for the `categories` property 
([#12278](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12278)) +* `azurerm_windows_virtual_machine` - updating `proximity_placement_group_id` will no longer create a new resource ([#11790](https://github.com/terraform-providers/terraform-provider-azurerm/issues/11790)) + +BUG FIXES: + +* `azurerm_data_factory` - fix a bug where the `name` property was stored with the wrong casing ([#12128](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12128)) + +## 2.64.0 (June 18, 2021) + +FEATURES: + +* **New Data Source** `azurerm_key_vault_secrets` ([#12147](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12147)) +* **New Resource** `azurerm_api_management_redis_cache` ([#12174](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12174)) +* **New Resource** `azurerm_data_factory_linked_service_odata` ([#11556](https://github.com/terraform-providers/terraform-provider-azurerm/issues/11556)) +* **New Resource** `azurerm_data_protection_backup_policy_postgresql` ([#12072](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12072)) +* **New Resource** `azurerm_machine_learning_compute_cluster` ([#11675](https://github.com/terraform-providers/terraform-provider-azurerm/issues/11675)) +* **New Resource** `azurerm_eventhub_namespace_customer_managed_key` ([#12159](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12159)) +* **New Resource** `azurerm_virtual_desktop_application` ([#12077](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12077)) + +ENHANCEMENTS: + +* dependencies: updating to `v55.2.0` of `github.com/Azure/azure-sdk-for-go` ([#12153](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12153)) +* dependencies: updating `synapse` to use API Version `2021-03-01` ([#12183](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12183)) +* `azurerm_api_management` - 
support for the `client_certificate_enabled`, `gateway_disabled`, `min_api_version`, and `zones` properties ([#12125](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12125)) +* `azurerm_api_management_api_schema` - prevent plan not empty after apply for json definitions ([#12039](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12039)) +* `azurerm_application_gateway` - correctly populate the `identity` block ([#12226](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12226)) +* `azurerm_container_registry` - support for the `zone_redundancy_enabled` field ([#11706](https://github.com/terraform-providers/terraform-provider-azurerm/issues/11706)) +* `azurerm_cosmosdb_sql_container` - support for the `spatial_index` block ([#11625](https://github.com/terraform-providers/terraform-provider-azurerm/issues/11625)) +* `azurerm_cosmos_gremlin_graph` - support for the `spatial_index` property ([#12176](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12176)) +* `azurerm_data_factory` - support for `global_parameter` ([#12178](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12178)) +* `azurerm_kubernetes_cluster` - support for the `kubelet_config` and `linux_os_config` blocks ([#11119](https://github.com/terraform-providers/terraform-provider-azurerm/issues/11119)) +* `azurerm_monitor_metric_alert` - support the `StartsWith` dimension operator ([#12181](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12181)) +* `azurerm_private_link_service` - changing `load_balancer_frontend_ip_configuration_ids` list no longer creates a new resource ([#12250](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12250)) +* `azurerm_stream_analytics_job` - support for the `identity` block ([#12171](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12171)) +* `azurerm_storage_account` - support for the 
`share_properties` block ([#12103](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12103)) +* `azurerm_synapse_workspace` - support for the `data_exfiltration_protection_enabled` property ([#12183](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12183)) +* `azurerm_synapse_role_assignment` - support for scopes and new role types ([#11690](https://github.com/terraform-providers/terraform-provider-azurerm/issues/11690)) + +BUG FIXES: + +* `azurerm_synapse_role_assignment` - support new roles and scopes ([#11690](https://github.com/terraform-providers/terraform-provider-azurerm/issues/11690)) +* `azurerm_lb` - fix zone behaviour bug introduced in recent API upgrade ([#12208](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12208)) + +## 2.63.0 (June 11, 2021) + +FEATURES: + +* **New Resource** `azurerm_data_factory_linked_service_azure_search` ([#12122](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12122)) +* **New Resource** `azurerm_data_factory_linked_service_kusto` ([#12152](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12152)) + +ENHANCEMENTS: + +* dependencies: updating `streamanalytics` to use API Version `2020-03-01-preview` ([#12133](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12133)) +* dependencies: updating `virtualdesktop` to use API Version `2020-11-02-preview` ([#12160](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12160)) +* `data.azurerm_synapse_workspace` - support for the `identity` attribute ([#12098](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12098)) +* `azurerm_cosmosdb_gremlin_graph` - support for the `composite_index` and `partition_key_version` properties ([#11693](https://github.com/terraform-providers/terraform-provider-azurerm/issues/11693)) +* `azurerm_data_factory_dataset_azure_blob` - support for the `dynamic_filename_enabled` 
and `dynamic_path_enabled` properties ([#12034](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12034)) +* `azurerm_data_factory_dataset_delimited_text` - supports the `azure_blob_fs_location` property ([#12041](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12041)) +* `azurerm_data_factory_linked_service_azure_sql_database` - support for the `key_vault_connection_string` property ([#12139](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12139)) +* `azurerm_data_factory_linked_service_sql_server` - add `key_vault_connection_string` argument ([#12117](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12117)) +* `azurerm_data_factory_linked_service_data_lake_storage_gen2` - supports for the `storage_account_key` property ([#12136](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12136)) +* `azurerm_eventhub` - support for the `status` property ([#12043](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12043)) +* `azurerm_kubernetes_cluster` - support migration of `service_principal` to `identity` ([#12049](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12049)) +* `azurerm_kubernetes_cluster` -support for BYO `kubelet_identity` ([#12037](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12037)) +* `azurerm_kusto_cluster_customer_managed_key` - supports for the `user_identity` property ([#12135](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12135)) +* `azurerm_network_watcher_flow_log` - support for the `location` and `tags` properties ([#11670](https://github.com/terraform-providers/terraform-provider-azurerm/issues/11670)) +* `azurerm_storage_account` - support for user assigned identities ([#11752](https://github.com/terraform-providers/terraform-provider-azurerm/issues/11752)) +* `azurerm_storage_account_customer_managed_key` - support the use of 
keys from key vaults in remote subscription ([#12142](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12142)) +* `azurerm_virtual_desktop_host_pool` - support for the `start_vm_on_connect` property ([#12160](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12160)) +* `azurerm_vpn_server_configuration` - now supports multiple `auth` blocks ([#12085](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12085)) + +BUG FIXES: + +* Service: App Configuration - Fixed a bug in tags on resources all being set to the same value ([#12062](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12062)) +* Service: Event Hubs - Fixed a bug in tags on resources all being set to the same value ([#12062](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12062)) +* `azurerm_subscription` - fix ability to specify `DevTest` as `workload` ([#12066](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12066)) +* `azurerm_sentinel_alert_rule_scheduled` - the query frequency duration can now be up to 14 days ([#12164](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12164)) + +## 2.62.1 (June 08, 2021) + +BUG FIXES: + +* `azurerm_role_assignment` - use the correct ID when assigning roles to resources ([#12076](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12076)) + + +## 2.62.0 (June 04, 2021) + +FEATURES: + +* **New Resource** `azurerm_data_protection_backup_vault` ([#11955](https://github.com/terraform-providers/terraform-provider-azurerm/issues/11955)) +* **New Resource** `azurerm_postgresql_flexible_server_firewall_rule` ([#11834](https://github.com/terraform-providers/terraform-provider-azurerm/issues/11834)) +* **New Resource** `azurerm_vmware_express_route_authorization` ([#11812](https://github.com/terraform-providers/terraform-provider-azurerm/issues/11812)) +* **New Resource** 
`azurerm_storage_object_replication_policy` ([#11744](https://github.com/terraform-providers/terraform-provider-azurerm/issues/11744)) + +ENHANCEMENTS: + +* dependencies: updating `network` to use API Version `2020-11-01` ([#11627](https://github.com/terraform-providers/terraform-provider-azurerm/issues/11627)) +* `azurerm_app_service_environment` - support for the `internal_ip_address`, `service_ip_address`, and `outbound_ip_addresses`properties ([#12026](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12026)) +* `azurerm_api_management_api_subscription` - support for the `api_id` property ([#12025](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12025)) +* `azurerm_container_registry` - support for versionless encryption keys for ACR ([#11856](https://github.com/terraform-providers/terraform-provider-azurerm/issues/11856)) +* `azurerm_kubernetes_cluster` - support for `gateway_name` for Application Gateway add-on ([#11984](https://github.com/terraform-providers/terraform-provider-azurerm/issues/11984)) +* `azurerm_kubernetes_cluster` - support update of `azure_rbac_enabled` ([#12029](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12029)) +* `azurerm_kubernetes_cluster` - support for `node_public_ip_prefix_id` ([#11635](https://github.com/terraform-providers/terraform-provider-azurerm/issues/11635)) +* `azurerm_kubernetes_cluster_node_pool` - support for `node_public_ip_prefix_id` ([#11635](https://github.com/terraform-providers/terraform-provider-azurerm/issues/11635)) +* `azurerm_machine_learning_inference_cluster` - support for the `ssl.leaf_domain_label` and `ssl.overwrite_existing_domain` properties ([#11830](https://github.com/terraform-providers/terraform-provider-azurerm/issues/11830)) +* `azurerm_role_assignment` - support the `delegated_managed_identity_resource_id` property ([#11848](https://github.com/terraform-providers/terraform-provider-azurerm/issues/11848)) + +BUG FIXES: + 
+* `azurerm_postgres_server` - do not update `password` unless it has changed ([#12008](https://github.com/terraform-providers/terraform-provider-azurerm/issues/12008)) +* `azurerm_storage_account` - prevent `containerDeleteRetentionPolicy` and `lastAccessTimeTrackingPolicy` not supported in `AzureUSGovernment` errors ([#11960](https://github.com/terraform-providers/terraform-provider-azurerm/issues/11960)) + +## 2.61.0 (May 27, 2021) FEATURES: -* **New Data Source:** `azurerm_spatial_anchors_account` [GH-11824] +* **New Data Source:** `azurerm_spatial_anchors_account` ([#11824](https://github.com/terraform-providers/terraform-provider-azurerm/issues/11824)) ENHANCEMENTS: -* dependencies: updating to `v54.3.0` of `github.com/Azure/azure-sdk-for-go` [GH-11813] -* dependencies: updating `mixedreality` to use API Version `2021-01-01` [GH-11824] -* `azurerm_data_factory_linked_service_sftp`: support for hostkey related properties [GH-11825] -* `azurerm_spatial_anchors_account` - support for `account_domain` and `account_id` [GH-11824] -* `azurerm_static_site`: Add support for `tags` attribute [GH-11849] +* dependencies: updating to `v54.3.0` of `github.com/Azure/azure-sdk-for-go` ([#11813](https://github.com/terraform-providers/terraform-provider-azurerm/issues/11813)) +* dependencies: updating `mixedreality` to use API Version `2021-01-01` ([#11824](https://github.com/terraform-providers/terraform-provider-azurerm/issues/11824)) +* refactor: switching to use an embedded SDK for `appconfiguration` ([#11959](https://github.com/terraform-providers/terraform-provider-azurerm/issues/11959)) +* refactor: switching to use an embedded SDK for `eventhub` ([#11973](https://github.com/terraform-providers/terraform-provider-azurerm/issues/11973)) +* provider: support for the Virtual Machine `skip_shutdown_and_force_delete` feature ([#11216](https://github.com/terraform-providers/terraform-provider-azurerm/issues/11216)) +* provider: support for the Virtual Machine Scale Set 
`force_delete` feature ([#11216](https://github.com/terraform-providers/terraform-provider-azurerm/issues/11216)) +* provider: no longer auto register the Microsoft.DevSpaces RP ([#11822](https://github.com/terraform-providers/terraform-provider-azurerm/issues/11822)) +* Data Source: `azurerm_key_vault_certificate_data` - support certificate bundles and add support for ECDSA keys ([#11974](https://github.com/terraform-providers/terraform-provider-azurerm/issues/11974)) +* `azurerm_data_factory_linked_service_sftp` - support for hostkey related properties ([#11825](https://github.com/terraform-providers/terraform-provider-azurerm/issues/11825)) +* `azurerm_spatial_anchors_account` - support for `account_domain` and `account_id` ([#11824](https://github.com/terraform-providers/terraform-provider-azurerm/issues/11824)) +* `azurerm_static_site` - Add support for `tags` attribute ([#11849](https://github.com/terraform-providers/terraform-provider-azurerm/issues/11849)) +* `azurerm_storage_account` - `private_link_access` supports more values ([#11957](https://github.com/terraform-providers/terraform-provider-azurerm/issues/11957)) +* `azurerm_storage_account_network_rules`: `private_link_access` supports more values ([#11957](https://github.com/terraform-providers/terraform-provider-azurerm/issues/11957)) +* `azurerm_synapse_spark_pool` - `spark_version` now supports `3.0` ([#11972](https://github.com/terraform-providers/terraform-provider-azurerm/issues/11972)) BUG FIXES: -* `azurerm_cdn_endpoint` - do not send an empty `origin_host_header` to the api [GH-11852] -* `azurerm_windows_virtual_machine_scale_set`: changing the `disable_automatic_rollback` and `enable_automatic_os_upgrade` properties no longer created a new resource [GH-11723] -* `azurerm_linux_virtual_machine_scale_set`: changing the `disable_automatic_rollback` and `enable_automatic_os_upgrade` properties no longer created a new resource [GH-11723] +* `azurerm_cdn_endpoint` - do not send an empty 
`origin_host_header` to the api ([#11852](https://github.com/terraform-providers/terraform-provider-azurerm/issues/11852)) +* `azurerm_linux_virtual_machine_scale_set`: changing the `disable_automatic_rollback` and `enable_automatic_os_upgrade` properties no longer created a new resource ([#11723](https://github.com/terraform-providers/terraform-provider-azurerm/issues/11723)) +* `azurerm_storage_share`: Fix ID for `resource_manager_id` ([#11828](https://github.com/terraform-providers/terraform-provider-azurerm/issues/11828)) +* `azurerm_windows_virtual_machine_scale_set`: changing the `disable_automatic_rollback` and `enable_automatic_os_upgrade` properties no longer created a new resource ([#11723](https://github.com/terraform-providers/terraform-provider-azurerm/issues/11723)) ## 2.60.0 (May 20, 2021) diff --git a/GNUmakefile b/GNUmakefile index fa3be621e7cf..4888ee3316ef 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -54,7 +54,7 @@ generate: goimports: @echo "==> Fixing imports code with goimports..." - goimports -w $(PKG_NAME)/ + @find . -name '*.go' | grep -v vendor | grep -v generator-resource-id | while read f; do ./scripts/goimport-file.sh "$$f"; done lint: ./scripts/run-lint.sh diff --git a/azurerm/helpers/azure/resourceid.go b/azurerm/helpers/azure/resourceid.go index b000ef94d6f2..7edd6f0aaffd 100644 --- a/azurerm/helpers/azure/resourceid.go +++ b/azurerm/helpers/azure/resourceid.go @@ -11,10 +11,11 @@ import ( // level fields, and other key-value pairs available via a map in the // Path field. 
type ResourceID struct { - SubscriptionID string - ResourceGroup string - Provider string - Path map[string]string + SubscriptionID string + ResourceGroup string + Provider string + SecondaryProvider string + Path map[string]string } // ParseAzureResourceID converts a long-form Azure Resource Manager ID @@ -40,6 +41,7 @@ func ParseAzureResourceID(id string) (*ResourceID, error) { } var subscriptionID string + var provider string // Put the constituent key-value pairs into a map componentMap := make(map[string]string, len(components)/2) @@ -52,11 +54,16 @@ func ParseAzureResourceID(id string) (*ResourceID, error) { return nil, fmt.Errorf("Key/Value cannot be empty strings. Key: '%s', Value: '%s'", key, value) } - // Catch the subscriptionID before it can be overwritten by another "subscriptions" - // value in the ID which is the case for the Service Bus subscription resource - if key == "subscriptions" && subscriptionID == "" { + switch { + case key == "subscriptions" && subscriptionID == "": + // Catch the subscriptionID before it can be overwritten by another "subscriptions" + // value in the ID which is the case for the Service Bus subscription resource subscriptionID = value - } else { + case key == "providers" && provider == "": + // Catch the provider before it can be overwritten by another "providers" + // value in the ID which can be the case for the Role Assignment resource + provider = value + default: componentMap[key] = value } } @@ -82,9 +89,12 @@ func ParseAzureResourceID(id string) (*ResourceID, error) { delete(componentMap, "resourcegroups") } - // It is OK not to have a provider in the case of a resource group - if provider, ok := componentMap["providers"]; ok { + if provider != "" { idObj.Provider = provider + } + + if secondaryProvider := componentMap["providers"]; secondaryProvider != "" { + idObj.SecondaryProvider = secondaryProvider delete(componentMap, "providers") } diff --git a/azurerm/helpers/azure/resourceid_test.go 
b/azurerm/helpers/azure/resourceid_test.go index aa69bc8d8174..6d85a5fd4ac6 100644 --- a/azurerm/helpers/azure/resourceid_test.go +++ b/azurerm/helpers/azure/resourceid_test.go @@ -147,6 +147,20 @@ func TestParseAzureResourceID(t *testing.T) { }, false, }, + { + "/subscriptions/11111111-1111-1111-1111-111111111111/resourceGroups/example-resources/providers/Microsoft.Storage/storageAccounts/nameStorageAccount/providers/Microsoft.Authorization/roleAssignments/22222222-2222-2222-2222-222222222222", + &azure.ResourceID{ + SubscriptionID: "11111111-1111-1111-1111-111111111111", + ResourceGroup: "example-resources", + Provider: "Microsoft.Storage", + SecondaryProvider: "Microsoft.Authorization", + Path: map[string]string{ + "storageAccounts": "nameStorageAccount", + "roleAssignments": "22222222-2222-2222-2222-222222222222", + }, + }, + false, + }, { // missing resource group "/subscriptions/11111111-1111-1111-1111-111111111111/providers/Microsoft.ApiManagement/service/service1/subscriptions/22222222-2222-2222-2222-222222222222", diff --git a/azurerm/helpers/validate/float.go b/azurerm/helpers/validate/float.go index 3eb24a65d39b..199505be6f42 100644 --- a/azurerm/helpers/validate/float.go +++ b/azurerm/helpers/validate/float.go @@ -2,14 +2,12 @@ package validate import ( "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) // FloatInSlice returns a SchemaValidateFunc which tests if the provided value // is of type float64 and matches the value of an element in the valid slice // -func FloatInSlice(valid []float64) schema.SchemaValidateFunc { +func FloatInSlice(valid []float64) func(interface{}, string) ([]string, []error) { return func(i interface{}, k string) (warnings []string, errors []error) { v, ok := i.(float64) if !ok { diff --git a/azurerm/helpers/validate/port_or_port_range.go b/azurerm/helpers/validate/port_or_port_range.go index 00f3d851630a..4169cf201104 100644 --- a/azurerm/helpers/validate/port_or_port_range.go +++ 
b/azurerm/helpers/validate/port_or_port_range.go @@ -4,11 +4,9 @@ import ( "fmt" "regexp" "strconv" - - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) -func PortOrPortRangeWithin(min int, max int) schema.SchemaValidateFunc { +func PortOrPortRangeWithin(min int, max int) func(interface{}, string) ([]string, []error) { return func(i interface{}, k string) (warnings []string, errors []error) { v, ok := i.(string) if !ok { diff --git a/azurerm/helpers/validate/strings.go b/azurerm/helpers/validate/strings.go index d40a05d89ddc..59de1d3f6fc8 100644 --- a/azurerm/helpers/validate/strings.go +++ b/azurerm/helpers/validate/strings.go @@ -1,10 +1,29 @@ package validate import ( + "encoding/base64" "fmt" "strings" ) +// Base64EncodedString validates that the string is base64 encoded +func Base64EncodedString(i interface{}, k string) ([]string, []error) { + v, ok := i.(string) + if !ok { + return nil, []error{fmt.Errorf("expected type of %q to be string", k)} + } + + if strings.TrimSpace(v) == "" { + return nil, []error{fmt.Errorf("%q must not be empty", k)} + } + + if _, err := base64.StdEncoding.DecodeString(v); err != nil { + return nil, []error{fmt.Errorf("%q must be a valid base64 encoded string", k)} + } + + return nil, nil +} + // LowerCasedString validates that the string is lower-cased func LowerCasedString(i interface{}, k string) ([]string, []error) { v, ok := i.(string) diff --git a/azurerm/helpers/validate/strings_test.go b/azurerm/helpers/validate/strings_test.go index 4d476653f586..8813a69d122f 100644 --- a/azurerm/helpers/validate/strings_test.go +++ b/azurerm/helpers/validate/strings_test.go @@ -4,6 +4,34 @@ import ( "testing" ) +func TestBase64EncodedString(t *testing.T) { + cases := []struct { + Input string + Errors int + }{ + { + Input: "", + Errors: 1, + }, + { + Input: "aGVsbG8td29ybGQ=", + Errors: 0, + }, + { + Input: "hello-world", + Errors: 1, + }, + } + + for _, tc := range cases { + t.Run(tc.Input, func(t *testing.T) { + if _, errors 
:= Base64EncodedString(tc.Input, "base64"); len(errors) != tc.Errors { + t.Fatalf("Expected Base64 string to have %d not %d errors for %q: %v", tc.Errors, len(errors), tc.Input, errors) + } + }) + } +} + func TestLowerCasedStrings(t *testing.T) { cases := []struct { Value string diff --git a/azurerm/helpers/validate/time.go b/azurerm/helpers/validate/time.go index b697014fcc00..763fe4e1906c 100644 --- a/azurerm/helpers/validate/time.go +++ b/azurerm/helpers/validate/time.go @@ -4,11 +4,9 @@ import ( "fmt" "time" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" - iso8601 "github.com/btubbs/datetime" "github.com/rickb777/date/period" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" ) func ISO8601Duration(i interface{}, k string) (warnings []string, errors []error) { @@ -64,7 +62,7 @@ func ISO8601DateTime(i interface{}, k string) (warnings []string, errors []error return warnings, errors } -func AzureTimeZoneString() schema.SchemaValidateFunc { +func AzureTimeZoneString() func(interface{}, string) ([]string, []error) { // List collected from https://support.microsoft.com/en-gb/help/973627/microsoft-time-zone-index-values // TODO look into programatic retrieval https://docs.microsoft.com/en-us/rest/api/maps/timezone/gettimezoneenumwindows validTimeZones := []string{ diff --git a/azurerm/internal/acceptance/check/that.go b/azurerm/internal/acceptance/check/that.go index 596a1779a325..969590b3074a 100644 --- a/azurerm/internal/acceptance/check/that.go +++ b/azurerm/internal/acceptance/check/that.go @@ -5,8 +5,8 @@ import ( "fmt" "regexp" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/helpers" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/testclient" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/types" diff --git a/azurerm/internal/acceptance/data.go b/azurerm/internal/acceptance/data.go index 3c1f525acbcf..cae94a0064d5 100644 --- a/azurerm/internal/acceptance/data.go +++ b/azurerm/internal/acceptance/data.go @@ -3,22 +3,25 @@ package acceptance import ( "fmt" "math" + "math/rand" "os" "strconv" "testing" "github.com/Azure/go-autorest/autorest/azure" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/features" ) +const ( + // charSetAlphaNum is the alphanumeric character set for use with randStringFromCharSet + charSetAlphaNum = "abcdefghijklmnopqrstuvwxyz012346789" +) + func init() { // unit testing if os.Getenv("TF_ACC") == "" { return } - - EnsureProvidersAreInitialised() } type TestData struct { @@ -55,8 +58,6 @@ type TestData struct { // BuildTestData generates some test data for the given resource func BuildTestData(t *testing.T, resourceType string, resourceLabel string) TestData { - EnsureProvidersAreInitialised() - env, err := Environment() if err != nil { t.Fatalf("Error retrieving Environment: %+v", err) @@ -64,7 +65,7 @@ func BuildTestData(t *testing.T, resourceType string, resourceLabel string) Test testData := TestData{ RandomInteger: RandTimeInt(), - RandomString: acctest.RandString(5), + RandomString: randString(5), ResourceName: fmt.Sprintf("%s.%s", resourceType, resourceLabel), Environment: *env, EnvironmentName: EnvironmentName(), @@ -122,5 +123,20 @@ func (td *TestData) RandomStringOfLength(len int) string { panic("Invalid Test: RandomStringOfLength: length argument must be between 1 and 1024 characters") } - return acctest.RandString(len) + return randString(len) +} + +// 
randString generates a random alphanumeric string of the length specified +func randString(strlen int) string { + return randStringFromCharSet(strlen, charSetAlphaNum) +} + +// randStringFromCharSet generates a random string by selecting characters from +// the charset provided +func randStringFromCharSet(strlen int, charSet string) string { + result := make([]byte, strlen) + for i := 0; i < strlen; i++ { + result[i] = charSet[rand.Intn(len(charSet))] + } + return string(result) } diff --git a/azurerm/internal/acceptance/helpers/check_destroyed.go b/azurerm/internal/acceptance/helpers/check_destroyed.go index 7bd6aeb2563e..a824b468b253 100644 --- a/azurerm/internal/acceptance/helpers/check_destroyed.go +++ b/azurerm/internal/acceptance/helpers/check_destroyed.go @@ -3,7 +3,7 @@ package helpers import ( "fmt" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/types" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" ) diff --git a/azurerm/internal/acceptance/helpers/delete.go b/azurerm/internal/acceptance/helpers/delete.go index 7d9ea74e703b..a1f8ff7a74c1 100644 --- a/azurerm/internal/acceptance/helpers/delete.go +++ b/azurerm/internal/acceptance/helpers/delete.go @@ -3,7 +3,7 @@ package helpers import ( "fmt" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/types" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" ) diff --git a/azurerm/internal/acceptance/helpers/exists.go b/azurerm/internal/acceptance/helpers/exists.go index 5e16f25b0be8..fc477288ee17 100644 --- a/azurerm/internal/acceptance/helpers/exists.go +++ b/azurerm/internal/acceptance/helpers/exists.go @@ -3,7 +3,7 @@ package helpers import 
( "fmt" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/types" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" diff --git a/azurerm/internal/acceptance/plugin_sdk_aliases.go b/azurerm/internal/acceptance/plugin_sdk_aliases.go index 0b271f94b931..df443cb32a18 100644 --- a/azurerm/internal/acceptance/plugin_sdk_aliases.go +++ b/azurerm/internal/acceptance/plugin_sdk_aliases.go @@ -3,8 +3,8 @@ package acceptance import ( "regexp" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" ) @@ -54,6 +54,12 @@ func TestCheckOutput(name, value string) pluginsdk.TestCheckFunc { return resource.TestCheckOutput(name, value) } +// TestMatchOutput is a wrapper to enable builds to continue +func TestMatchOutput(name string, r *regexp.Regexp) pluginsdk.TestCheckFunc { + // TODO: move this comment up a level in the future + return resource.TestMatchOutput(name, r) +} + // TestMatchResourceAttr is a TestCheckFunc which checks that the value // in state for the given name/key combination matches the given regex. 
func TestMatchResourceAttr(name, key string, r *regexp.Regexp) pluginsdk.TestCheckFunc { diff --git a/azurerm/internal/acceptance/providers.go b/azurerm/internal/acceptance/providers.go deleted file mode 100644 index 1e846720f9b6..000000000000 --- a/azurerm/internal/acceptance/providers.go +++ /dev/null @@ -1,22 +0,0 @@ -package acceptance - -import ( - "os" - "sync" - - "github.com/hashicorp/terraform-plugin-sdk/acctest" - "github.com/terraform-providers/terraform-provider-azuread/azuread" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/provider" -) - -var once sync.Once - -func EnsureProvidersAreInitialised() { - // require reattach testing is enabled - os.Setenv("TF_ACCTEST_REATTACH", "1") - - once.Do(func() { - acctest.UseBinaryDriver("azurerm", provider.TestAzureProvider) - acctest.UseBinaryDriver("azuread", azuread.Provider) - }) -} diff --git a/azurerm/internal/acceptance/random.go b/azurerm/internal/acceptance/random.go index 3d51720756e2..06a08d2db69e 100644 --- a/azurerm/internal/acceptance/random.go +++ b/azurerm/internal/acceptance/random.go @@ -5,11 +5,9 @@ import ( "strings" "time" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" ) -const CharSetAlpha = acctest.CharSetAlpha - func RandTimeInt() int { // acctest.RantInt() returns a value of size: // 000000000000000000 diff --git a/azurerm/internal/acceptance/ssh/run.go b/azurerm/internal/acceptance/ssh/run.go index 7737e936a786..b6cc1fa4ee9e 100644 --- a/azurerm/internal/acceptance/ssh/run.go +++ b/azurerm/internal/acceptance/ssh/run.go @@ -2,11 +2,12 @@ package ssh import ( "bytes" + "context" "fmt" "log" "time" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "golang.org/x/crypto/ssh" ) @@ -18,8 +19,8 @@ type Runner struct { CommandsToRun []string } -func (r Runner) Run() error { - if err := 
resource.Retry(5*time.Minute, r.tryRun); err != nil { +func (r Runner) Run(ctx context.Context) error { + if err := resource.RetryContext(ctx, 5*time.Minute, r.tryRun); err != nil { return err } diff --git a/azurerm/internal/acceptance/steps.go b/azurerm/internal/acceptance/steps.go index 0d8f4529f1a8..7d9a159e0673 100644 --- a/azurerm/internal/acceptance/steps.go +++ b/azurerm/internal/acceptance/steps.go @@ -5,8 +5,8 @@ import ( "fmt" "strings" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/helpers" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/testclient" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/types" diff --git a/azurerm/internal/acceptance/testcase.go b/azurerm/internal/acceptance/testcase.go index 3f62e938c372..35fc41a24812 100644 --- a/azurerm/internal/acceptance/testcase.go +++ b/azurerm/internal/acceptance/testcase.go @@ -4,9 +4,9 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" - "github.com/terraform-providers/terraform-provider-azuread/azuread" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/helpers" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/testclient" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/types" @@ -82,31 +82,37 @@ func RunTestsInSequence(t *testing.T, tests 
map[string]map[string]func(t *testin } func (td TestData) runAcceptanceTest(t *testing.T, testCase resource.TestCase) { - testCase.ProviderFactories = map[string]terraform.ResourceProviderFactory{ - "azuread": func() (terraform.ResourceProvider, error) { - aad := azuread.Provider() - return aad, nil - }, - "azurerm": func() (terraform.ResourceProvider, error) { - azurerm := provider.TestAzureProvider() - return azurerm, nil - }, - } + testCase.ExternalProviders = td.externalProviders() + testCase.ProviderFactories = td.providers() resource.ParallelTest(t, testCase) } func (td TestData) runAcceptanceSequentialTest(t *testing.T, testCase resource.TestCase) { - testCase.ProviderFactories = map[string]terraform.ResourceProviderFactory{ - "azuread": func() (terraform.ResourceProvider, error) { - aad := azuread.Provider() - return aad, nil + testCase.ExternalProviders = td.externalProviders() + testCase.ProviderFactories = td.providers() + + resource.Test(t, testCase) +} + +func (td TestData) providers() map[string]func() (*schema.Provider, error) { + return map[string]func() (*schema.Provider, error){ + "azurerm": func() (*schema.Provider, error) { //nolint:unparam + azurerm := provider.TestAzureProvider() + return azurerm, nil }, - "azurerm": func() (terraform.ResourceProvider, error) { + "azurerm-alt": func() (*schema.Provider, error) { //nolint:unparam azurerm := provider.TestAzureProvider() return azurerm, nil }, } +} - resource.Test(t, testCase) +func (td TestData) externalProviders() map[string]resource.ExternalProvider { + return map[string]resource.ExternalProvider{ + "azuread": { + VersionConstraint: "=1.5.1", + Source: "registry.terraform.io/hashicorp/azuread", + }, + } } diff --git a/azurerm/internal/acceptance/testing.go b/azurerm/internal/acceptance/testing.go index 97b03a086a0f..5447503a1a67 100644 --- a/azurerm/internal/acceptance/testing.go +++ b/azurerm/internal/acceptance/testing.go @@ -9,7 +9,7 @@ import ( "github.com/Azure/go-autorest/autorest/azure" 
"github.com/hashicorp/go-azure-helpers/authentication" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func PreCheck(t *testing.T) { diff --git a/azurerm/internal/azuresdkhacks/network_interface.go b/azurerm/internal/azuresdkhacks/network_interface.go index f5d295a6b4d6..907950a20be7 100644 --- a/azurerm/internal/azuresdkhacks/network_interface.go +++ b/azurerm/internal/azuresdkhacks/network_interface.go @@ -7,7 +7,7 @@ import ( "io" "net/http" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/Azure/go-autorest/autorest" ) diff --git a/azurerm/internal/clients/builder.go b/azurerm/internal/clients/builder.go index 0904ee9caa29..23d682baa167 100644 --- a/azurerm/internal/clients/builder.go +++ b/azurerm/internal/clients/builder.go @@ -112,6 +112,12 @@ func Build(ctx context.Context, builder ClientBuilder) (*Client, error) { // Key Vault Endpoints keyVaultAuth := builder.AuthConfig.BearerAuthorizerCallback(sender, oauthConfig) + // Batch Management Endpoints + batchManagementAuth, err := builder.AuthConfig.GetAuthorizationToken(sender, oauthConfig, env.BatchManagementEndpoint) + if err != nil { + return nil, fmt.Errorf("unable to get authorization token for batch management endpoint: %+v", err) + } + o := &common.ClientOptions{ SubscriptionId: builder.AuthConfig.SubscriptionID, TenantID: builder.AuthConfig.TenantID, @@ -124,6 +130,7 @@ func Build(ctx context.Context, builder ClientBuilder) (*Client, error) { ResourceManagerEndpoint: endpoint, StorageAuthorizer: storageAuth, SynapseAuthorizer: synapseAuth, + BatchManagementAuthorizer: batchManagementAuth, SkipProviderReg: builder.SkipProviderRegistration, DisableCorrelationRequestID: builder.DisableCorrelationRequestID, CustomCorrelationRequestID: builder.CustomCorrelationRequestID, diff --git 
a/azurerm/internal/clients/client.go b/azurerm/internal/clients/client.go index 5b87a0ead072..0c4f87f34517 100644 --- a/azurerm/internal/clients/client.go +++ b/azurerm/internal/clients/client.go @@ -33,12 +33,14 @@ import ( databricks "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/databricks/client" datafactory "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/datafactory/client" datalake "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/datalake/client" + dataprotection "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/dataprotection/client" datashare "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/datashare/client" desktopvirtualization "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/desktopvirtualization/client" devspace "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/devspace/client" devtestlabs "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/devtestlabs/client" digitaltwins "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/digitaltwins/client" dns "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/dns/client" + domainservices "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/domainservices/client" eventgrid "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventgrid/client" eventhub "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/client" firewall "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/firewall/client" @@ -134,12 +136,14 @@ type Client struct { DataboxEdge *databoxedge.Client DataFactory *datafactory.Client Datalake *datalake.Client + 
DataProtection *dataprotection.Client DataShare *datashare.Client DesktopVirtualization *desktopvirtualization.Client DevSpace *devspace.Client DevTestLabs *devtestlabs.Client DigitalTwins *digitaltwins.Client Dns *dns.Client + DomainServices *domainservices.Client EventGrid *eventgrid.Client Eventhub *eventhub.Client Firewall *firewall.Client @@ -237,12 +241,14 @@ func (client *Client) Build(ctx context.Context, o *common.ClientOptions) error client.DataboxEdge = databoxedge.NewClient(o) client.DataFactory = datafactory.NewClient(o) client.Datalake = datalake.NewClient(o) + client.DataProtection = dataprotection.NewClient(o) client.DataShare = datashare.NewClient(o) client.DesktopVirtualization = desktopvirtualization.NewClient(o) client.DevSpace = devspace.NewClient(o) client.DevTestLabs = devtestlabs.NewClient(o) client.DigitalTwins = digitaltwins.NewClient(o) client.Dns = dns.NewClient(o) + client.DomainServices = domainservices.NewClient(o) client.EventGrid = eventgrid.NewClient(o) client.Eventhub = eventhub.NewClient(o) client.Firewall = firewall.NewClient(o) diff --git a/azurerm/internal/common/client_options.go b/azurerm/internal/common/client_options.go index e21d811a62c5..a22072052fbb 100644 --- a/azurerm/internal/common/client_options.go +++ b/azurerm/internal/common/client_options.go @@ -8,7 +8,7 @@ import ( "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure" "github.com/hashicorp/go-azure-helpers/sender" - "github.com/hashicorp/terraform-plugin-sdk/meta" + "github.com/hashicorp/terraform-plugin-sdk/v2/meta" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/features" "github.com/terraform-providers/terraform-provider-azurerm/version" ) @@ -26,6 +26,7 @@ type ClientOptions struct { ResourceManagerEndpoint string StorageAuthorizer autorest.Authorizer SynapseAuthorizer autorest.Authorizer + BatchManagementAuthorizer autorest.Authorizer SkipProviderReg bool CustomCorrelationRequestID string diff 
--git a/azurerm/internal/features/defaults.go b/azurerm/internal/features/defaults.go index 7a54dadb2f7f..d18117f60f33 100644 --- a/azurerm/internal/features/defaults.go +++ b/azurerm/internal/features/defaults.go @@ -3,6 +3,9 @@ package features func Default() UserFeatures { return UserFeatures{ // NOTE: ensure all nested objects are fully populated + CognitiveAccount: CognitiveAccountFeatures{ + PurgeSoftDeleteOnDestroy: true, + }, KeyVault: KeyVaultFeatures{ PurgeSoftDeleteOnDestroy: true, RecoverSoftDeletedKeyVaults: true, @@ -17,10 +20,12 @@ func Default() UserFeatures { DeleteNestedItemsDuringDeletion: true, }, VirtualMachine: VirtualMachineFeatures{ - DeleteOSDiskOnDeletion: true, - GracefulShutdown: false, + DeleteOSDiskOnDeletion: true, + GracefulShutdown: false, + SkipShutdownAndForceDelete: false, }, VirtualMachineScaleSet: VirtualMachineScaleSetFeatures{ + ForceDelete: false, RollInstancesWhenRequired: true, }, } diff --git a/azurerm/internal/features/user_flags.go b/azurerm/internal/features/user_flags.go index 72157d354d99..520f649d538d 100644 --- a/azurerm/internal/features/user_flags.go +++ b/azurerm/internal/features/user_flags.go @@ -1,6 +1,7 @@ package features type UserFeatures struct { + CognitiveAccount CognitiveAccountFeatures VirtualMachine VirtualMachineFeatures VirtualMachineScaleSet VirtualMachineScaleSetFeatures KeyVault KeyVaultFeatures @@ -9,12 +10,18 @@ type UserFeatures struct { LogAnalyticsWorkspace LogAnalyticsWorkspaceFeatures } +type CognitiveAccountFeatures struct { + PurgeSoftDeleteOnDestroy bool +} + type VirtualMachineFeatures struct { - DeleteOSDiskOnDeletion bool - GracefulShutdown bool + DeleteOSDiskOnDeletion bool + GracefulShutdown bool + SkipShutdownAndForceDelete bool } type VirtualMachineScaleSetFeatures struct { + ForceDelete bool RollInstancesWhenRequired bool } diff --git a/azurerm/internal/identity/schema.go b/azurerm/internal/identity/schema.go index 80e587814c8a..72aaac0434b9 100644 --- 
a/azurerm/internal/identity/schema.go +++ b/azurerm/internal/identity/schema.go @@ -5,14 +5,12 @@ import ( ) const ( - none = "None" - systemAssigned = "SystemAssigned" - userAssigned = "UserAssigned" + none = "None" + systemAssigned = "SystemAssigned" + userAssigned = "UserAssigned" + systemAssignedUserAssigned = "SystemAssigned, UserAssigned" ) -// TODO: support SystemAssigned, UserAssigned -// const systemAssignedUserAssigned = "SystemAssigned, UserAssigned" - type ExpandedConfig struct { // Type is the type of User Assigned Identity, either `None`, `SystemAssigned`, `UserAssigned` // or `SystemAssigned, UserAssigned` diff --git a/azurerm/internal/identity/system_assigned_user_assigned.go b/azurerm/internal/identity/system_assigned_user_assigned.go new file mode 100644 index 000000000000..60de26b21dfa --- /dev/null +++ b/azurerm/internal/identity/system_assigned_user_assigned.go @@ -0,0 +1,130 @@ +package identity + +import ( + "fmt" + + msivalidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/msi/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +var _ Identity = SystemAssignedUserAssigned{} + +type SystemAssignedUserAssigned struct{} + +func (s SystemAssignedUserAssigned) Expand(input []interface{}) (*ExpandedConfig, error) { + if len(input) == 0 || input[0] == nil { + return &ExpandedConfig{ + Type: none, + }, nil + } + + v := input[0].(map[string]interface{}) + + config := &ExpandedConfig{ + Type: v["type"].(string), + } + + identityIds := v["identity_ids"].(*pluginsdk.Set).List() + + if len(identityIds) != 0 { + if config.Type != userAssigned && config.Type != systemAssignedUserAssigned { + return nil, fmt.Errorf("`identity_ids` can only be specified when `type` includes `UserAssigned`") + } + 
config.UserAssignedIdentityIds = utils.ExpandStringSlice(identityIds) + } + + return config, nil +} + +func (s SystemAssignedUserAssigned) Flatten(input *ExpandedConfig) []interface{} { + if input == nil || input.Type == none { + return []interface{}{} + } + + coalesce := func(input *string) string { + if input == nil { + return "" + } + + return *input + } + + return []interface{}{ + map[string]interface{}{ + "type": input.Type, + "identity_ids": utils.FlattenStringSlice(input.UserAssignedIdentityIds), + "principal_id": coalesce(input.PrincipalId), + "tenant_id": coalesce(input.TenantId), + }, + } +} + +func (s SystemAssignedUserAssigned) Schema() *pluginsdk.Schema { + return &pluginsdk.Schema{ + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "type": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{ + userAssigned, + systemAssigned, + systemAssignedUserAssigned, + }, false), + }, + "identity_ids": { + Type: pluginsdk.TypeSet, + Optional: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + ValidateFunc: msivalidate.UserAssignedIdentityID, + }, + }, + "principal_id": { + Type: pluginsdk.TypeString, + Computed: true, + }, + "tenant_id": { + Type: pluginsdk.TypeString, + Computed: true, + }, + }, + }, + } +} + +func (s SystemAssignedUserAssigned) SchemaDataSource() *pluginsdk.Schema { + return &pluginsdk.Schema{ + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "type": { + Type: pluginsdk.TypeString, + Computed: true, + }, + "identity_ids": { + Type: pluginsdk.TypeList, + Computed: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + }, + }, + "principal_id": { + Type: pluginsdk.TypeString, + Computed: true, + }, + "tenant_id": { + Type: pluginsdk.TypeString, + Computed: true, + }, + }, + }, + } +} diff --git 
a/azurerm/internal/identity/user_assigned.go b/azurerm/internal/identity/user_assigned.go index 74706d120a7f..6e8be6b68481 100644 --- a/azurerm/internal/identity/user_assigned.go +++ b/azurerm/internal/identity/user_assigned.go @@ -1,8 +1,10 @@ package identity import ( - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + msivalidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/msi/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) var _ Identity = UserAssigned{} @@ -16,8 +18,11 @@ func (u UserAssigned) Expand(input []interface{}) (*ExpandedConfig, error) { }, nil } + v := input[0].(map[string]interface{}) + return &ExpandedConfig{ - Type: systemAssigned, + Type: userAssigned, + UserAssignedIdentityIds: utils.ExpandStringSlice(v["identity_ids"].(*pluginsdk.Set).List()), }, nil } @@ -26,19 +31,10 @@ func (u UserAssigned) Flatten(input *ExpandedConfig) []interface{} { return []interface{}{} } - coalesce := func(input *string) string { - if input == nil { - return "" - } - - return *input - } - return []interface{}{ map[string]interface{}{ "type": input.Type, - "principal_id": coalesce(input.PrincipalId), - "tenant_id": coalesce(input.TenantId), + "identity_ids": utils.FlattenStringSlice(input.UserAssignedIdentityIds), }, } } @@ -58,11 +54,11 @@ func (u UserAssigned) Schema() *pluginsdk.Schema { }, false), }, "identity_ids": { - Type: pluginsdk.TypeList, + Type: pluginsdk.TypeSet, Required: true, Elem: &pluginsdk.Schema{ Type: pluginsdk.TypeString, - ValidateFunc: validation.NoZeroValues, + ValidateFunc: msivalidate.UserAssignedIdentityID, }, }, }, diff --git a/azurerm/internal/provider/features.go b/azurerm/internal/provider/features.go index 29e41105f923..c6f8d73702ad 100644 --- a/azurerm/internal/provider/features.go +++ 
b/azurerm/internal/provider/features.go @@ -1,6 +1,7 @@ package provider import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/features" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" ) @@ -10,6 +11,21 @@ func schemaFeatures(supportLegacyTestSuite bool) *pluginsdk.Schema { // specifying the block otherwise) - however for 2+ they should be optional features := map[string]*pluginsdk.Schema{ // lintignore:XS003 + "cognitive_account": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "purge_soft_delete_on_destroy": { + Type: pluginsdk.TypeBool, + Optional: true, + Default: true, + }, + }, + }, + }, + "key_vault": { Type: pluginsdk.TypeList, Optional: true, @@ -84,6 +100,10 @@ func schemaFeatures(supportLegacyTestSuite bool) *pluginsdk.Schema { Type: pluginsdk.TypeBool, Optional: true, }, + "skip_shutdown_and_force_delete": { + Type: schema.TypeBool, + Optional: true, + }, }, }, }, @@ -94,6 +114,10 @@ func schemaFeatures(supportLegacyTestSuite bool) *pluginsdk.Schema { MaxItems: 1, Elem: &pluginsdk.Resource{ Schema: map[string]*pluginsdk.Schema{ + "force_delete": { + Type: pluginsdk.TypeBool, + Optional: true, + }, "roll_instances_when_required": { Type: pluginsdk.TypeBool, Required: true, @@ -136,6 +160,16 @@ func expandFeatures(input []interface{}) features.UserFeatures { val := input[0].(map[string]interface{}) + if raw, ok := val["cognitive_account"]; ok { + items := raw.([]interface{}) + if len(items) > 0 && items[0] != nil { + cognitiveRaw := items[0].(map[string]interface{}) + if v, ok := cognitiveRaw["purge_soft_delete_on_destroy"]; ok { + features.CognitiveAccount.PurgeSoftDeleteOnDestroy = v.(bool) + } + } + } + if raw, ok := val["key_vault"]; ok { items := raw.([]interface{}) if len(items) > 0 && items[0] != nil { @@ -189,6 +223,9 @@ func 
expandFeatures(input []interface{}) features.UserFeatures { if v, ok := virtualMachinesRaw["graceful_shutdown"]; ok { features.VirtualMachine.GracefulShutdown = v.(bool) } + if v, ok := virtualMachinesRaw["skip_shutdown_and_force_delete"]; ok { + features.VirtualMachine.SkipShutdownAndForceDelete = v.(bool) + } } } @@ -199,6 +236,9 @@ func expandFeatures(input []interface{}) features.UserFeatures { if v, ok := scaleSetRaw["roll_instances_when_required"]; ok { features.VirtualMachineScaleSet.RollInstancesWhenRequired = v.(bool) } + if v, ok := scaleSetRaw["force_delete"]; ok { + features.VirtualMachineScaleSet.ForceDelete = v.(bool) + } } } diff --git a/azurerm/internal/provider/features_test.go b/azurerm/internal/provider/features_test.go index 86a35d915089..8daebc8fce40 100644 --- a/azurerm/internal/provider/features_test.go +++ b/azurerm/internal/provider/features_test.go @@ -18,10 +18,16 @@ func TestExpandFeatures(t *testing.T) { Name: "Empty Block", Input: []interface{}{}, Expected: features.UserFeatures{ + CognitiveAccount: features.CognitiveAccountFeatures{ + PurgeSoftDeleteOnDestroy: true, + }, KeyVault: features.KeyVaultFeatures{ PurgeSoftDeleteOnDestroy: true, RecoverSoftDeletedKeyVaults: true, }, + LogAnalyticsWorkspace: features.LogAnalyticsWorkspaceFeatures{ + PermanentlyDeleteOnDestroy: false, + }, Network: features.NetworkFeatures{ RelaxedLocking: false, }, @@ -29,20 +35,25 @@ func TestExpandFeatures(t *testing.T) { DeleteNestedItemsDuringDeletion: true, }, VirtualMachine: features.VirtualMachineFeatures{ - DeleteOSDiskOnDeletion: true, + DeleteOSDiskOnDeletion: true, + GracefulShutdown: false, + SkipShutdownAndForceDelete: false, }, VirtualMachineScaleSet: features.VirtualMachineScaleSetFeatures{ + ForceDelete: false, RollInstancesWhenRequired: true, }, - LogAnalyticsWorkspace: features.LogAnalyticsWorkspaceFeatures{ - PermanentlyDeleteOnDestroy: false, - }, }, }, { Name: "Complete Enabled", Input: []interface{}{ map[string]interface{}{ + 
"cognitive_account": []interface{}{ + map[string]interface{}{ + "purge_soft_delete_on_destroy": true, + }, + }, "key_vault": []interface{}{ map[string]interface{}{ "purge_soft_delete_on_destroy": true, @@ -66,18 +77,23 @@ func TestExpandFeatures(t *testing.T) { }, "virtual_machine": []interface{}{ map[string]interface{}{ - "delete_os_disk_on_deletion": true, - "graceful_shutdown": true, + "delete_os_disk_on_deletion": true, + "graceful_shutdown": true, + "skip_shutdown_and_force_delete": true, }, }, "virtual_machine_scale_set": []interface{}{ map[string]interface{}{ "roll_instances_when_required": true, + "force_delete": true, }, }, }, }, Expected: features.UserFeatures{ + CognitiveAccount: features.CognitiveAccountFeatures{ + PurgeSoftDeleteOnDestroy: true, + }, KeyVault: features.KeyVaultFeatures{ PurgeSoftDeleteOnDestroy: true, RecoverSoftDeletedKeyVaults: true, @@ -92,11 +108,13 @@ func TestExpandFeatures(t *testing.T) { DeleteNestedItemsDuringDeletion: true, }, VirtualMachine: features.VirtualMachineFeatures{ - DeleteOSDiskOnDeletion: true, - GracefulShutdown: true, + DeleteOSDiskOnDeletion: true, + GracefulShutdown: true, + SkipShutdownAndForceDelete: true, }, VirtualMachineScaleSet: features.VirtualMachineScaleSetFeatures{ RollInstancesWhenRequired: true, + ForceDelete: true, }, }, }, @@ -104,10 +122,20 @@ func TestExpandFeatures(t *testing.T) { Name: "Complete Disabled", Input: []interface{}{ map[string]interface{}{ - "virtual_machine": []interface{}{ + "cognitive_account": []interface{}{ map[string]interface{}{ - "delete_os_disk_on_deletion": false, - "graceful_shutdown": false, + "purge_soft_delete_on_destroy": false, + }, + }, + "key_vault": []interface{}{ + map[string]interface{}{ + "purge_soft_delete_on_destroy": false, + "recover_soft_deleted_key_vaults": false, + }, + }, + "log_analytics_workspace": []interface{}{ + map[string]interface{}{ + "permanently_delete_on_destroy": false, }, }, "network_locking": []interface{}{ @@ -120,25 +148,25 @@ func 
TestExpandFeatures(t *testing.T) { "delete_nested_items_during_deletion": false, }, }, - "virtual_machine_scale_set": []interface{}{ + "virtual_machine": []interface{}{ map[string]interface{}{ - "roll_instances_when_required": false, + "delete_os_disk_on_deletion": false, + "graceful_shutdown": false, + "skip_shutdown_and_force_delete": false, }, }, - "key_vault": []interface{}{ - map[string]interface{}{ - "purge_soft_delete_on_destroy": false, - "recover_soft_deleted_key_vaults": false, - }, - }, - "log_analytics_workspace": []interface{}{ + "virtual_machine_scale_set": []interface{}{ map[string]interface{}{ - "permanently_delete_on_destroy": false, + "force_delete": false, + "roll_instances_when_required": false, }, }, }, }, Expected: features.UserFeatures{ + CognitiveAccount: features.CognitiveAccountFeatures{ + PurgeSoftDeleteOnDestroy: false, + }, KeyVault: features.KeyVaultFeatures{ PurgeSoftDeleteOnDestroy: false, RecoverSoftDeletedKeyVaults: false, @@ -153,10 +181,12 @@ func TestExpandFeatures(t *testing.T) { DeleteNestedItemsDuringDeletion: false, }, VirtualMachine: features.VirtualMachineFeatures{ - DeleteOSDiskOnDeletion: false, - GracefulShutdown: false, + DeleteOSDiskOnDeletion: false, + GracefulShutdown: false, + SkipShutdownAndForceDelete: false, }, VirtualMachineScaleSet: features.VirtualMachineScaleSetFeatures{ + ForceDelete: false, RollInstancesWhenRequired: false, }, }, @@ -172,6 +202,71 @@ func TestExpandFeatures(t *testing.T) { } } +func TestExpandFeaturesCognitiveServices(t *testing.T) { + testData := []struct { + Name string + Input []interface{} + EnvVars map[string]interface{} + Expected features.UserFeatures + }{ + { + Name: "Empty Block", + Input: []interface{}{ + map[string]interface{}{ + "cognitive_account": []interface{}{}, + }, + }, + Expected: features.UserFeatures{ + CognitiveAccount: features.CognitiveAccountFeatures{ + PurgeSoftDeleteOnDestroy: true, + }, + }, + }, + { + Name: "Purge on Destroy Enabled", + Input: []interface{}{ + 
map[string]interface{}{ + "cognitive_account": []interface{}{ + map[string]interface{}{ + "purge_soft_delete_on_destroy": true, + }, + }, + }, + }, + Expected: features.UserFeatures{ + CognitiveAccount: features.CognitiveAccountFeatures{ + PurgeSoftDeleteOnDestroy: true, + }, + }, + }, + { + Name: "Purge on Destroy Disabled", + Input: []interface{}{ + map[string]interface{}{ + "cognitive_account": []interface{}{ + map[string]interface{}{ + "purge_soft_delete_on_destroy": false, + }, + }, + }, + }, + Expected: features.UserFeatures{ + CognitiveAccount: features.CognitiveAccountFeatures{ + PurgeSoftDeleteOnDestroy: false, + }, + }, + }, + } + + for _, testCase := range testData { + t.Logf("[DEBUG] Test Case: %q", testCase.Name) + result := expandFeatures(testCase.Input) + if !reflect.DeepEqual(result.CognitiveAccount, testCase.Expected.CognitiveAccount) { + t.Fatalf("Expected %+v but got %+v", result.CognitiveAccount, testCase.Expected.CognitiveAccount) + } + } +} + func TestExpandFeaturesKeyVault(t *testing.T) { testData := []struct { Name string @@ -388,46 +483,94 @@ func TestExpandFeaturesVirtualMachine(t *testing.T) { }, Expected: features.UserFeatures{ VirtualMachine: features.VirtualMachineFeatures{ - DeleteOSDiskOnDeletion: true, - GracefulShutdown: false, + DeleteOSDiskOnDeletion: true, + GracefulShutdown: false, + SkipShutdownAndForceDelete: false, }, }, }, { - Name: "Delete OS Disk and Graceful Shutdown Enabled", + Name: "Delete OS Disk Enabled", Input: []interface{}{ map[string]interface{}{ "virtual_machine": []interface{}{ map[string]interface{}{ "delete_os_disk_on_deletion": true, - "graceful_shutdown": true, + "graceful_shutdown": false, + "force_delete": false, + "shutdown_before_deletion": false, }, }, }, }, Expected: features.UserFeatures{ VirtualMachine: features.VirtualMachineFeatures{ - DeleteOSDiskOnDeletion: true, - GracefulShutdown: true, + DeleteOSDiskOnDeletion: true, + GracefulShutdown: false, + SkipShutdownAndForceDelete: false, }, }, }, { 
- Name: "Delete OS Disk and Graceful Shutdown Disabled", + Name: "Graceful Shutdown Enabled", Input: []interface{}{ map[string]interface{}{ "virtual_machine": []interface{}{ map[string]interface{}{ "delete_os_disk_on_deletion": false, - "graceful_shutdown": false, + "graceful_shutdown": true, + "force_delete": false, }, }, }, }, Expected: features.UserFeatures{ VirtualMachine: features.VirtualMachineFeatures{ - DeleteOSDiskOnDeletion: false, - GracefulShutdown: false, + DeleteOSDiskOnDeletion: false, + GracefulShutdown: true, + SkipShutdownAndForceDelete: false, + }, + }, + }, + { + Name: "Skip Shutdown and Force Delete Enabled", + Input: []interface{}{ + map[string]interface{}{ + "virtual_machine": []interface{}{ + map[string]interface{}{ + "delete_os_disk_on_deletion": false, + "graceful_shutdown": false, + "skip_shutdown_and_force_delete": true, + }, + }, + }, + }, + Expected: features.UserFeatures{ + VirtualMachine: features.VirtualMachineFeatures{ + DeleteOSDiskOnDeletion: false, + GracefulShutdown: false, + SkipShutdownAndForceDelete: true, + }, + }, + }, + { + Name: "All Disabled", + Input: []interface{}{ + map[string]interface{}{ + "virtual_machine": []interface{}{ + map[string]interface{}{ + "delete_os_disk_on_deletion": false, + "graceful_shutdown": false, + "skip_shutdown_and_force_delete": false, + }, + }, + }, + }, + Expected: features.UserFeatures{ + VirtualMachine: features.VirtualMachineFeatures{ + DeleteOSDiskOnDeletion: false, + GracefulShutdown: false, + SkipShutdownAndForceDelete: false, }, }, }, @@ -462,12 +605,32 @@ func TestExpandFeaturesVirtualMachineScaleSet(t *testing.T) { }, }, }, + { + Name: "Force Delete Enabled", + Input: []interface{}{ + map[string]interface{}{ + "virtual_machine_scale_set": []interface{}{ + map[string]interface{}{ + "force_delete": true, + "roll_instances_when_required": false, + }, + }, + }, + }, + Expected: features.UserFeatures{ + VirtualMachineScaleSet: features.VirtualMachineScaleSetFeatures{ + ForceDelete: 
true, + RollInstancesWhenRequired: false, + }, + }, + }, { Name: "Roll Instances Enabled", Input: []interface{}{ map[string]interface{}{ "virtual_machine_scale_set": []interface{}{ map[string]interface{}{ + "force_delete": false, "roll_instances_when_required": true, }, }, @@ -475,16 +638,18 @@ func TestExpandFeaturesVirtualMachineScaleSet(t *testing.T) { }, Expected: features.UserFeatures{ VirtualMachineScaleSet: features.VirtualMachineScaleSetFeatures{ + ForceDelete: false, RollInstancesWhenRequired: true, }, }, }, { - Name: "Roll Instances Disabled", + Name: "All Fields Disabled", Input: []interface{}{ map[string]interface{}{ "virtual_machine_scale_set": []interface{}{ map[string]interface{}{ + "force_delete": false, "roll_instances_when_required": false, }, }, @@ -492,6 +657,7 @@ func TestExpandFeaturesVirtualMachineScaleSet(t *testing.T) { }, Expected: features.UserFeatures{ VirtualMachineScaleSet: features.VirtualMachineScaleSetFeatures{ + ForceDelete: false, RollInstancesWhenRequired: false, }, }, diff --git a/azurerm/internal/provider/provider.go b/azurerm/internal/provider/provider.go index c839bc6504ed..ab65933112ea 100644 --- a/azurerm/internal/provider/provider.go +++ b/azurerm/internal/provider/provider.go @@ -1,32 +1,32 @@ package provider import ( + "context" "fmt" "log" "os" "strings" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/features" - "github.com/hashicorp/go-azure-helpers/authentication" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/features" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/resourceproviders" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/sdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func AzureProvider() terraform.ResourceProvider { +func AzureProvider() *schema.Provider { return azureProvider(false) } -func TestAzureProvider() terraform.ResourceProvider { +func TestAzureProvider() *schema.Provider { return azureProvider(true) } -func azureProvider(supportLegacyTestSuite bool) terraform.ResourceProvider { +func azureProvider(supportLegacyTestSuite bool) *schema.Provider { // avoids this showing up in test output debugLog := func(f string, v ...interface{}) { if os.Getenv("TF_LOG") == "" { @@ -243,13 +243,13 @@ func azureProvider(supportLegacyTestSuite bool) terraform.ResourceProvider { } } - p.ConfigureFunc = providerConfigure(p) + p.ConfigureContextFunc = providerConfigure(p) return p } -func providerConfigure(p *schema.Provider) schema.ConfigureFunc { - return func(d *schema.ResourceData) (interface{}, error) { +func providerConfigure(p *schema.Provider) schema.ConfigureContextFunc { + return func(ctx context.Context, d *schema.ResourceData) (interface{}, diag.Diagnostics) { var auxTenants []string if v, ok := d.Get("auxiliary_tenant_ids").([]interface{}); ok && len(v) > 0 { auxTenants = *utils.ExpandStringSlice(v) @@ -258,7 +258,7 @@ func providerConfigure(p *schema.Provider) schema.ConfigureFunc { } if len(auxTenants) > 3 { - return nil, fmt.Errorf("The provider only supports 3 auxiliary tenant IDs") + return nil, diag.FromErr(fmt.Errorf("The provider only supports 3 auxiliary tenant IDs")) } metadataHost := d.Get("metadata_host").(string) @@ -295,7 +295,7 @@ func providerConfigure(p *schema.Provider) schema.ConfigureFunc { config, err := builder.Build() if err != nil { - return nil, fmt.Errorf("Error 
building AzureRM Client: %s", err) + return nil, diag.FromErr(fmt.Errorf("Error building AzureRM Client: %s", err)) } terraformVersion := p.TerraformVersion @@ -320,29 +320,34 @@ func providerConfigure(p *schema.Provider) schema.ConfigureFunc { // platform level tracing CustomCorrelationRequestID: os.Getenv("ARM_CORRELATION_REQUEST_ID"), } - client, err := clients.Build(p.StopContext(), clientBuilder) + + stopCtx, ok := schema.StopContext(ctx) //nolint:SA1019 + if !ok { + stopCtx = ctx + } + + client, err := clients.Build(stopCtx, clientBuilder) if err != nil { - return nil, err + return nil, diag.FromErr(err) } - client.StopContext = p.StopContext() + client.StopContext = stopCtx if !skipProviderRegistration { // List all the available providers and their registration state to avoid unnecessary // requests. This also lets us check if the provider credentials are correct. - ctx := client.StopContext providerList, err := client.Resource.ProvidersClient.List(ctx, nil, "") if err != nil { - return nil, fmt.Errorf("Unable to list provider registration status, it is possible that this is due to invalid "+ + return nil, diag.FromErr(fmt.Errorf("Unable to list provider registration status, it is possible that this is due to invalid "+ "credentials or the service principal does not have permission to use the Resource Manager API, Azure "+ - "error: %s", err) + "error: %s", err)) } availableResourceProviders := providerList.Values() requiredResourceProviders := resourceproviders.Required() if err := resourceproviders.EnsureRegistered(ctx, *client.Resource.ProvidersClient, availableResourceProviders, requiredResourceProviders); err != nil { - return nil, fmt.Errorf(resourceProviderRegistrationErrorFmt, err) + return nil, diag.FromErr(fmt.Errorf(resourceProviderRegistrationErrorFmt, err)) } } diff --git a/azurerm/internal/provider/provider_test.go b/azurerm/internal/provider/provider_test.go index 966777128da0..1ea3f455da21 100644 --- 
a/azurerm/internal/provider/provider_test.go +++ b/azurerm/internal/provider/provider_test.go @@ -4,18 +4,16 @@ import ( "fmt" "testing" "time" - - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func TestProvider(t *testing.T) { - if err := TestAzureProvider().(*schema.Provider).InternalValidate(); err != nil { + if err := TestAzureProvider().InternalValidate(); err != nil { t.Fatalf("err: %s", err) } } func TestDataSourcesSupportCustomTimeouts(t *testing.T) { - provider := TestAzureProvider().(*schema.Provider) + provider := TestAzureProvider() for dataSourceName, dataSource := range provider.DataSourcesMap { t.Run(fmt.Sprintf("DataSource/%s", dataSourceName), func(t *testing.T) { t.Logf("[DEBUG] Testing Data Source %q..", dataSourceName) @@ -51,7 +49,7 @@ func TestDataSourcesSupportCustomTimeouts(t *testing.T) { } func TestResourcesSupportCustomTimeouts(t *testing.T) { - provider := TestAzureProvider().(*schema.Provider) + provider := TestAzureProvider() for resourceName, resource := range provider.ResourcesMap { t.Run(fmt.Sprintf("Resource/%s", resourceName), func(t *testing.T) { t.Logf("[DEBUG] Testing Resource %q..", resourceName) @@ -66,7 +64,7 @@ func TestResourcesSupportCustomTimeouts(t *testing.T) { } // every Resource has to have a Create, Read & Destroy timeout - if resource.Timeouts.Create == nil && resource.Create != nil { + if resource.Timeouts.Create == nil && resource.Create != nil { //nolint:SA1019 t.Fatalf("Resource %q defines a Create method but no Create Timeout", resourceName) } if resource.Timeouts.Delete == nil && resource.Delete != nil { diff --git a/azurerm/internal/provider/services.go b/azurerm/internal/provider/services.go index f1c5fe4e8689..7f81dace1b2c 100644 --- a/azurerm/internal/provider/services.go +++ b/azurerm/internal/provider/services.go @@ -29,12 +29,14 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/databricks" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/datafactory" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/datalake" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/dataprotection" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/datashare" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/desktopvirtualization" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/devspace" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/devtestlabs" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/digitaltwins" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/dns" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/domainservices" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventgrid" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/firewall" @@ -100,8 +102,10 @@ import ( func SupportedTypedServices() []sdk.TypedServiceRegistration { return []sdk.TypedServiceRegistration{ + batch.Registration{}, eventhub.Registration{}, loadbalancer.Registration{}, + policy.Registration{}, resource.Registration{}, web.Registration{}, } @@ -137,12 +141,14 @@ func SupportedUntypedServices() []sdk.UntypedServiceRegistration { datalake.Registration{}, databasemigration.Registration{}, databoxedge.Registration{}, + dataprotection.Registration{}, datashare.Registration{}, desktopvirtualization.Registration{}, devspace.Registration{}, devtestlabs.Registration{}, digitaltwins.Registration{}, dns.Registration{}, + domainservices.Registration{}, eventgrid.Registration{}, 
eventhub.Registration{}, firewall.Registration{}, diff --git a/azurerm/internal/resourceproviders/required.go b/azurerm/internal/resourceproviders/required.go index 447ac9a3850a..0e35cc23b717 100644 --- a/azurerm/internal/resourceproviders/required.go +++ b/azurerm/internal/resourceproviders/required.go @@ -28,12 +28,12 @@ func Required() map[string]struct{} { "Microsoft.DataLakeAnalytics": {}, "Microsoft.DataLakeStore": {}, "Microsoft.DataMigration": {}, + "Microsoft.DataProtection": {}, "Microsoft.DBforMariaDB": {}, "Microsoft.DBforMySQL": {}, "Microsoft.DBforPostgreSQL": {}, "Microsoft.DesktopVirtualization": {}, "Microsoft.Devices": {}, - "Microsoft.DevSpaces": {}, "Microsoft.DevTestLab": {}, "Microsoft.DocumentDB": {}, "Microsoft.EventGrid": {}, diff --git a/azurerm/internal/sdk/README.md b/azurerm/internal/sdk/README.md index 3b644860d2fa..078ab5cc1128 100644 --- a/azurerm/internal/sdk/README.md +++ b/azurerm/internal/sdk/README.md @@ -115,19 +115,19 @@ func (r ResourceGroupResource) Read() sdk.ResourceFunc { return err } - metadata.Logger.Infof("retrieving Resource Group %q..", id.Name) - group, err := client.Get(ctx, id.Name) + metadata.Logger.Infof("retrieving Resource Group %q..", id.ResourceGroup) + group, err := client.Get(ctx, id.ResourceGroup) if err != nil { if utils.ResponseWasNotFound(group.Response) { - metadata.Logger.Infof("Resource Group %q was not found - removing from state!", id.Name) - return metadata.MarkAsGone() + metadata.Logger.Infof("%s was not found - removing from state!", *id) + return metadata.MarkAsGone(id) } - return fmt.Errorf("retrieving Resource Group %q: %+v", id.Name, err) + return fmt.Errorf("retrieving %s: %+v", *id, err) } return metadata.Encode(&ResourceGroup{ - Name: id.Name, + Name: id.ResourceGroup, Location: location.NormalizeNilable(group.Location), Tags: tags.ToTypedObject(group.Tags), }) @@ -150,15 +150,15 @@ func (r ResourceGroupResource) Update() sdk.ResourceFunc { return err } - metadata.Logger.Infof("updating 
Resource Group %q..", id.Name) + metadata.Logger.Infof("updating %s..", *id) client := metadata.Client.Resource.GroupsClient input := resources.GroupPatchable{ Tags: tags.FromTypedObject(state.Tags), } - if _, err := client.Update(ctx, id.Name, input); err != nil { - return fmt.Errorf("updating Resource Group %q: %+v", id.Name, err) + if _, err := client.Update(ctx, id.ResourceGroup, input); err != nil { + return fmt.Errorf("updating %s: %+v", *id, err) } return nil @@ -176,19 +176,19 @@ func (r ResourceGroupResource) Delete() sdk.ResourceFunc { return err } - metadata.Logger.Infof("deleting Resource Group %q..", id.Name) - future, err := client.Delete(ctx, id.Name) + metadata.Logger.Infof("deleting %s..", *id) + future, err := client.Delete(ctx, id.ResourceGroup, "") if err != nil { if response.WasNotFound(future.Response()) { - return metadata.MarkAsGone() + return metadata.MarkAsGone(id) } - return fmt.Errorf("deleting Resource Group %q: %+v", id.Name, err) + return fmt.Errorf("deleting %s: %+v", *id, err) } - metadata.Logger.Infof("waiting for the deletion of Resource Group %q..", id.Name) + metadata.Logger.Infof("waiting for the deletion of %s..", *id) if err := future.WaitForCompletionRef(ctx, client.Client); err != nil { - return fmt.Errorf("waiting for deletion of Resource Group %q: %+v", id.Name, err) + return fmt.Errorf("waiting for deletion of %s: %+v", *id, err) } return nil diff --git a/azurerm/internal/sdk/logger_console.go b/azurerm/internal/sdk/logger_console.go index 8218276e4f1b..2233705c181f 100644 --- a/azurerm/internal/sdk/logger_console.go +++ b/azurerm/internal/sdk/logger_console.go @@ -5,6 +5,8 @@ import ( "log" ) +var _ Logger = ConsoleLogger{} + // ConsoleLogger provides a Logger implementation which writes the log messages // to StdOut - in Terraform's perspective that's proxied via the Plugin SDK type ConsoleLogger struct{} diff --git a/azurerm/internal/sdk/logger_diagnostics.go b/azurerm/internal/sdk/logger_diagnostics.go new file mode 
100644 index 000000000000..19f6375696ef --- /dev/null +++ b/azurerm/internal/sdk/logger_diagnostics.go @@ -0,0 +1,40 @@ +package sdk + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" +) + +var _ Logger = &DiagnosticsLogger{} + +type DiagnosticsLogger struct { + diagnostics diag.Diagnostics +} + +func (d *DiagnosticsLogger) Info(message string) { + log.Printf("[INFO] %s", message) +} + +func (d *DiagnosticsLogger) Infof(format string, args ...interface{}) { + log.Printf("[INFO] "+format, args...) +} + +func (d *DiagnosticsLogger) Warn(message string) { + d.diagnostics = append(d.diagnostics, diag.Diagnostic{ + Severity: diag.Warning, + Summary: message, + Detail: message, + AttributePath: nil, + }) +} + +func (d *DiagnosticsLogger) Warnf(format string, args ...interface{}) { + d.diagnostics = append(d.diagnostics, diag.Diagnostic{ + Severity: diag.Warning, + Summary: fmt.Sprintf(format, args...), + Detail: fmt.Sprintf(format, args...), + AttributePath: nil, + }) +} diff --git a/azurerm/internal/sdk/plugin_sdk_test.go b/azurerm/internal/sdk/plugin_sdk_test.go index 080c7b01cb46..ab93833b6aba 100644 --- a/azurerm/internal/sdk/plugin_sdk_test.go +++ b/azurerm/internal/sdk/plugin_sdk_test.go @@ -6,9 +6,9 @@ import ( "reflect" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) func TestAccPluginSDKAndDecoder(t *testing.T) { @@ -62,8 +62,8 @@ func TestAccPluginSDKAndDecoder(t *testing.T) { // lintignore:AT001 resource.ParallelTest(t, resource.TestCase{ - ProviderFactories: map[string]terraform.ResourceProviderFactory{ - "validator": func() (terraform.ResourceProvider, error) { + ProviderFactories: map[string]func() 
(*schema.Provider, error){ + "validator": func() (*schema.Provider, error) { //nolint:unparam return &schema.Provider{ DataSourcesMap: map[string]*schema.Resource{}, ResourcesMap: map[string]*schema.Resource{ @@ -150,7 +150,7 @@ func TestAccPluginSDKAndDecoder(t *testing.T) { }, }, }, - Create: func(d *schema.ResourceData, i interface{}) error { + Create: func(d *schema.ResourceData, i interface{}) error { //nolint:SA1019 d.SetId("some-id") d.Set("hello", "world") d.Set("random_number", 42) @@ -262,14 +262,14 @@ func TestAccPluginSDKAndDecoderOptionalComputed(t *testing.T) { // lintignore:AT001 resource.ParallelTest(t, resource.TestCase{ - ProviderFactories: map[string]terraform.ResourceProviderFactory{ - "validator": func() (terraform.ResourceProvider, error) { + ProviderFactories: map[string]func() (*schema.Provider, error){ + "validator": func() (*schema.Provider, error) { //nolint:unparam return &schema.Provider{ DataSourcesMap: map[string]*schema.Resource{}, ResourcesMap: map[string]*schema.Resource{ "validator_decoder_specified": { Schema: commonSchema, - Create: func(d *schema.ResourceData, i interface{}) error { + Create: func(d *schema.ResourceData, i interface{}) error { //nolint:SA1019 d.SetId("some-id") return nil }, @@ -285,7 +285,7 @@ func TestAccPluginSDKAndDecoderOptionalComputed(t *testing.T) { "validator_decoder_unspecified": { Schema: commonSchema, - Create: func(d *schema.ResourceData, i interface{}) error { + Create: func(d *schema.ResourceData, i interface{}) error { //nolint:SA1019 d.SetId("some-id") d.Set("hello", "value-from-create") d.Set("number", 42) @@ -317,12 +317,14 @@ resource "validator_decoder_unspecified" "test" {} `, Check: resource.ComposeTestCheckFunc( testCheckResourceStateMatches("validator_decoder_specified.test", map[string]interface{}{ + "%": "4", "id": "some-id", "enabled": "true", "hello": "value-from-config", "number": "21", }), testCheckResourceStateMatches("validator_decoder_unspecified.test", map[string]interface{}{ 
+ "%": "4", "id": "some-id", "enabled": "false", "hello": "value-from-create", @@ -346,8 +348,8 @@ func TestAccPluginSDKAndDecoderOptionalComputedOverride(t *testing.T) { // lintignore:AT001 resource.ParallelTest(t, resource.TestCase{ - ProviderFactories: map[string]terraform.ResourceProviderFactory{ - "validator": func() (terraform.ResourceProvider, error) { + ProviderFactories: map[string]func() (*schema.Provider, error){ + "validator": func() (*schema.Provider, error) { //nolint:unparam return &schema.Provider{ DataSourcesMap: map[string]*schema.Resource{}, ResourcesMap: map[string]*schema.Resource{ @@ -369,7 +371,7 @@ func TestAccPluginSDKAndDecoderOptionalComputedOverride(t *testing.T) { Computed: true, }, }, - Create: func(d *schema.ResourceData, i interface{}) error { + Create: func(d *schema.ResourceData, i interface{}) error { //nolint:SA1019 d.SetId("some-id") d.Set("hello", "value-from-create") d.Set("number", 42) @@ -452,8 +454,8 @@ func TestAccPluginSDKAndDecoderSets(t *testing.T) { // lintignore:AT001 resource.ParallelTest(t, resource.TestCase{ - ProviderFactories: map[string]terraform.ResourceProviderFactory{ - "validator": func() (terraform.ResourceProvider, error) { + ProviderFactories: map[string]func() (*schema.Provider, error){ + "validator": func() (*schema.Provider, error) { //nolint:unparam return &schema.Provider{ DataSourcesMap: map[string]*schema.Resource{}, ResourcesMap: map[string]*schema.Resource{ @@ -488,7 +490,7 @@ func TestAccPluginSDKAndDecoderSets(t *testing.T) { }, }, }, - Create: func(d *schema.ResourceData, i interface{}) error { + Create: func(d *schema.ResourceData, i interface{}) error { //nolint:SA1019 d.SetId("some-id") d.Set("set_of_strings", []string{ "some", @@ -645,8 +647,8 @@ func TestAccPluginSDKAndEncoder(t *testing.T) { // lintignore:AT001 resource.ParallelTest(t, resource.TestCase{ - ProviderFactories: map[string]terraform.ResourceProviderFactory{ - "validator": func() (terraform.ResourceProvider, error) { + 
ProviderFactories: map[string]func() (*schema.Provider, error){ + "validator": func() (*schema.Provider, error) { //nolint:unparam return &schema.Provider{ DataSourcesMap: map[string]*schema.Resource{}, ResourcesMap: map[string]*schema.Resource{ @@ -761,7 +763,7 @@ func TestAccPluginSDKAndEncoder(t *testing.T) { }, }, }, - Create: func(d *schema.ResourceData, i interface{}) error { + Create: func(d *schema.ResourceData, i interface{}) error { //nolint:SA1019 wrapper := ResourceMetaData{ ResourceData: d, Logger: ConsoleLogger{}, @@ -817,6 +819,7 @@ func TestAccPluginSDKAndEncoder(t *testing.T) { Config: `resource "validator_encoder" "test" {}`, Check: resource.ComposeTestCheckFunc( testCheckResourceStateMatches("validator_encoder.test", map[string]interface{}{ + "%": "17", "id": "some-id", "hello": "world", "random_number": "42", @@ -835,6 +838,7 @@ func TestAccPluginSDKAndEncoder(t *testing.T) { "list_of_floats.0": "-1.234567894321", "list_of_floats.1": "2.3456789", "nested_object.#": "1", + "nested_object.0.%": "1", "nested_object.0.key": "value", "map_of_strings.%": "1", "map_of_strings.bingo": "bango", @@ -861,8 +865,8 @@ func TestAccPluginSDKReturnsComputedFields(t *testing.T) { resourceName := "validator_computed.test" // lintignore:AT001 resource.ParallelTest(t, resource.TestCase{ - ProviderFactories: map[string]terraform.ResourceProviderFactory{ - "validator": func() (terraform.ResourceProvider, error) { + ProviderFactories: map[string]func() (*schema.Provider, error){ + "validator": func() (*schema.Provider, error) { //nolint:unparam return &schema.Provider{ DataSourcesMap: map[string]*schema.Resource{}, ResourcesMap: map[string]*schema.Resource{ @@ -876,6 +880,7 @@ func TestAccPluginSDKReturnsComputedFields(t *testing.T) { Config: `resource "validator_computed" "test" {}`, Check: resource.ComposeTestCheckFunc( testCheckResourceStateMatches(resourceName, map[string]interface{}{ + "%": "9", "id": "does-not-matter", "hello": "world", "random_number": "42", @@ 
-894,6 +899,7 @@ func TestAccPluginSDKReturnsComputedFields(t *testing.T) { "list_of_floats.0": "-1.234567894321", "list_of_floats.1": "2.3456789", "nested_object.#": "1", + "nested_object.0.%": "1", "nested_object.0.key": "value", // Sets can't really be computed, so this isn't that big a deal }), @@ -974,7 +980,7 @@ func computedFieldsResource() *schema.Resource { }, }, }, - Create: func(d *schema.ResourceData, meta interface{}) error { + Create: func(d *schema.ResourceData, meta interface{}) error { //nolint:SA1019 d.SetId("does-not-matter") return readFunc(d, meta) }, diff --git a/azurerm/internal/sdk/resource.go b/azurerm/internal/sdk/resource.go index 408246730230..ecb9f2846b77 100644 --- a/azurerm/internal/sdk/resource.go +++ b/azurerm/internal/sdk/resource.go @@ -4,12 +4,11 @@ import ( "context" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" - - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/resourceid" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" ) type resourceBase interface { diff --git a/azurerm/internal/sdk/resource_decode.go b/azurerm/internal/sdk/resource_decode.go index cf9fac874616..e0f1f207b31a 100644 --- a/azurerm/internal/sdk/resource_decode.go +++ b/azurerm/internal/sdk/resource_decode.go @@ -4,7 +4,7 @@ import ( "fmt" "reflect" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) // Decode will decode the Terraform Schema into the specified object diff --git a/azurerm/internal/sdk/service_registration.go b/azurerm/internal/sdk/service_registration.go index 
94a283b4e725..269c24a0a32e 100644 --- a/azurerm/internal/sdk/service_registration.go +++ b/azurerm/internal/sdk/service_registration.go @@ -11,9 +11,6 @@ type TypedServiceRegistration interface { // Name is the name of this Service Name() string - // PackagePath is the relative path to this package - PackagePath() string - // DataSources returns a list of Data Sources supported by this Service DataSources() []DataSource diff --git a/azurerm/internal/sdk/wrapper_data_source.go b/azurerm/internal/sdk/wrapper_data_source.go index 5f07ccb867ac..9f8e79bbaff7 100644 --- a/azurerm/internal/sdk/wrapper_data_source.go +++ b/azurerm/internal/sdk/wrapper_data_source.go @@ -1,11 +1,11 @@ package sdk import ( + "context" "fmt" "time" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) // DataSourceWrapper is a wrapper for converting a DataSource implementation @@ -19,20 +19,22 @@ type DataSourceWrapper struct { func NewDataSourceWrapper(dataSource DataSource) DataSourceWrapper { return DataSourceWrapper{ dataSource: dataSource, - logger: ConsoleLogger{}, + logger: &DiagnosticsLogger{}, } } // DataSource returns the Terraform Plugin SDK type for this DataSource implementation -func (rw *DataSourceWrapper) DataSource() (*schema.Resource, error) { - resourceSchema, err := combineSchema(rw.dataSource.Arguments(), rw.dataSource.Attributes()) +func (dw *DataSourceWrapper) DataSource() (*schema.Resource, error) { + resourceSchema, err := combineSchema(dw.dataSource.Arguments(), dw.dataSource.Attributes()) if err != nil { return nil, fmt.Errorf("building Schema: %+v", err) } - modelObj := rw.dataSource.ModelObject() - if err := ValidateModelObject(&modelObj); err != nil { - return nil, fmt.Errorf("validating model for %q: %+v", rw.dataSource.ResourceType(), err) + modelObj := dw.dataSource.ModelObject() + if modelObj != nil { + 
if err := ValidateModelObject(&modelObj); err != nil { + return nil, fmt.Errorf("validating model for %q: %+v", dw.dataSource.ResourceType(), err) + } } d := func(duration time.Duration) *time.Duration { @@ -41,16 +43,18 @@ func (rw *DataSourceWrapper) DataSource() (*schema.Resource, error) { resource := schema.Resource{ Schema: *resourceSchema, - Read: func(d *schema.ResourceData, meta interface{}) error { - ctx, metaData := runArgs(d, meta, rw.logger) - wrappedCtx, cancel := timeouts.ForRead(ctx, d) - defer cancel() - return rw.dataSource.Read().Func(wrappedCtx, metaData) - }, + ReadContext: dw.diagnosticsWrapper(func(ctx context.Context, d *schema.ResourceData, meta interface{}) error { + metaData := runArgs(d, meta, dw.logger) + return dw.dataSource.Read().Func(ctx, metaData) + }), Timeouts: &schema.ResourceTimeout{ - Read: d(rw.dataSource.Read().Timeout), + Read: d(dw.dataSource.Read().Timeout), }, } return &resource, nil } + +func (dw *DataSourceWrapper) diagnosticsWrapper(in func(ctx context.Context, d *schema.ResourceData, meta interface{}) error) schema.ReadContextFunc { + return diagnosticsWrapper(in, dw.logger) +} diff --git a/azurerm/internal/sdk/wrapper_helpers.go b/azurerm/internal/sdk/wrapper_helpers.go index 155e884153b7..a0e466cfd2c9 100644 --- a/azurerm/internal/sdk/wrapper_helpers.go +++ b/azurerm/internal/sdk/wrapper_helpers.go @@ -1,10 +1,9 @@ package sdk import ( - "context" "fmt" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" ) @@ -45,9 +44,7 @@ func combineSchema(arguments map[string]*schema.Schema, attributes map[string]*s return &out, nil } -func runArgs(d *schema.ResourceData, meta interface{}, logger Logger) (context.Context, ResourceMetaData) { - // NOTE: this is wrapped as a result of this function, so this is "fine" being unwrapped - stopContext := 
meta.(*clients.Client).StopContext +func runArgs(d *schema.ResourceData, meta interface{}, logger Logger) ResourceMetaData { client := meta.(*clients.Client) metaData := ResourceMetaData{ Client: client, @@ -56,5 +53,5 @@ func runArgs(d *schema.ResourceData, meta interface{}, logger Logger) (context.C serializationDebugLogger: NullLogger{}, } - return stopContext, metaData + return metaData } diff --git a/azurerm/internal/sdk/wrapper_resource.go b/azurerm/internal/sdk/wrapper_resource.go index b804400c34f4..97f54c3ddd38 100644 --- a/azurerm/internal/sdk/wrapper_resource.go +++ b/azurerm/internal/sdk/wrapper_resource.go @@ -5,9 +5,9 @@ import ( "fmt" "time" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" ) // ResourceWrapper is a wrapper for converting a Resource implementation @@ -20,7 +20,7 @@ type ResourceWrapper struct { // NewResourceWrapper returns a ResourceWrapper for this Resource implementation func NewResourceWrapper(resource Resource) ResourceWrapper { return ResourceWrapper{ - logger: ConsoleLogger{}, + logger: &DiagnosticsLogger{}, resource: resource, } } @@ -33,8 +33,10 @@ func (rw *ResourceWrapper) Resource() (*schema.Resource, error) { } modelObj := rw.resource.ModelObject() - if err := ValidateModelObject(&modelObj); err != nil { - return nil, fmt.Errorf("validating model for %q: %+v", rw.resource.ResourceType(), err) + if modelObj != nil { + if err := ValidateModelObject(&modelObj); err != nil { + return nil, fmt.Errorf("validating model for %q: %+v", rw.resource.ResourceType(), err) + } } d := func(duration time.Duration) *time.Duration { @@ -44,33 +46,27 @@ func (rw *ResourceWrapper) Resource() (*schema.Resource, error) { resource := 
schema.Resource{ Schema: *resourceSchema, - Create: func(d *schema.ResourceData, meta interface{}) error { - ctx, metaData := runArgs(d, meta, rw.logger) - wrappedCtx, cancel := timeouts.ForCreate(ctx, d) - defer cancel() - err := rw.resource.Create().Func(wrappedCtx, metaData) + CreateContext: rw.diagnosticsWrapper(func(ctx context.Context, d *schema.ResourceData, meta interface{}) error { + metaData := runArgs(d, meta, rw.logger) + err := rw.resource.Create().Func(ctx, metaData) if err != nil { return err } // NOTE: whilst this may look like we should use the Read // functions timeout here, we're still /technically/ in the // Create function so reusing that timeout should be sufficient - return rw.resource.Read().Func(wrappedCtx, metaData) - }, + return rw.resource.Read().Func(ctx, metaData) + }), // looks like these could be reused, easiest if they're not - Read: func(d *schema.ResourceData, meta interface{}) error { - ctx, metaData := runArgs(d, meta, rw.logger) - wrappedCtx, cancel := timeouts.ForRead(ctx, d) - defer cancel() - return rw.resource.Read().Func(wrappedCtx, metaData) - }, - Delete: func(d *schema.ResourceData, meta interface{}) error { - ctx, metaData := runArgs(d, meta, rw.logger) - wrappedCtx, cancel := timeouts.ForDelete(ctx, d) - defer cancel() - return rw.resource.Delete().Func(wrappedCtx, metaData) - }, + ReadContext: rw.diagnosticsWrapper(func(ctx context.Context, d *schema.ResourceData, meta interface{}) error { + metaData := runArgs(d, meta, rw.logger) + return rw.resource.Read().Func(ctx, metaData) + }), + DeleteContext: rw.diagnosticsWrapper(func(ctx context.Context, d *schema.ResourceData, meta interface{}) error { + metaData := runArgs(d, meta, rw.logger) + return rw.resource.Delete().Func(ctx, metaData) + }), Timeouts: &schema.ResourceTimeout{ Create: d(rw.resource.Create().Timeout), @@ -96,7 +92,7 @@ func (rw *ResourceWrapper) Resource() (*schema.Resource, error) { return nil }, func(ctx context.Context, d *pluginsdk.ResourceData, 
meta interface{}) ([]*pluginsdk.ResourceData, error) { if v, ok := rw.resource.(ResourceWithCustomImporter); ok { - _, metaData := runArgs(d, meta, rw.logger) + metaData := runArgs(d, meta, rw.logger) err := v.CustomImporter()(ctx, metaData) if err != nil { @@ -106,27 +102,25 @@ func (rw *ResourceWrapper) Resource() (*schema.Resource, error) { return []*pluginsdk.ResourceData{metaData.ResourceData}, nil } - return schema.ImportStatePassthrough(d, meta) + return schema.ImportStatePassthroughContext(ctx, d, meta) }), } // Not all resources support update - so this is an separate interface // implementations can opt to interface if v, ok := rw.resource.(ResourceWithUpdate); ok { - resource.Update = func(d *schema.ResourceData, meta interface{}) error { - ctx, metaData := runArgs(d, meta, rw.logger) - wrappedCtx, cancel := timeouts.ForUpdate(ctx, d) - defer cancel() + resource.UpdateContext = rw.diagnosticsWrapper(func(ctx context.Context, d *schema.ResourceData, meta interface{}) error { + metaData := runArgs(d, meta, rw.logger) - err := v.Update().Func(wrappedCtx, metaData) + err := v.Update().Func(ctx, metaData) if err != nil { return err } // whilst this may look like we should use the Update timeout here // we're still "technically" in the update method, so reusing the // Update's timeout should be fine - return rw.resource.Read().Func(wrappedCtx, metaData) - } + return rw.resource.Read().Func(ctx, metaData) + }) resource.Timeouts.Update = d(v.Update().Timeout) } @@ -144,3 +138,27 @@ func (rw *ResourceWrapper) Resource() (*schema.Resource, error) { return &resource, nil } + +func (rw *ResourceWrapper) diagnosticsWrapper(in func(ctx context.Context, d *schema.ResourceData, meta interface{}) error) func(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + return diagnosticsWrapper(in, rw.logger) +} + +func diagnosticsWrapper(in func(ctx context.Context, d *schema.ResourceData, meta interface{}) error, logger Logger) func(ctx 
context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + return func(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + out := make([]diag.Diagnostic, 0) + if err := in(ctx, d, meta); err != nil { + out = append(out, diag.Diagnostic{ + Severity: diag.Error, + Summary: err.Error(), + Detail: err.Error(), + AttributePath: nil, + }) + } + + if diagsLogger, ok := logger.(*DiagnosticsLogger); ok { + out = append(out, diagsLogger.diagnostics...) + } + + return out + } +} diff --git a/azurerm/internal/services/analysisservices/validate/querypool_connection_mode.go b/azurerm/internal/services/analysisservices/validate/querypool_connection_mode.go index a93c1bb78804..60fd8184e521 100644 --- a/azurerm/internal/services/analysisservices/validate/querypool_connection_mode.go +++ b/azurerm/internal/services/analysisservices/validate/querypool_connection_mode.go @@ -2,11 +2,11 @@ package validate import ( "github.com/Azure/azure-sdk-for-go/services/analysisservices/mgmt/2017-08-01/analysisservices" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" ) -func QueryPoolConnectionMode() schema.SchemaValidateFunc { +func QueryPoolConnectionMode() pluginsdk.SchemaValidateFunc { connectionModes := make([]string, len(analysisservices.PossibleConnectionModeValues())) for i, v := range analysisservices.PossibleConnectionModeValues() { connectionModes[i] = string(v) diff --git a/azurerm/internal/services/apimanagement/api_management_api_diagnostic_resource.go b/azurerm/internal/services/apimanagement/api_management_api_diagnostic_resource.go index 8a89fc0e39be..7899d689c7bc 100644 --- a/azurerm/internal/services/apimanagement/api_management_api_diagnostic_resource.go +++ 
b/azurerm/internal/services/apimanagement/api_management_api_diagnostic_resource.go @@ -135,6 +135,17 @@ func resourceApiManagementApiDiagnosticAdditionalContentSchema() *pluginsdk.Sche }, Set: pluginsdk.HashString, }, + "data_masking": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "query_params": schemaApiManagementDataMaskingEntityList(), + "headers": schemaApiManagementDataMaskingEntityList(), + }, + }, + }, }, }, } @@ -329,6 +340,8 @@ func expandApiManagementApiDiagnosticHTTPMessageDiagnostic(input []interface{}) result.Headers = &headers } + result.DataMasking = expandApiManagementDataMasking(v["data_masking"].([]interface{})) + return result } @@ -348,7 +361,105 @@ func flattenApiManagementApiDiagnosticHTTPMessageDiagnostic(input *apimanagement if input.Headers != nil { diagnostic["headers_to_log"] = set.FromStringSlice(*input.Headers) } + + diagnostic["data_masking"] = flattenApiManagementDataMasking(input.DataMasking) + result = append(result, diagnostic) return result } + +func schemaApiManagementDataMaskingEntityList() *pluginsdk.Schema { + return &pluginsdk.Schema{ + Type: pluginsdk.TypeList, + Optional: true, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "mode": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{ + string(apimanagement.Hide), + string(apimanagement.Mask), + }, false), + }, + + "value": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + }, + }, + } +} + +func expandApiManagementDataMasking(input []interface{}) *apimanagement.DataMasking { + if len(input) == 0 || input[0] == nil { + return nil + } + + inputRaw := input[0].(map[string]interface{}) + return &apimanagement.DataMasking{ + QueryParams: expandApiManagementDataMaskingEntityList(inputRaw["query_params"].([]interface{})), + Headers: 
expandApiManagementDataMaskingEntityList(inputRaw["headers"].([]interface{})), + } +} + +func expandApiManagementDataMaskingEntityList(input []interface{}) *[]apimanagement.DataMaskingEntity { + if len(input) == 0 || input[0] == nil { + return nil + } + + result := make([]apimanagement.DataMaskingEntity, 0) + for _, v := range input { + entity := v.(map[string]interface{}) + result = append(result, apimanagement.DataMaskingEntity{ + Mode: apimanagement.DataMaskingMode(entity["mode"].(string)), + Value: utils.String(entity["value"].(string)), + }) + } + return &result +} + +func flattenApiManagementDataMasking(dataMasking *apimanagement.DataMasking) []interface{} { + if dataMasking == nil { + return []interface{}{} + } + + var queryParams, headers []interface{} + if dataMasking.QueryParams != nil { + queryParams = flattenApiManagementDataMaskingEntityList(dataMasking.QueryParams) + } + if dataMasking.Headers != nil { + headers = flattenApiManagementDataMaskingEntityList(dataMasking.Headers) + } + + return []interface{}{ + map[string]interface{}{ + "query_params": queryParams, + "headers": headers, + }, + } +} + +func flattenApiManagementDataMaskingEntityList(dataMaskingList *[]apimanagement.DataMaskingEntity) []interface{} { + if dataMaskingList == nil || len(*dataMaskingList) == 0 { + return []interface{}{} + } + + result := []interface{}{} + + for _, entity := range *dataMaskingList { + var value string + if entity.Value != nil { + value = *entity.Value + } + result = append(result, map[string]interface{}{ + "mode": string(entity.Mode), + "value": value, + }) + } + + return result +} diff --git a/azurerm/internal/services/apimanagement/api_management_api_diagnostic_resource_test.go b/azurerm/internal/services/apimanagement/api_management_api_diagnostic_resource_test.go index 3baaba870917..175ca69ab6eb 100644 --- a/azurerm/internal/services/apimanagement/api_management_api_diagnostic_resource_test.go +++ 
b/azurerm/internal/services/apimanagement/api_management_api_diagnostic_resource_test.go @@ -83,6 +83,28 @@ func TestAccApiManagementApiDiagnostic_complete(t *testing.T) { }) } +func TestAccApiManagementApiDiagnostic_dataMasking(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_api_management_api_diagnostic", "test") + r := ApiManagementApiDiagnosticResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.complete(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.dataMaskingUpdate(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + func (ApiManagementApiDiagnosticResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := parse.ApiDiagnosticID(state.ID) if err != nil { @@ -121,7 +143,7 @@ resource "azurerm_api_management" "test" { resource_group_name = azurerm_resource_group.test.name publisher_name = "pub1" publisher_email = "pub1@email.com" - sku_name = "Developer_1" + sku_name = "Consumption_0" } resource "azurerm_api_management_logger" "test" { @@ -214,6 +236,87 @@ func (r ApiManagementApiDiagnosticResource) complete(data acceptance.TestData) s return fmt.Sprintf(` %s +resource "azurerm_api_management_api_diagnostic" "test" { + identifier = "applicationinsights" + resource_group_name = azurerm_resource_group.test.name + api_management_name = azurerm_api_management.test.name + api_name = azurerm_api_management_api.test.name + api_management_logger_id = azurerm_api_management_logger.test.id + sampling_percentage = 1.0 + always_log_errors = true + log_client_ip = true + http_correlation_protocol = "W3C" + verbosity = "verbose" + + backend_request { + body_bytes = 1 + headers_to_log = ["Host"] + data_masking { + query_params { + mode = "Hide" + value = "backend-Request-Test" + } + headers 
{ + mode = "Mask" + value = "backend-Request-Header" + } + } + } + + backend_response { + body_bytes = 2 + headers_to_log = ["Content-Type"] + data_masking { + query_params { + mode = "Mask" + value = "backend-Resp-Test" + } + } + } + + frontend_request { + body_bytes = 3 + headers_to_log = ["Accept"] + data_masking { + headers { + mode = "Mask" + value = "frontend-Request-Header" + } + } + } + + frontend_response { + body_bytes = 4 + headers_to_log = ["Content-Length"] + data_masking { + query_params { + mode = "Hide" + value = "frontend-Response-Test" + } + + query_params { + mode = "Mask" + value = "frontend-Response-Test-Alt" + } + headers { + mode = "Mask" + value = "frontend-Response-Header" + } + + headers { + mode = "Mask" + value = "frontend-Response-Header-Alt" + } + } + } +} +`, r.template(data)) +} + +func (r ApiManagementApiDiagnosticResource) dataMaskingUpdate(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + resource "azurerm_api_management_api_diagnostic" "test" { identifier = "applicationinsights" resource_group_name = azurerm_resource_group.test.name @@ -234,16 +337,48 @@ resource "azurerm_api_management_api_diagnostic" "test" { backend_response { body_bytes = 2 headers_to_log = ["Content-Type"] + data_masking { + query_params { + mode = "Hide" + value = "backend-Resp-Test-Update" + } + } } frontend_request { body_bytes = 3 headers_to_log = ["Accept"] + data_masking { + headers { + mode = "Mask" + value = "frontend-Request-Header-Update" + } + } } frontend_response { body_bytes = 4 headers_to_log = ["Content-Length"] + data_masking { + query_params { + mode = "Hide" + value = "frontend-Response-Test-Update" + } + + query_params { + mode = "Mask" + value = "frontend-Response-Test-Alt-Update" + } + + query_params { + mode = "Mask" + value = "frontend-Response-Test-Alt2-Update" + } + headers { + mode = "Mask" + value = "frontend-Response-Header-Update" + } + } } } `, r.template(data)) diff --git 
a/azurerm/internal/services/apimanagement/api_management_api_operation_policy_resource.go b/azurerm/internal/services/apimanagement/api_management_api_operation_policy_resource.go index e15d699e982a..9e07363e4695 100644 --- a/azurerm/internal/services/apimanagement/api_management_api_operation_policy_resource.go +++ b/azurerm/internal/services/apimanagement/api_management_api_operation_policy_resource.go @@ -6,13 +6,12 @@ import ( "log" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" - "github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2020-12-01/apimanagement" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" diff --git a/azurerm/internal/services/apimanagement/api_management_api_operation_resource.go b/azurerm/internal/services/apimanagement/api_management_api_operation_resource.go index a934c744686b..0f8542471ba4 100644 --- a/azurerm/internal/services/apimanagement/api_management_api_operation_resource.go +++ b/azurerm/internal/services/apimanagement/api_management_api_operation_resource.go @@ -5,13 +5,12 @@ import ( "log" "time" - 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" - "github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2020-12-01/apimanagement" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" @@ -139,19 +138,19 @@ func resourceApiManagementApiOperationCreateUpdate(d *pluginsdk.ResourceData, me urlTemplate := d.Get("url_template").(string) requestContractRaw := d.Get("request").([]interface{}) - requestContract, err := expandApiManagementOperationRequestContract(requestContractRaw) + requestContract, err := expandApiManagementOperationRequestContract(d, "request", requestContractRaw) if err != nil { return err } responseContractsRaw := d.Get("response").([]interface{}) - responseContracts, err := expandApiManagementOperationResponseContract(responseContractsRaw) + responseContracts, err := expandApiManagementOperationResponseContract(d, "response", responseContractsRaw) if err != nil { return err } templateParametersRaw := d.Get("template_parameter").([]interface{}) - templateParameters := schemaz.ExpandApiManagementOperationParameterContract(templateParametersRaw) + templateParameters := schemaz.ExpandApiManagementOperationParameterContract(d, 
"template_parameter", templateParametersRaw) parameters := apimanagement.OperationContract{ OperationContractProperties: &apimanagement.OperationContractProperties{ @@ -260,7 +259,7 @@ func resourceApiManagementApiOperationDelete(d *pluginsdk.ResourceData, meta int return nil } -func expandApiManagementOperationRequestContract(input []interface{}) (*apimanagement.RequestContract, error) { +func expandApiManagementOperationRequestContract(d *pluginsdk.ResourceData, schemaPath string, input []interface{}) (*apimanagement.RequestContract, error) { if len(input) == 0 || input[0] == nil { return nil, nil } @@ -275,19 +274,19 @@ func expandApiManagementOperationRequestContract(input []interface{}) (*apimanag if headersRaw == nil { headersRaw = []interface{}{} } - headers := schemaz.ExpandApiManagementOperationParameterContract(headersRaw) + headers := schemaz.ExpandApiManagementOperationParameterContract(d, fmt.Sprintf("%s.0.header", schemaPath), headersRaw) queryParametersRaw := vs["query_parameter"].([]interface{}) if queryParametersRaw == nil { queryParametersRaw = []interface{}{} } - queryParameters := schemaz.ExpandApiManagementOperationParameterContract(queryParametersRaw) + queryParameters := schemaz.ExpandApiManagementOperationParameterContract(d, fmt.Sprintf("%s.0.query_parameter", schemaPath), queryParametersRaw) representationsRaw := vs["representation"].([]interface{}) if representationsRaw == nil { representationsRaw = []interface{}{} } - representations, err := schemaz.ExpandApiManagementOperationRepresentation(representationsRaw) + representations, err := schemaz.ExpandApiManagementOperationRepresentation(d, fmt.Sprintf("%s.0.representation", schemaPath), representationsRaw) if err != nil { return nil, err } @@ -318,24 +317,24 @@ func flattenApiManagementOperationRequestContract(input *apimanagement.RequestCo return []interface{}{output} } -func expandApiManagementOperationResponseContract(input []interface{}) (*[]apimanagement.ResponseContract, error) { 
+func expandApiManagementOperationResponseContract(d *pluginsdk.ResourceData, schemaPath string, input []interface{}) (*[]apimanagement.ResponseContract, error) { if len(input) == 0 { return &[]apimanagement.ResponseContract{}, nil } outputs := make([]apimanagement.ResponseContract, 0) - for _, v := range input { + for i, v := range input { vs := v.(map[string]interface{}) description := vs["description"].(string) statusCode := vs["status_code"].(int) headersRaw := vs["header"].([]interface{}) - headers := schemaz.ExpandApiManagementOperationParameterContract(headersRaw) + headers := schemaz.ExpandApiManagementOperationParameterContract(d, fmt.Sprintf("%s.%d.header", schemaPath, i), headersRaw) representationsRaw := vs["representation"].([]interface{}) - representations, err := schemaz.ExpandApiManagementOperationRepresentation(representationsRaw) + representations, err := schemaz.ExpandApiManagementOperationRepresentation(d, fmt.Sprintf("%s.%d.representation", schemaPath, i), representationsRaw) if err != nil { return nil, err } diff --git a/azurerm/internal/services/apimanagement/api_management_api_operation_resource_test.go b/azurerm/internal/services/apimanagement/api_management_api_operation_resource_test.go index 756de16e1849..518dcbc82367 100644 --- a/azurerm/internal/services/apimanagement/api_management_api_operation_resource_test.go +++ b/azurerm/internal/services/apimanagement/api_management_api_operation_resource_test.go @@ -262,6 +262,7 @@ resource "azurerm_api_management_api_operation" "test" { name = "X-Test-Operation" required = true type = "string" + values = ["application/x-www-form-urlencoded"] } representation { @@ -433,7 +434,7 @@ resource "azurerm_api_management" "test" { resource_group_name = azurerm_resource_group.test.name publisher_name = "pub1" publisher_email = "pub1@email.com" - sku_name = "Developer_1" + sku_name = "Consumption_0" } resource "azurerm_api_management_api" "test" { diff --git 
a/azurerm/internal/services/apimanagement/api_management_api_operation_tag_resource.go b/azurerm/internal/services/apimanagement/api_management_api_operation_tag_resource.go new file mode 100644 index 000000000000..74dfb6953d26 --- /dev/null +++ b/azurerm/internal/services/apimanagement/api_management_api_operation_tag_resource.go @@ -0,0 +1,155 @@ +package apimanagement + +import ( + "fmt" + "log" + "time" + + "github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2020-12-01/apimanagement" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func resourceApiManagementApiOperationTag() *pluginsdk.Resource { + return &pluginsdk.Resource{ + Create: resourceApiManagementApiOperationTagCreateUpdate, + Read: resourceApiManagementApiOperationTagRead, + Update: resourceApiManagementApiOperationTagCreateUpdate, + Delete: resourceApiManagementApiOperationTagDelete, + + Importer: pluginsdk.ImporterValidatingResourceId(func(id string) error { + _, err := parse.OperationTagID(id) + return err + }), + + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), + }, + + Schema: 
map[string]*pluginsdk.Schema{ + "api_operation_id": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.ApiOperationID, + }, + + "name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.ApiManagementChildName, + }, + + "display_name": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + }, + } +} + +func resourceApiManagementApiOperationTagCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { + subscriptionId := meta.(*clients.Client).Account.SubscriptionId + client := meta.(*clients.Client).ApiManagement.TagClient + ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) + defer cancel() + + apiOperationId, err := parse.ApiOperationID(d.Get("api_operation_id").(string)) + if err != nil { + return err + } + name := d.Get("name").(string) + + id := parse.NewOperationTagID(subscriptionId, apiOperationId.ResourceGroup, apiOperationId.ServiceName, apiOperationId.ApiName, apiOperationId.OperationName, name) + + if d.IsNewResource() { + existing, err := client.Get(ctx, apiOperationId.ResourceGroup, apiOperationId.ServiceName, name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("checking for presence of existing Tag %q: %s", id, err) + } + } + + if !utils.ResponseWasNotFound(existing.Response) { + return tf.ImportAsExistsError("azurerm_api_management_api_operation_tag", id.ID()) + } + } + + parameters := apimanagement.TagCreateUpdateParameters{ + TagContractProperties: &apimanagement.TagContractProperties{ + DisplayName: utils.String(d.Get("display_name").(string)), + }, + } + + if _, err := client.CreateOrUpdate(ctx, apiOperationId.ResourceGroup, apiOperationId.ServiceName, name, parameters, ""); err != nil { + return fmt.Errorf("creating/updating %q: %+v", id, err) + } + + if _, err := client.AssignToOperation(ctx, apiOperationId.ResourceGroup, 
apiOperationId.ServiceName, apiOperationId.ApiName, apiOperationId.OperationName, name); err != nil { + return fmt.Errorf("assigning to operation %q: %+v", id, err) + } + + d.SetId(id.ID()) + + return resourceApiManagementApiOperationTagRead(d, meta) +} + +func resourceApiManagementApiOperationTagRead(d *pluginsdk.ResourceData, meta interface{}) error { + subscriptionId := meta.(*clients.Client).Account.SubscriptionId + client := meta.(*clients.Client).ApiManagement.TagClient + ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.OperationTagID(d.Id()) + if err != nil { + return err + } + + resp, err := client.Get(ctx, id.ResourceGroup, id.ServiceName, id.TagName) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + log.Printf("[DEBUG] %q was not found - removing from state!", id) + d.SetId("") + return nil + } + + return fmt.Errorf("retrieving %q: %+v", id, err) + } + + d.Set("api_operation_id", parse.NewApiOperationID(subscriptionId, id.ResourceGroup, id.ServiceName, id.ApiName, id.OperationName).ID()) + d.Set("name", id.TagName) + + if props := resp.TagContractProperties; props != nil { + d.Set("display_name", props.DisplayName) + } + + return nil +} + +func resourceApiManagementApiOperationTagDelete(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).ApiManagement.TagClient + ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.OperationTagID(d.Id()) + if err != nil { + return err + } + + if _, err = client.Delete(ctx, id.ResourceGroup, id.ServiceName, id.TagName, ""); err != nil { + return fmt.Errorf("deleting %q: %+v", id, err) + } + + return nil +} diff --git a/azurerm/internal/services/apimanagement/api_management_api_operation_tag_resource_test.go b/azurerm/internal/services/apimanagement/api_management_api_operation_tag_resource_test.go new file mode 100644 index 000000000000..2f82e3cbbd51 --- 
/dev/null +++ b/azurerm/internal/services/apimanagement/api_management_api_operation_tag_resource_test.go @@ -0,0 +1,127 @@ +package apimanagement_test + +import ( + "context" + "fmt" + "testing" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +type ApiManagementApiOperationTagResource struct { +} + +func TestAccApiManagementApiOperationTag_basic(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_api_management_api_operation_tag", "test") + r := ApiManagementApiOperationTagResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccApiManagementApiOperationTag_requiresImport(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_api_management_api_operation_tag", "test") + r := ApiManagementApiOperationTagResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.RequiresImportErrorStep(r.requiresImport), + }) +} + +func TestAccApiManagementApiOperationTag_update(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_api_management_api_operation_tag", "test") + r := ApiManagementApiOperationTagResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + 
check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.update(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func (ApiManagementApiOperationTagResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { + id, err := parse.OperationTagID(state.ID) + if err != nil { + return nil, err + } + + resp, err := clients.ApiManagement.TagClient.Get(ctx, id.ResourceGroup, id.ServiceName, id.TagName) + if err != nil { + return nil, fmt.Errorf("reading %q: %+v", id, err) + } + + return utils.Bool(resp.ID != nil), nil +} + +func (r ApiManagementApiOperationTagResource) basic(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_api_management_api_operation_tag" "test" { + api_operation_id = azurerm_api_management_api_operation.test.id + name = "acctest-Op-Tag-%d" + display_name = "Display-Op-Tag" +} +`, ApiManagementApiOperationResource{}.basic(data), data.RandomInteger) +} + +func (r ApiManagementApiOperationTagResource) requiresImport(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_api_management_api_operation_tag" "import" { + api_operation_id = azurerm_api_management_api_operation_tag.test.api_operation_id + name = azurerm_api_management_api_operation_tag.test.name + display_name = azurerm_api_management_api_operation_tag.test.display_name +} +`, r.basic(data)) +} + +func (r ApiManagementApiOperationTagResource) update(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_api_management_api_operation_tag" "test" { + api_operation_id = azurerm_api_management_api_operation.test.id + name = "acctest-Op-Tag-%d" + + display_name = "Display-Op-Tag Updated" +} 
+`, ApiManagementApiOperationResource{}.basic(data), data.RandomInteger) +} diff --git a/azurerm/internal/services/apimanagement/api_management_api_policy_resource.go b/azurerm/internal/services/apimanagement/api_management_api_policy_resource.go index 1f510653bcc6..526508f4384c 100644 --- a/azurerm/internal/services/apimanagement/api_management_api_policy_resource.go +++ b/azurerm/internal/services/apimanagement/api_management_api_policy_resource.go @@ -6,13 +6,12 @@ import ( "log" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" - "github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2020-12-01/apimanagement" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" diff --git a/azurerm/internal/services/apimanagement/api_management_api_policy_resource_test.go b/azurerm/internal/services/apimanagement/api_management_api_policy_resource_test.go index 2a5a64e8c732..60ba21336f85 100644 --- a/azurerm/internal/services/apimanagement/api_management_api_policy_resource_test.go +++ b/azurerm/internal/services/apimanagement/api_management_api_policy_resource_test.go @@ -132,7 +132,7 @@ resource "azurerm_api_management" "test" { resource_group_name 
= azurerm_resource_group.test.name publisher_name = "pub1" publisher_email = "pub1@email.com" - sku_name = "Developer_1" + sku_name = "Consumption_0" } resource "azurerm_api_management_api" "test" { @@ -184,7 +184,7 @@ resource "azurerm_api_management" "test" { resource_group_name = azurerm_resource_group.test.name publisher_name = "pub1" publisher_email = "pub1@email.com" - sku_name = "Developer_1" + sku_name = "Consumption_0" } resource "azurerm_api_management_api" "test" { diff --git a/azurerm/internal/services/apimanagement/api_management_api_release.go b/azurerm/internal/services/apimanagement/api_management_api_release.go new file mode 100644 index 000000000000..681a9b98bb36 --- /dev/null +++ b/azurerm/internal/services/apimanagement/api_management_api_release.go @@ -0,0 +1,145 @@ +package apimanagement + +import ( + "fmt" + "log" + "time" + + "github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2020-12-01/apimanagement" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func resourceApiManagementApiRelease() *pluginsdk.Resource { + return &pluginsdk.Resource{ + Create: resourceApiManagementApiReleaseCreateUpdate, + Read: resourceApiManagementApiReleaseRead, + Update: resourceApiManagementApiReleaseCreateUpdate, + Delete: resourceApiManagementApiReleaseDelete, + + 
Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), + }, + + Importer: pluginsdk.ImporterValidatingResourceId(func(id string) error { + _, err := parse.ApiReleaseID(id) + return err + }), + + Schema: map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.ApiManagementChildName, + }, + + "api_id": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.ApiID, + }, + + "notes": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + }, + } +} +func resourceApiManagementApiReleaseCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { + subscriptionId := meta.(*clients.Client).Account.SubscriptionId + client := meta.(*clients.Client).ApiManagement.ApiReleasesClient + ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) + defer cancel() + + name := d.Get("name").(string) + apiId, err := parse.ApiID(d.Get("api_id").(string)) + if err != nil { + return err + } + id := parse.NewApiReleaseID(subscriptionId, apiId.ResourceGroup, apiId.ServiceName, apiId.Name, name) + ifMatch := "*" + + if d.IsNewResource() { + existing, err := client.Get(ctx, apiId.ResourceGroup, apiId.ServiceName, apiId.Name, name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("checking for existing %s: %+v", id, err) + } + } + if !utils.ResponseWasNotFound(existing.Response) { + return tf.ImportAsExistsError("azurerm_api_management_api_release", id.ID()) + } + ifMatch = "" + } + + parameters := apimanagement.APIReleaseContract{ + APIReleaseContractProperties: &apimanagement.APIReleaseContractProperties{ + APIID: utils.String(d.Get("api_id").(string)), + Notes: 
utils.String(d.Get("notes").(string)), + }, + } + + if _, err := client.CreateOrUpdate(ctx, apiId.ResourceGroup, apiId.ServiceName, apiId.Name, name, parameters, ifMatch); err != nil { + return fmt.Errorf("creating/ updating %s: %+v", id, err) + } + + d.SetId(id.ID()) + return resourceApiManagementApiReleaseRead(d, meta) +} + +func resourceApiManagementApiReleaseRead(d *pluginsdk.ResourceData, meta interface{}) error { + subscriptionId := meta.(*clients.Client).Account.SubscriptionId + client := meta.(*clients.Client).ApiManagement.ApiReleasesClient + ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.ApiReleaseID(d.Id()) + if err != nil { + return err + } + + resp, err := client.Get(ctx, id.ResourceGroup, id.ServiceName, id.ApiName, id.ReleaseName) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + log.Printf("[INFO] apimanagement %s does not exist - removing from state", d.Id()) + d.SetId("") + return nil + } + return fmt.Errorf("retrieving %s: %+v", id, err) + } + d.Set("name", id.ReleaseName) + if props := resp.APIReleaseContractProperties; props != nil { + d.Set("api_id", parse.NewApiID(subscriptionId, id.ResourceGroup, id.ServiceName, id.ApiName).ID()) + d.Set("notes", props.Notes) + } + return nil +} + +func resourceApiManagementApiReleaseDelete(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).ApiManagement.ApiReleasesClient + ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.ApiReleaseID(d.Id()) + if err != nil { + return err + } + + if _, err := client.Delete(ctx, id.ResourceGroup, id.ServiceName, id.ApiName, id.ReleaseName, "*"); err != nil { + return fmt.Errorf("deleting %s: %+v", id, err) + } + return nil +} diff --git a/azurerm/internal/services/apimanagement/api_management_api_release_resource_test.go 
b/azurerm/internal/services/apimanagement/api_management_api_release_resource_test.go new file mode 100644 index 000000000000..0d34be906874 --- /dev/null +++ b/azurerm/internal/services/apimanagement/api_management_api_release_resource_test.go @@ -0,0 +1,157 @@ +package apimanagement_test + +import ( + "context" + "fmt" + "testing" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +type ApiManagementApiReleaseResource struct { +} + +func TestAccApiManagementApiRelease_basic(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_api_management_api_release", "test") + r := ApiManagementApiReleaseResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccApiManagementApiRelease_requiresImport(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_api_management_api_release", "test") + r := ApiManagementApiReleaseResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.RequiresImportErrorStep(r.requiresImport), + }) +} + +func TestAccApiManagementApiRelease_complete(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_api_management_api_release", "test") + r := ApiManagementApiReleaseResource{} + + data.ResourceTest(t, r, 
[]acceptance.TestStep{ + { + Config: r.complete(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccApiManagementApiRelease_update(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_api_management_api_release", "test") + r := ApiManagementApiReleaseResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.complete(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.update(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func (ApiManagementApiReleaseResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { + id, err := parse.ApiReleaseID(state.ID) + if err != nil { + return nil, err + } + + resp, err := clients.ApiManagement.ApiReleasesClient.Get(ctx, id.ResourceGroup, id.ServiceName, id.ApiName, id.ReleaseName) + if err != nil { + return nil, fmt.Errorf("reading ApiManagement Api Release (%s): %+v", id, err) + } + + return utils.Bool(resp.ID != nil), nil +} + +func (r ApiManagementApiReleaseResource) basic(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_api_management_api_release" "test" { + name = "acctest-ApiRelease-%d" + api_id = azurerm_api_management_api.test.id +} +`, ApiManagementApiResource{}.basic(data), data.RandomInteger) +} + +func (r ApiManagementApiReleaseResource) complete(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource 
"azurerm_api_management_api_release" "test" { + name = "acctest-ApiRelease-%d" + api_id = azurerm_api_management_api.test.id + notes = "Release 1.0" +} +`, ApiManagementApiResource{}.basic(data), data.RandomInteger) +} +func (r ApiManagementApiReleaseResource) update(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_api_management_api_release" "test" { + name = "acctest-ApiRelease-%d" + api_id = azurerm_api_management_api.test.id + notes = "Release 2.0" +} +`, ApiManagementApiResource{}.basic(data), data.RandomInteger) +} + +func (r ApiManagementApiReleaseResource) requiresImport(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_api_management_api_release" "import" { + name = azurerm_api_management_api_release.test.name + api_id = azurerm_api_management_api_release.test.api_id +} +`, r.basic(data)) +} diff --git a/azurerm/internal/services/apimanagement/api_management_api_resource.go b/azurerm/internal/services/apimanagement/api_management_api_resource.go index 758db383c68d..056a341e3994 100644 --- a/azurerm/internal/services/apimanagement/api_management_api_resource.go +++ b/azurerm/internal/services/apimanagement/api_management_api_resource.go @@ -6,14 +6,13 @@ import ( "strings" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/validate" - "github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2020-12-01/apimanagement" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" @@ -45,19 +44,22 @@ func resourceApiManagementApi() *pluginsdk.Resource { "display_name": { Type: pluginsdk.TypeString, - Required: true, + Optional: true, + Computed: true, ValidateFunc: validation.StringIsNotEmpty, }, "path": { Type: pluginsdk.TypeString, - Required: true, + Optional: true, + Computed: true, ValidateFunc: validate.ApiManagementApiPath, }, "protocols": { Type: pluginsdk.TypeSet, - Required: true, + Optional: true, + Computed: true, Elem: &pluginsdk.Schema{ Type: pluginsdk.TypeString, ValidateFunc: validation.StringInSlice([]string{ @@ -74,6 +76,12 @@ func resourceApiManagementApi() *pluginsdk.Resource { ValidateFunc: validation.StringIsNotEmpty, }, + "revision_description": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + // Optional "description": { Type: pluginsdk.TypeString, @@ -172,6 +180,12 @@ func resourceApiManagementApi() *pluginsdk.Resource { Default: false, }, + "source_api_id": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validate.ApiID, + }, + "oauth2_authorization": { Type: pluginsdk.TypeList, Optional: true, @@ -235,6 +249,12 @@ func resourceApiManagementApi() *pluginsdk.Resource { Optional: true, }, + "version_description": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + "version_set_id": { Type: pluginsdk.TypeString, 
Computed: true, @@ -257,11 +277,19 @@ func resourceApiManagementApiCreateUpdate(d *pluginsdk.ResourceData, meta interf apiId := fmt.Sprintf("%s;rev=%s", name, revision) version := d.Get("version").(string) versionSetId := d.Get("version_set_id").(string) + displayName := d.Get("display_name").(string) + protocolsRaw := d.Get("protocols").(*pluginsdk.Set).List() + protocols := expandApiManagementApiProtocols(protocolsRaw) + sourceApiId := d.Get("source_api_id").(string) if version != "" && versionSetId == "" { return fmt.Errorf("setting `version` without the required `version_set_id`") } + if sourceApiId == "" && (displayName == "" || protocols == nil || len(*protocols) == 0) { + return fmt.Errorf("`display_name`, `protocols` are required when `source_api_id` is not set") + } + if d.IsNewResource() { existing, err := client.Get(ctx, resourceGroup, serviceName, apiId) if err != nil { @@ -339,13 +367,9 @@ func resourceApiManagementApiCreateUpdate(d *pluginsdk.ResourceData, meta interf } description := d.Get("description").(string) - displayName := d.Get("display_name").(string) serviceUrl := d.Get("service_url").(string) subscriptionRequired := d.Get("subscription_required").(bool) - protocolsRaw := d.Get("protocols").(*pluginsdk.Set).List() - protocols := expandApiManagementApiProtocols(protocolsRaw) - subscriptionKeyParameterNamesRaw := d.Get("subscription_key_parameter_names").([]interface{}) subscriptionKeyParameterNames := expandApiManagementApiSubscriptionKeyParamNames(subscriptionKeyParameterNamesRaw) @@ -364,7 +388,6 @@ func resourceApiManagementApiCreateUpdate(d *pluginsdk.ResourceData, meta interf APIType: apiType, SoapAPIType: soapApiType, Description: utils.String(description), - DisplayName: utils.String(displayName), Path: utils.String(path), Protocols: protocols, ServiceURL: utils.String(serviceUrl), @@ -372,9 +395,19 @@ func resourceApiManagementApiCreateUpdate(d *pluginsdk.ResourceData, meta interf APIVersion: utils.String(version), 
SubscriptionRequired: &subscriptionRequired, AuthenticationSettings: authenticationSettings, + APIRevisionDescription: utils.String(d.Get("revision_description").(string)), + APIVersionDescription: utils.String(d.Get("version_description").(string)), }, } + if sourceApiId != "" { + params.APICreateOrUpdateProperties.SourceAPIID = &sourceApiId + } + + if displayName != "" { + params.APICreateOrUpdateProperties.DisplayName = &displayName + } + if versionSetId != "" { params.APICreateOrUpdateProperties.APIVersionSetID = utils.String(versionSetId) } @@ -449,6 +482,8 @@ func resourceApiManagementApiRead(d *pluginsdk.ResourceData, meta interface{}) e d.Set("subscription_required", props.SubscriptionRequired) d.Set("version", props.APIVersion) d.Set("version_set_id", props.APIVersionSetID) + d.Set("revision_description", props.APIRevisionDescription) + d.Set("version_description", props.APIVersionDescription) if err := d.Set("protocols", flattenApiManagementApiProtocols(props.Protocols)); err != nil { return fmt.Errorf("setting `protocols`: %s", err) @@ -502,6 +537,9 @@ func resourceApiManagementApiDelete(d *pluginsdk.ResourceData, meta interface{}) } func expandApiManagementApiProtocols(input []interface{}) *[]apimanagement.Protocol { + if len(input) == 0 { + return nil + } results := make([]apimanagement.Protocol, 0) for _, v := range input { diff --git a/azurerm/internal/services/apimanagement/api_management_api_resource_test.go b/azurerm/internal/services/apimanagement/api_management_api_resource_test.go index 30853c59a314..749cd6aa99aa 100644 --- a/azurerm/internal/services/apimanagement/api_management_api_resource_test.go +++ b/azurerm/internal/services/apimanagement/api_management_api_resource_test.go @@ -261,6 +261,66 @@ func TestAccApiManagementApi_complete(t *testing.T) { }) } +func TestAccApiManagementApi_cloneApi(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_api_management_api", "clone") + r := ApiManagementApiResource{} + + 
data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.cloneApi(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep("source_api_id"), + }) +} + +func TestAccApiManagementApi_createNewVersionFromExisting(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_api_management_api", "version") + r := ApiManagementApiResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.createNewVersionFromExisting(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep("source_api_id"), + }) +} + +func TestAccApiManagementApi_createRevisionFromExisting(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_api_management_api", "revision") + r := ApiManagementApiResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.createRevisionFromExisting(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep("source_api_id"), + }) +} + +func TestAccApiManagementApi_createRevisionFromExistingRevision(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_api_management_api", "revision") + r := ApiManagementApiResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.createRevisionFromExistingRevision(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep("source_api_id"), + }) +} + func (ApiManagementApiResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := azure.ParseAzureResourceID(state.ID) if err != nil { @@ -544,6 +604,78 @@ resource "azurerm_api_management_api" "test" { `, r.template(data), data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger) } +func (r ApiManagementApiResource) cloneApi(data 
acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_api_management_api" "clone" { + name = "acctestClone-%d" + resource_group_name = azurerm_resource_group.test.name + api_management_name = azurerm_api_management.test.name + display_name = "api1_clone" + revision = "1" + source_api_id = azurerm_api_management_api.test.id + description = "Copy of Existing Echo Api including Operations." +} +`, r.basic(data), data.RandomInteger) +} + +func (r ApiManagementApiResource) createNewVersionFromExisting(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_api_management_api_version_set" "test" { + name = "acctestAMAVS-%d" + resource_group_name = azurerm_resource_group.test.name + api_management_name = azurerm_api_management.test.name + display_name = "Butter Parser" + versioning_scheme = "Segment" +} + +resource "azurerm_api_management_api" "version" { + name = "acctestVersion-%d" + resource_group_name = azurerm_resource_group.test.name + api_management_name = azurerm_api_management.test.name + display_name = "api_version" + revision = "1" + source_api_id = azurerm_api_management_api.test.id + version = "v1" + version_set_id = azurerm_api_management_api_version_set.test.id + version_description = "Create Echo API into a new Version using Existing Version Set and Copy all Operations." 
+} +`, r.basic(data), data.RandomInteger, data.RandomInteger) +} + +func (r ApiManagementApiResource) createRevisionFromExisting(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_api_management_api" "revision" { + name = "acctestRevision-%d" + resource_group_name = azurerm_resource_group.test.name + api_management_name = azurerm_api_management.test.name + revision = "18" + source_api_id = azurerm_api_management_api.test.id + revision_description = "Creating a Revision of an existing API" +} +`, r.basic(data), data.RandomInteger) +} + +func (r ApiManagementApiResource) createRevisionFromExistingRevision(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_api_management_api" "revision" { + name = "acctestRevision-%d" + resource_group_name = azurerm_resource_group.test.name + api_management_name = azurerm_api_management.test.name + revision = "18" + source_api_id = "${azurerm_api_management_api.test.id};rev=3" + revision_description = "Creating a Revision of an existing API" +} +`, r.complete(data), data.RandomInteger) +} + func (ApiManagementApiResource) template(data acceptance.TestData) string { return fmt.Sprintf(` provider "azurerm" { @@ -562,7 +694,7 @@ resource "azurerm_api_management" "test" { publisher_name = "pub1" publisher_email = "pub1@email.com" - sku_name = "Developer_1" + sku_name = "Consumption_0" } `, data.RandomInteger, data.Locations.Primary, data.RandomInteger) } diff --git a/azurerm/internal/services/apimanagement/api_management_api_schema_resource.go b/azurerm/internal/services/apimanagement/api_management_api_schema_resource.go index b03bf8ff6850..d051eec97e94 100644 --- a/azurerm/internal/services/apimanagement/api_management_api_schema_resource.go +++ b/azurerm/internal/services/apimanagement/api_management_api_schema_resource.go @@ -1,17 +1,17 @@ package apimanagement import ( + "encoding/json" "fmt" "log" "time" - 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" - "github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2020-12-01/apimanagement" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" @@ -53,6 +53,12 @@ func resourceApiManagementApiSchema() *pluginsdk.Resource { Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringIsNotEmpty, + DiffSuppressFunc: func(k, old, new string, d *pluginsdk.ResourceData) bool { + if d.Get("content_type") == "application/vnd.ms-azure-apim.swagger.definitions+json" || d.Get("content_type") == "application/vnd.oai.openapi.components+json" { + return pluginsdk.SuppressJsonDiff(k, old, new, d) + } + return old == new + }, }, }, } @@ -96,15 +102,24 @@ func resourceApiManagementApiSchemaCreateUpdate(d *pluginsdk.ResourceData, meta return fmt.Errorf("creating or updating API Schema %q (API Management Service %q / API %q / Resource Group %q): %s", schemaID, serviceName, apiName, resourceGroup, err) } - resp, err := client.Get(ctx, resourceGroup, serviceName, apiName, schemaID) + //lintignore:R006 + err := pluginsdk.Retry(d.Timeout(pluginsdk.TimeoutCreate), func() *pluginsdk.RetryError { + 
resp, err := client.Get(ctx, resourceGroup, serviceName, apiName, schemaID) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + return pluginsdk.RetryableError(fmt.Errorf("Expected schema %q (API Management Service %q / API %q / Resource Group %q) to be created but was in non existent state, retrying", schemaID, serviceName, apiName, resourceGroup)) + } + return pluginsdk.NonRetryableError(fmt.Errorf("Error getting schema %q (API Management Service %q / API %q / Resource Group %q): %+v", schemaID, serviceName, apiName, resourceGroup, err)) + } + if resp.ID == nil { + return pluginsdk.NonRetryableError(fmt.Errorf("Cannot read ID for API Schema %q (API Management Service %q / API %q / Resource Group %q): %s", schemaID, serviceName, apiName, resourceGroup, err)) + } + d.SetId(*resp.ID) + return nil + }) if err != nil { - return fmt.Errorf("retrieving API Schema %q (API Management Service %q / API %q / Resource Group %q): %s", schemaID, serviceName, apiName, resourceGroup, err) - } - if resp.ID == nil { - return fmt.Errorf("Cannot read ID for API Schema %q (API Management Service %q / API %q / Resource Group %q): %s", schemaID, serviceName, apiName, resourceGroup, err) + return fmt.Errorf("Error getting schema %q (API Management Service %q / API %q / Resource Group %q): %+v", schemaID, serviceName, apiName, resourceGroup, err) } - d.SetId(*resp.ID) - return resourceApiManagementApiSchemaRead(d, meta) } @@ -141,10 +156,33 @@ func resourceApiManagementApiSchemaRead(d *pluginsdk.ResourceData, meta interfac if properties := resp.SchemaContractProperties; properties != nil { d.Set("content_type", properties.ContentType) if documentProperties := properties.SchemaDocumentProperties; documentProperties != nil { - d.Set("value", documentProperties.Value) + /* + As per https://docs.microsoft.com/en-us/rest/api/apimanagement/2019-12-01/api-schema/get#schemacontract + + - Swagger Schema use application/vnd.ms-azure-apim.swagger.definitions+json + - WSDL Schema use 
application/vnd.ms-azure-apim.xsd+xml + - OpenApi Schema use application/vnd.oai.openapi.components+json + - WADL Schema use application/vnd.ms-azure-apim.wadl.grammars+xml. + + Definitions used for Swagger/OpenAPI schemas only, otherwise Value is used + */ + switch *properties.ContentType { + case "application/vnd.ms-azure-apim.swagger.definitions+json", "application/vnd.oai.openapi.components+json": + if documentProperties.Definitions != nil { + value, err := json.Marshal(documentProperties.Definitions) + if err != nil { + return fmt.Errorf("[FATAL] Unable to serialize schema to json. Error: %+v. Schema struct: %+v", err, documentProperties.Definitions) + } + d.Set("value", string(value)) + } + case "application/vnd.ms-azure-apim.xsd+xml", "application/vnd.ms-azure-apim.wadl.grammars+xml": + d.Set("value", documentProperties.Value) + default: + log.Printf("[WARN] Unknown content type %q for schema %q (API Management Service %q / API %q / Resource Group %q)", *properties.ContentType, schemaID, serviceName, apiName, resourceGroup) + d.Set("value", documentProperties.Value) + } } } - return nil } diff --git a/azurerm/internal/services/apimanagement/api_management_api_schema_resource_test.go b/azurerm/internal/services/apimanagement/api_management_api_schema_resource_test.go index 97ac96b100d0..4fbb9bedc5ab 100644 --- a/azurerm/internal/services/apimanagement/api_management_api_schema_resource_test.go +++ b/azurerm/internal/services/apimanagement/api_management_api_schema_resource_test.go @@ -3,6 +3,8 @@ package apimanagement_test import ( "context" "fmt" + "io/ioutil" + "strings" "testing" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" @@ -19,12 +21,31 @@ type ApiManagementApiSchemaResource struct { func TestAccApiManagementApiSchema_basic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_api_management_api_schema", "test") r := ApiManagementApiSchemaResource{} + schema, _ := 
ioutil.ReadFile("testdata/api_management_api_schema.xml") data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("value").HasValue(string(schema)), + ), + }, + data.ImportStep(), + }) +} + +func TestAccApiManagementApiSchema_basicSwagger(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_api_management_api_schema", "test") + r := ApiManagementApiSchemaResource{} + schema, _ := ioutil.ReadFile("testdata/api_management_api_schema_swagger.json") + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basicSwagger(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("value").HasValue(strings.TrimRight(string(schema), "\r\n")), ), }, data.ImportStep(), @@ -74,7 +95,22 @@ resource "azurerm_api_management_api_schema" "test" { resource_group_name = azurerm_api_management_api.test.resource_group_name schema_id = "acctestSchema%d" content_type = "application/vnd.ms-azure-apim.xsd+xml" - value = file("testdata/api_management_api_pluginsdk.xml") + value = file("testdata/api_management_api_schema.xml") +} +`, r.template(data), data.RandomInteger) +} + +func (r ApiManagementApiSchemaResource) basicSwagger(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_api_management_api_schema" "test" { + api_name = azurerm_api_management_api.test.name + api_management_name = azurerm_api_management_api.test.api_management_name + resource_group_name = azurerm_api_management_api.test.resource_group_name + schema_id = "acctestSchema%d" + content_type = "application/vnd.ms-azure-apim.swagger.definitions+json" + value = file("testdata/api_management_api_schema_swagger.json") } `, r.template(data), data.RandomInteger) } @@ -84,12 +120,12 @@ func (r ApiManagementApiSchemaResource) requiresImport(data acceptance.TestData) %s 
resource "azurerm_api_management_api_schema" "import" { - api_name = azurerm_api_management_api_pluginsdk.test.api_name - api_management_name = azurerm_api_management_api_pluginsdk.test.api_management_name - resource_group_name = azurerm_api_management_api_pluginsdk.test.resource_group_name - schema_id = azurerm_api_management_api_pluginsdk.test.schema_id - content_type = azurerm_api_management_api_pluginsdk.test.content_type - value = azurerm_api_management_api_pluginsdk.test.value + api_name = azurerm_api_management_api_schema.test.api_name + api_management_name = azurerm_api_management_api_schema.test.api_management_name + resource_group_name = azurerm_api_management_api_schema.test.resource_group_name + schema_id = azurerm_api_management_api_schema.test.schema_id + content_type = azurerm_api_management_api_schema.test.content_type + value = azurerm_api_management_api_schema.test.value } `, r.basic(data)) } @@ -111,7 +147,7 @@ resource "azurerm_api_management" "test" { resource_group_name = azurerm_resource_group.test.name publisher_name = "pub1" publisher_email = "pub1@email.com" - sku_name = "Developer_1" + sku_name = "Consumption_0" } resource "azurerm_api_management_api" "test" { diff --git a/azurerm/internal/services/apimanagement/api_management_api_version_set_data_source.go b/azurerm/internal/services/apimanagement/api_management_api_version_set_data_source.go index e8f191135271..a45e91cc1360 100644 --- a/azurerm/internal/services/apimanagement/api_management_api_version_set_data_source.go +++ b/azurerm/internal/services/apimanagement/api_management_api_version_set_data_source.go @@ -4,11 +4,10 @@ import ( "fmt" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" diff --git a/azurerm/internal/services/apimanagement/api_management_api_version_set_resource_test.go b/azurerm/internal/services/apimanagement/api_management_api_version_set_resource_test.go index 58f3b11b34cb..194a5bd7f2e4 100644 --- a/azurerm/internal/services/apimanagement/api_management_api_version_set_resource_test.go +++ b/azurerm/internal/services/apimanagement/api_management_api_version_set_resource_test.go @@ -209,7 +209,7 @@ resource "azurerm_api_management" "test" { resource_group_name = azurerm_resource_group.test.name publisher_name = "pub1" publisher_email = "pub1@email.com" - sku_name = "Developer_1" + sku_name = "Consumption_0" } `, data.RandomInteger, data.Locations.Primary, data.RandomInteger) } diff --git a/azurerm/internal/services/apimanagement/api_management_authorization_server_resource.go b/azurerm/internal/services/apimanagement/api_management_authorization_server_resource.go index 52a85fc563e3..ba1c9d9250bd 100644 --- a/azurerm/internal/services/apimanagement/api_management_authorization_server_resource.go +++ b/azurerm/internal/services/apimanagement/api_management_authorization_server_resource.go @@ -5,13 +5,12 @@ import ( "log" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" - 
"github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2020-12-01/apimanagement" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" diff --git a/azurerm/internal/services/apimanagement/api_management_authorization_server_resource_test.go b/azurerm/internal/services/apimanagement/api_management_authorization_server_resource_test.go index bfb197aca6d5..d660afa58d64 100644 --- a/azurerm/internal/services/apimanagement/api_management_authorization_server_resource_test.go +++ b/azurerm/internal/services/apimanagement/api_management_authorization_server_resource_test.go @@ -177,7 +177,7 @@ resource "azurerm_api_management" "test" { resource_group_name = azurerm_resource_group.test.name publisher_name = "pub1" publisher_email = "pub1@email.com" - sku_name = "Developer_1" + sku_name = "Consumption_0" } `, data.RandomInteger, data.Locations.Primary, data.RandomInteger) } diff --git a/azurerm/internal/services/apimanagement/api_management_backend_resource.go b/azurerm/internal/services/apimanagement/api_management_backend_resource.go index fd12605070d0..9e095dfed892 100644 --- a/azurerm/internal/services/apimanagement/api_management_backend_resource.go +++ b/azurerm/internal/services/apimanagement/api_management_backend_resource.go @@ -6,14 +6,13 @@ import ( "strings" "time" - 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/validate" - "github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2020-12-01/apimanagement" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" @@ -160,9 +159,17 @@ func resourceApiManagementBackend() *pluginsdk.Resource { Optional: true, Elem: &pluginsdk.Resource{ Schema: map[string]*pluginsdk.Schema{ + "client_certificate_id": { + Type: pluginsdk.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validate.CertificateID, + }, + "client_certificate_thumbprint": { Type: pluginsdk.TypeString, - Required: true, + Optional: true, + Computed: true, ValidateFunc: validation.StringIsNotEmpty, }, "management_endpoints": { @@ -470,14 +477,25 @@ func expandApiManagementBackendServiceFabricCluster(input []interface{}) (error, return nil, nil } v := input[0].(map[string]interface{}) - clientCertificatethumbprint := v["client_certificate_thumbprint"].(string) 
managementEndpoints := v["management_endpoints"].(*pluginsdk.Set).List() maxPartitionResolutionRetries := int32(v["max_partition_resolution_retries"].(int)) properties := apimanagement.BackendServiceFabricClusterProperties{ - ClientCertificatethumbprint: utils.String(clientCertificatethumbprint), ManagementEndpoints: utils.ExpandStringSlice(managementEndpoints), MaxPartitionResolutionRetries: utils.Int32(maxPartitionResolutionRetries), } + + if v2, ok := v["client_certificate_thumbprint"].(string); ok && v2 != "" { + properties.ClientCertificatethumbprint = utils.String(v2) + } + + if v2, ok := v["client_certificate_id"].(string); ok && v2 != "" { + properties.ClientCertificateID = utils.String(v2) + } + + if properties.ClientCertificateID == nil && properties.ClientCertificatethumbprint == nil { + return fmt.Errorf("at least one of `client_certificate_thumbprint` and `client_certificate_id` must be set"), nil + } + serverCertificateThumbprintsUnset := true serverX509NamesUnset := true if serverCertificateThumbprints := v["server_certificate_thumbprints"]; serverCertificateThumbprints != nil { @@ -590,6 +608,11 @@ func flattenApiManagementBackendServiceFabricCluster(input *apimanagement.Backen if clientCertificatethumbprint := input.ClientCertificatethumbprint; clientCertificatethumbprint != nil { result["client_certificate_thumbprint"] = *clientCertificatethumbprint } + + if input.ClientCertificateID != nil { + result["client_certificate_id"] = *input.ClientCertificateID + } + if managementEndpoints := input.ManagementEndpoints; managementEndpoints != nil { result["management_endpoints"] = *managementEndpoints } diff --git a/azurerm/internal/services/apimanagement/api_management_backend_resource_test.go b/azurerm/internal/services/apimanagement/api_management_backend_resource_test.go index fcc1fe306239..ddfc27af180c 100644 --- a/azurerm/internal/services/apimanagement/api_management_backend_resource_test.go +++ 
b/azurerm/internal/services/apimanagement/api_management_backend_resource_test.go @@ -144,6 +144,21 @@ func TestAccApiManagementBackend_serviceFabric(t *testing.T) { }) } +func TestAccApiManagementBackend_serviceFabricClientCertificateId(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_api_management_backend", "test") + r := ApiManagementAuthorizationBackendResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.serviceFabricClientCertificateId(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + func TestAccApiManagementBackend_disappears(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_api_management_backend", "test") r := ApiManagementAuthorizationBackendResource{} @@ -333,6 +348,39 @@ resource "azurerm_api_management_backend" "test" { `, r.template(data, "sf"), data.RandomInteger) } +func (r ApiManagementAuthorizationBackendResource) serviceFabricClientCertificateId(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_api_management_certificate" "test" { + name = "example-cert" + api_management_name = azurerm_api_management.test.name + resource_group_name = azurerm_resource_group.test.name + data = filebase64("testdata/keyvaultcert.pfx") + password = "" +} + +resource "azurerm_api_management_backend" "test" { + name = "acctestapi-%d" + resource_group_name = azurerm_resource_group.test.name + api_management_name = azurerm_api_management.test.name + protocol = "http" + url = "fabric:/mytestapp/acctest" + service_fabric_cluster { + client_certificate_id = azurerm_api_management_certificate.test.id + management_endpoints = [ + "https://acctestsf.com", + ] + max_partition_resolution_retries = 5 + server_certificate_thumbprints = [ + azurerm_api_management_certificate.test.thumbprint, + azurerm_api_management_certificate.test.thumbprint, + ] + } +} +`, r.template(data, "sf"), data.RandomInteger) +} + func 
(r ApiManagementAuthorizationBackendResource) requiresImport(data acceptance.TestData) string { return fmt.Sprintf(` %s @@ -364,7 +412,7 @@ resource "azurerm_api_management" "test" { resource_group_name = azurerm_resource_group.test.name publisher_name = "pub1" publisher_email = "pub1@email.com" - sku_name = "Developer_1" + sku_name = "Consumption_0" } `, data.RandomInteger, testName, data.Locations.Primary, data.RandomInteger, testName) } diff --git a/azurerm/internal/services/apimanagement/api_management_certificate_resource.go b/azurerm/internal/services/apimanagement/api_management_certificate_resource.go index b137514a03e6..89a92de04404 100644 --- a/azurerm/internal/services/apimanagement/api_management_certificate_resource.go +++ b/azurerm/internal/services/apimanagement/api_management_certificate_resource.go @@ -5,13 +5,12 @@ import ( "log" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" - "github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2020-12-01/apimanagement" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" keyVaultParse "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/keyvault/parse" keyVaultValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/keyvault/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" diff --git 
a/azurerm/internal/services/apimanagement/api_management_certificate_resource_test.go b/azurerm/internal/services/apimanagement/api_management_certificate_resource_test.go index 6312bcc79bd1..3fd079f77a41 100644 --- a/azurerm/internal/services/apimanagement/api_management_certificate_resource_test.go +++ b/azurerm/internal/services/apimanagement/api_management_certificate_resource_test.go @@ -7,7 +7,6 @@ import ( "testing" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" @@ -161,7 +160,7 @@ resource "azurerm_api_management" "test" { resource_group_name = azurerm_resource_group.test.name publisher_name = "pub1" publisher_email = "pub1@email.com" - sku_name = "Developer_1" + sku_name = "Consumption_0" } resource "azurerm_api_management_certificate" "test" { @@ -184,7 +183,7 @@ resource "azurerm_api_management" "test" { resource_group_name = azurerm_resource_group.test.name publisher_name = "pub1" publisher_email = "pub1@email.com" - sku_name = "Developer_1" + sku_name = "Consumption_0" identity { type = "SystemAssigned" @@ -231,7 +230,7 @@ resource "azurerm_api_management" "test" { resource_group_name = azurerm_resource_group.test.name publisher_name = "pub1" publisher_email = "pub1@email.com" - sku_name = "Developer_1" + sku_name = "Consumption_0" identity { type = "UserAssigned" diff --git a/azurerm/internal/services/apimanagement/api_management_custom_domain_resource.go b/azurerm/internal/services/apimanagement/api_management_custom_domain_resource.go index 51e2b7919e43..703204abe4fd 100644 --- a/azurerm/internal/services/apimanagement/api_management_custom_domain_resource.go +++ b/azurerm/internal/services/apimanagement/api_management_custom_domain_resource.go @@ 
-7,13 +7,12 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2020-12-01/apimanagement" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -131,7 +130,7 @@ func apiManagementCustomDomainCreateUpdate(d *pluginsdk.ResourceData, meta inter stateConf.Timeout = d.Timeout(pluginsdk.TimeoutUpdate) } - if _, err = stateConf.WaitForState(); err != nil { + if _, err = stateConf.WaitForStateContext(ctx); err != nil { return fmt.Errorf("waiting for API Management Service %q (Resource Group: %q) to become ready: %+v", serviceName, resourceGroup, err) } @@ -149,7 +148,7 @@ func apiManagementCustomDomainCreateUpdate(d *pluginsdk.ResourceData, meta inter // Wait for the ProvisioningState to become "Succeeded" before attempting to update log.Printf("[DEBUG] Waiting for API Management Service %q (Resource Group: %q) to become ready", serviceName, resourceGroup) - if _, err = stateConf.WaitForState(); err != nil { + if _, err = stateConf.WaitForStateContext(ctx); err != nil { return fmt.Errorf("waiting for API Management Service %q (Resource Group: %q) to become ready: %+v", serviceName, 
resourceGroup, err) } @@ -235,7 +234,7 @@ func apiManagementCustomDomainDelete(d *pluginsdk.ResourceData, meta interface{} ContinuousTargetOccurence: 6, } - if _, err = stateConf.WaitForState(); err != nil { + if _, err = stateConf.WaitForStateContext(ctx); err != nil { return fmt.Errorf("waiting for API Management Service %q (Resource Group: %q) to become ready: %+v", serviceName, resourceGroup, err) } @@ -249,7 +248,7 @@ func apiManagementCustomDomainDelete(d *pluginsdk.ResourceData, meta interface{} // Wait for the ProvisioningState to become "Succeeded" before attempting to update log.Printf("[DEBUG] Waiting for API Management Service %q (Resource Group: %q) to become ready", serviceName, resourceGroup) - if _, err = stateConf.WaitForState(); err != nil { + if _, err = stateConf.WaitForStateContext(ctx); err != nil { return fmt.Errorf("waiting for API Management Service %q (Resource Group: %q) to become ready: %+v", serviceName, resourceGroup, err) } diff --git a/azurerm/internal/services/apimanagement/api_management_data_source.go b/azurerm/internal/services/apimanagement/api_management_data_source.go index fc260e095450..e659bdf4fe67 100644 --- a/azurerm/internal/services/apimanagement/api_management_data_source.go +++ b/azurerm/internal/services/apimanagement/api_management_data_source.go @@ -5,13 +5,12 @@ import ( "strings" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" - msiparse "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/msi/parse" - "github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2020-12-01/apimanagement" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" + msiparse "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/msi/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" diff --git a/azurerm/internal/services/apimanagement/api_management_diagnostic_resource_test.go b/azurerm/internal/services/apimanagement/api_management_diagnostic_resource_test.go index 309313727129..de469c6f0329 100644 --- a/azurerm/internal/services/apimanagement/api_management_diagnostic_resource_test.go +++ b/azurerm/internal/services/apimanagement/api_management_diagnostic_resource_test.go @@ -121,7 +121,7 @@ resource "azurerm_api_management" "test" { resource_group_name = azurerm_resource_group.test.name publisher_name = "pub1" publisher_email = "pub1@email.com" - sku_name = "Developer_1" + sku_name = "Consumption_0" } resource "azurerm_api_management_logger" "test" { diff --git a/azurerm/internal/services/apimanagement/api_management_gateway_api_resource.go b/azurerm/internal/services/apimanagement/api_management_gateway_api_resource.go new file mode 100644 index 000000000000..5f3f49da3062 --- /dev/null +++ b/azurerm/internal/services/apimanagement/api_management_gateway_api_resource.go @@ -0,0 +1,146 @@ +package apimanagement + +import ( + "fmt" + "log" + "net/http" + "time" + + "github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2020-12-01/apimanagement" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func resourceApiManagementGatewayApi() *pluginsdk.Resource { + return &pluginsdk.Resource{ + Create: resourceApiManagementGatewayApiCreate, + Read: resourceApiManagementGatewayApiRead, + Delete: resourceApiManagementGatewayApiDelete, + + Importer: pluginsdk.ImporterValidatingResourceId(func(id string) error { + _, err := parse.GatewayApiID(id) + return err + }), + + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), + }, + + Schema: map[string]*pluginsdk.Schema{ + "api_id": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: azure.ValidateResourceID, + }, + "gateway_id": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: azure.ValidateResourceID, + }, + }, + } +} + +func resourceApiManagementGatewayApiCreate(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).ApiManagement.GatewayApisClient + ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) + defer cancel() + + apiID, err := parse.ApiID(d.Get("api_id").(string)) + if err != nil { + return fmt.Errorf("parsing `api_id`: %v", err) + } + + gatewayID, err := parse.GatewayID(d.Get("gateway_id").(string)) + if err != nil { + return fmt.Errorf("parsing `gateway_id`: %v", err) + } + + exists, err := client.GetEntityTag(ctx, gatewayID.ResourceGroup, 
gatewayID.ServiceName, gatewayID.Name, apiID.Name) + if err != nil { + if !utils.ResponseWasStatusCode(exists, http.StatusNoContent) { + if !utils.ResponseWasNotFound(exists) { + return fmt.Errorf("checking for presence of existing %s: %+v", gatewayID, err) + } + } + } + + id := parse.NewGatewayApiID(gatewayID.SubscriptionId, gatewayID.ResourceGroup, gatewayID.ServiceName, gatewayID.Name, apiID.Name) + if !utils.ResponseWasNotFound(exists) { + return tf.ImportAsExistsError("azurerm_api_management_gateway_api", id.ID()) + } + params := &apimanagement.AssociationContract{} + _, err = client.CreateOrUpdate(ctx, gatewayID.ResourceGroup, gatewayID.ServiceName, gatewayID.Name, apiID.Name, params) + if err != nil { + return fmt.Errorf("creating %s: %+v", id, err) + } + d.SetId(id.ID()) + + return resourceApiManagementGatewayApiRead(d, meta) +} + +func resourceApiManagementGatewayApiRead(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).ApiManagement.GatewayApisClient + ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.GatewayApiID(d.Id()) + if err != nil { + return err + } + + apiId := parse.NewApiID(id.SubscriptionId, id.ResourceGroup, id.ServiceName, id.ApiName) + resp, err := client.GetEntityTag(ctx, id.ResourceGroup, id.ServiceName, id.GatewayName, id.ApiName) + if err != nil { + if utils.ResponseWasNotFound(resp) { + log.Printf("[DEBUG] %s does not exist - removing from state!", id) + d.SetId("") + return nil + } + if utils.ResponseWasStatusCode(resp, http.StatusNoContent) { + log.Printf("[DEBUG] %s returned with No Content status - bypassing and moving on!", id) + } else { + return fmt.Errorf("retrieving %s: %+v", id, err) + } + } + if utils.ResponseWasNotFound(resp) { + log.Printf("[DEBUG] %s was not found - removing from state!", id) + d.SetId("") + return nil + } + gateway := parse.NewGatewayID(id.SubscriptionId, id.ResourceGroup, id.ServiceName, id.GatewayName) + + 
d.Set("api_id", apiId.ID()) + d.Set("gateway_id", gateway.ID()) + + return nil +} + +func resourceApiManagementGatewayApiDelete(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).ApiManagement.GatewayApisClient + ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.GatewayApiID(d.Id()) + if err != nil { + return err + } + + if resp, err := client.Delete(ctx, id.ResourceGroup, id.ServiceName, id.GatewayName, id.ApiName); err != nil { + if !utils.ResponseWasNotFound(resp) { + return fmt.Errorf("removing %s: %+v", id, err) + } + } + + return nil +} diff --git a/azurerm/internal/services/apimanagement/api_management_gateway_api_resource_test.go b/azurerm/internal/services/apimanagement/api_management_gateway_api_resource_test.go new file mode 100644 index 000000000000..86d9226fdcfe --- /dev/null +++ b/azurerm/internal/services/apimanagement/api_management_gateway_api_resource_test.go @@ -0,0 +1,128 @@ +package apimanagement_test + +import ( + "context" + "fmt" + "net/http" + "testing" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +type ApiManagementGatewayAPIResource struct { +} + +func TestAccApiManagementGatewayApi_basic(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_api_management_gateway_api", "test") + r := ApiManagementGatewayAPIResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: 
acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccApiManagementGatewayApi_requiresImport(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_api_management_gateway_api", "test") + r := ApiManagementGatewayAPIResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.RequiresImportErrorStep(r.requiresImport), + }) +} + +func (ApiManagementGatewayAPIResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { + id, err := parse.GatewayApiID(state.ID) + if err != nil { + return nil, err + } + if resp, err := clients.ApiManagement.GatewayApisClient.GetEntityTag(ctx, id.ResourceGroup, id.ServiceName, id.GatewayName, id.ApiName); err != nil { + if utils.ResponseWasNotFound(resp) { + return nil, fmt.Errorf("reading ApiManagement Gateway (%s): %+v", id, err) + } + + if !utils.ResponseWasStatusCode(resp, http.StatusNoContent) { + return nil, fmt.Errorf("reading ApiManagement Gateway (%s): %+v", id, err) + } + } + + return utils.Bool(true), nil +} + +func (ApiManagementGatewayAPIResource) basic(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_api_management" "test" { + name = "acctestAM-%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + publisher_name = "pub1" + publisher_email = "pub1@email.com" + + sku_name = "Developer_1" +} + +resource "azurerm_api_management_gateway" "test" { + name = "acctestAMGateway-%d" + api_management_id = azurerm_api_management.test.id + description = "this is a test gateway" + + location_data { + name = "old world" + city = "test city" + 
district = "test district" + region = "test region" + } +} + +resource "azurerm_api_management_api" "test" { + name = "acctestapi-%d" + resource_group_name = azurerm_resource_group.test.name + api_management_name = azurerm_api_management.test.name + display_name = "api1" + path = "api1" + protocols = ["https"] + revision = "1" +} + +resource "azurerm_api_management_gateway_api" "test" { + gateway_id = azurerm_api_management_gateway.test.id + api_id = azurerm_api_management_api.test.id +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger) +} + +func (r ApiManagementGatewayAPIResource) requiresImport(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_api_management_gateway_api" "import" { + gateway_id = azurerm_api_management_gateway_api.test.gateway_id + api_id = azurerm_api_management_gateway_api.test.api_id +} +`, r.basic(data)) +} diff --git a/azurerm/internal/services/apimanagement/api_management_gateway_data_source.go b/azurerm/internal/services/apimanagement/api_management_gateway_data_source.go new file mode 100644 index 000000000000..7014b321040d --- /dev/null +++ b/azurerm/internal/services/apimanagement/api_management_gateway_data_source.go @@ -0,0 +1,103 @@ +package apimanagement + +import ( + "fmt" + "time" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func dataSourceApiManagementGateway() *pluginsdk.Resource { + return &pluginsdk.Resource{ + Read: dataSourceApiManagementGatewayRead, + + Timeouts: &pluginsdk.ResourceTimeout{ + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + }, + + Schema: map[string]*pluginsdk.Schema{ + "name": schemaz.SchemaApiManagementChildDataSourceName(), + + "api_management_id": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validate.ApiManagementID, + }, + + "description": { + Type: pluginsdk.TypeString, + Computed: true, + }, + + "location_data": { + Type: pluginsdk.TypeList, + Computed: true, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Computed: true, + }, + "city": { + Type: pluginsdk.TypeString, + Computed: true, + }, + "district": { + Type: pluginsdk.TypeString, + Computed: true, + }, + "region": { + Type: pluginsdk.TypeString, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func dataSourceApiManagementGatewayRead(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).ApiManagement.GatewayClient + ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) + defer cancel() + + apimId, err := parse.ApiManagementID(d.Get("api_management_id").(string)) + if err != nil { + return fmt.Errorf("parsing `api_management_id`: %v", err) + } + + id := parse.NewGatewayID(apimId.SubscriptionId, apimId.ResourceGroup, apimId.ServiceName, d.Get("name").(string)) + + resp, err := client.Get(ctx, id.ResourceGroup, id.ServiceName, id.Name) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + return fmt.Errorf("%s was not found", id) + } + + return fmt.Errorf("making read request %s: %+v", id, err) + } + + _, err = parse.GatewayID(*resp.ID) + if err != nil { + return fmt.Errorf("parsing Gateway ID %q", *resp.ID) + } + + d.SetId(id.ID()) + + d.Set("name", resp.Name) + 
d.Set("api_management_id", apimId.ID()) + + if properties := resp.GatewayContractProperties; properties != nil { + d.Set("description", properties.Description) + d.Set("location_data", flattenApiManagementGatewayLocationData(properties.LocationData)) + } + + return nil +} diff --git a/azurerm/internal/services/apimanagement/api_management_gateway_data_source_test.go b/azurerm/internal/services/apimanagement/api_management_gateway_data_source_test.go new file mode 100644 index 000000000000..15029f792fc4 --- /dev/null +++ b/azurerm/internal/services/apimanagement/api_management_gateway_data_source_test.go @@ -0,0 +1,63 @@ +package apimanagement_test + +import ( + "fmt" + "testing" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" +) + +type ApiManagementGatewayDataSource struct { +} + +func TestAccDataSourceApiManagementGateway_basic(t *testing.T) { + data := acceptance.BuildTestData(t, "data.azurerm_api_management_gateway", "test") + r := ApiManagementGatewayDataSource{} + + data.DataSourceTest(t, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).Key("description").HasValue(""), + check.That(data.ResourceName).Key("location_data.0.name").HasValue("test"), + ), + }, + }) +} + +func (ApiManagementGatewayDataSource) basic(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_api_management" "test" { + name = "acctestAM-%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + publisher_name = "pub1" + publisher_email = "pub1@email.com" + sku_name = "Developer_1" +} + +resource "azurerm_api_management_gateway" "test" { + name = "acctestAMGateway-%d" + 
api_management_id = azurerm_api_management.test.id + + location_data { + name = "test" + } +} + +data "azurerm_api_management_gateway" "test" { + name = azurerm_api_management_gateway.test.name + api_management_id = azurerm_api_management_gateway.test.api_management_id +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger) +} diff --git a/azurerm/internal/services/apimanagement/api_management_gateway_resource.go b/azurerm/internal/services/apimanagement/api_management_gateway_resource.go new file mode 100644 index 000000000000..575e1f7ed89e --- /dev/null +++ b/azurerm/internal/services/apimanagement/api_management_gateway_resource.go @@ -0,0 +1,221 @@ +package apimanagement + +import ( + "fmt" + "log" + "time" + + "github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2020-12-01/apimanagement" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func resourceApiManagementGateway() *pluginsdk.Resource { + return &pluginsdk.Resource{ + Create: resourceApiManagementGatewayCreateUpdate, + Read: resourceApiManagementGatewayRead, + Update: resourceApiManagementGatewayCreateUpdate, + Delete: resourceApiManagementGatewayDelete, + + Importer: pluginsdk.ImporterValidatingResourceId(func(id string) error { + _, err := parse.GatewayID(id) + 
return err + }), + + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), + }, + + Schema: map[string]*pluginsdk.Schema{ + "name": schemaz.SchemaApiManagementChildName(), + + "api_management_id": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.ApiManagementID, + }, + + "description": { + Type: pluginsdk.TypeString, + Optional: true, + }, + + "location_data": { + Type: pluginsdk.TypeList, + Required: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Required: true, + }, + "city": { + Type: pluginsdk.TypeString, + Optional: true, + }, + "district": { + Type: pluginsdk.TypeString, + Optional: true, + }, + "region": { + Type: pluginsdk.TypeString, + Optional: true, + }, + }, + }, + }, + }, + } +} + +func resourceApiManagementGatewayCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).ApiManagement.GatewayClient + ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) + defer cancel() + + apimId, err := parse.ApiManagementID(d.Get("api_management_id").(string)) + if err != nil { + return fmt.Errorf("parsing `api_management_id`: %v", err) + } + + id := parse.NewGatewayID(apimId.SubscriptionId, apimId.ResourceGroup, apimId.ServiceName, d.Get("name").(string)) + + if d.IsNewResource() { + existing, err := client.Get(ctx, id.ResourceGroup, id.ServiceName, id.Name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("making read request %s: %+v", id, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_api_management_gateway", id.ID()) + } + } + + description := d.Get("description").(string) + 
locationData := expandApiManagementGatewayLocationData(d.Get("location_data").([]interface{})) + + parameters := apimanagement.GatewayContract{ + GatewayContractProperties: &apimanagement.GatewayContractProperties{ + Description: utils.String(description), + LocationData: locationData, + }, + } + + if _, err := client.CreateOrUpdate(ctx, id.ResourceGroup, id.ServiceName, id.Name, parameters, ""); err != nil { + return fmt.Errorf("creating or updating %s: %+v", id, err) + } + + d.SetId(id.ID()) + + return resourceApiManagementGatewayRead(d, meta) +} + +func resourceApiManagementGatewayRead(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).ApiManagement.GatewayClient + ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.GatewayID(d.Id()) + if err != nil { + return err + } + resourceGroup := id.ResourceGroup + serviceName := id.ServiceName + name := id.Name + apimId := parse.NewApiManagementID(id.SubscriptionId, id.ResourceGroup, id.ServiceName) + + resp, err := client.Get(ctx, resourceGroup, serviceName, name) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + log.Printf("[DEBUG] Gateway %q (Resource Group %q / API Management Service %q) was not found - removing from state!", name, resourceGroup, serviceName) + d.SetId("") + return nil + } + + return fmt.Errorf("making read request for %s: %+v", id, err) + } + + d.Set("name", resp.Name) + d.Set("api_management_id", apimId.ID()) + + if properties := resp.GatewayContractProperties; properties != nil { + d.Set("description", properties.Description) + d.Set("location_data", flattenApiManagementGatewayLocationData(properties.LocationData)) + } + + return nil +} + +func resourceApiManagementGatewayDelete(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).ApiManagement.GatewayClient + ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) + defer cancel() + + 
id, err := parse.GatewayID(d.Id()) + if err != nil { + return err + } + resourceGroup := id.ResourceGroup + serviceName := id.ServiceName + name := id.Name + + if resp, err := client.Delete(ctx, resourceGroup, serviceName, name, ""); err != nil { + if !utils.ResponseWasNotFound(resp) { + return fmt.Errorf("deleting %s: %+v", id, err) + } + } + + return nil +} + +func expandApiManagementGatewayLocationData(input []interface{}) *apimanagement.ResourceLocationDataContract { + if len(input) == 0 { + return nil + } + + locationData := apimanagement.ResourceLocationDataContract{} + + vs := input[0].(map[string]interface{}) + for k, v := range vs { + switch k { + case "name": + locationData.Name = utils.String(v.(string)) + case "city": + locationData.City = utils.String(v.(string)) + case "district": + locationData.District = utils.String(v.(string)) + case "region": + locationData.CountryOrRegion = utils.String(v.(string)) + } + } + + return &locationData +} + +func flattenApiManagementGatewayLocationData(input *apimanagement.ResourceLocationDataContract) []interface{} { + if input == nil { + return []interface{}{} + } + + locationData := map[string]interface{}{ + "name": utils.NormalizeNilableString(input.Name), + "city": utils.NormalizeNilableString(input.City), + "region": utils.NormalizeNilableString(input.CountryOrRegion), + "district": utils.NormalizeNilableString(input.District), + } + + return []interface{}{locationData} +} diff --git a/azurerm/internal/services/apimanagement/api_management_gateway_resource_test.go b/azurerm/internal/services/apimanagement/api_management_gateway_resource_test.go new file mode 100644 index 000000000000..87d53c435f82 --- /dev/null +++ b/azurerm/internal/services/apimanagement/api_management_gateway_resource_test.go @@ -0,0 +1,241 @@ +package apimanagement_test + +import ( + "context" + "fmt" + "testing" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +type ApiManagementGatewayResource struct { +} + +func TestAccApiManagementGateway_basic(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_api_management_gateway", "test") + r := ApiManagementGatewayResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("description").HasValue(""), + check.That(data.ResourceName).Key("location_data.0.name").HasValue("test"), + ), + }, + data.ImportStep(), + }) +} + +func TestAccApiManagementGateway_requiresImport(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_api_management_gateway", "test") + r := ApiManagementGatewayResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.RequiresImportErrorStep(r.requiresImport), + }) +} + +func TestAccApiManagementGateway_complete(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_api_management_gateway", "test") + r := ApiManagementGatewayResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.complete(data, "test description", "test location"), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("description").HasValue("test description"), + 
check.That(data.ResourceName).Key("location_data.0.name").HasValue("test location"), + check.That(data.ResourceName).Key("location_data.0.city").HasValue("test city"), + check.That(data.ResourceName).Key("location_data.0.district").HasValue("test district"), + check.That(data.ResourceName).Key("location_data.0.region").HasValue("test region"), + ), + }, + data.ImportStep(), + }) +} + +func TestAccApiManagementGateway_update(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_api_management_gateway", "test") + r := ApiManagementGatewayResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.complete(data, "original description", "original location"), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("description").HasValue("original description"), + check.That(data.ResourceName).Key("location_data.#").HasValue("1"), + check.That(data.ResourceName).Key("location_data.0.name").HasValue("original location"), + check.That(data.ResourceName).Key("location_data.0.city").HasValue("test city"), + check.That(data.ResourceName).Key("location_data.0.district").HasValue("test district"), + check.That(data.ResourceName).Key("location_data.0.region").HasValue("test region"), + ), + }, + { + Config: r.update(data, "updated description", "updated location"), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("description").HasValue("updated description"), + check.That(data.ResourceName).Key("location_data.0.name").HasValue("updated location"), + check.That(data.ResourceName).Key("location_data.0.city").HasValue(""), + check.That(data.ResourceName).Key("location_data.0.district").HasValue(""), + check.That(data.ResourceName).Key("location_data.0.region").HasValue(""), + ), + }, + { + Config: r.complete(data, "original description", "original location"), + Check: acceptance.ComposeTestCheckFunc( + 
check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("description").HasValue("original description"), + check.That(data.ResourceName).Key("location_data.0.name").HasValue("original location"), + check.That(data.ResourceName).Key("location_data.0.city").HasValue("test city"), + check.That(data.ResourceName).Key("location_data.0.district").HasValue("test district"), + check.That(data.ResourceName).Key("location_data.0.region").HasValue("test region"), + ), + }, + }) +} + +func (ApiManagementGatewayResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { + id, err := parse.GatewayID(state.ID) + if err != nil { + return nil, err + } + + resp, err := clients.ApiManagement.GatewayClient.Get(ctx, id.ResourceGroup, id.ServiceName, id.Name) + if err != nil { + return nil, fmt.Errorf("reading ApiManagementGateway (%s): %+v", id, err) + } + + return utils.Bool(resp.ID != nil), nil +} + +func (ApiManagementGatewayResource) basic(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_api_management" "test" { + name = "acctestAM-%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + publisher_name = "pub1" + publisher_email = "pub1@email.com" + + sku_name = "Developer_1" +} + +resource "azurerm_api_management_gateway" "test" { + name = "acctestAMGateway-%d" + api_management_id = azurerm_api_management.test.id + + location_data { + name = "test" + } +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger) +} + +func (r ApiManagementGatewayResource) requiresImport(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_api_management_gateway" "import" { + name = azurerm_api_management_gateway.test.name + api_management_id = 
azurerm_api_management_gateway.test.api_management_id + + location_data { + name = "test" + } +} +`, r.basic(data)) +} + +func (ApiManagementGatewayResource) update(data acceptance.TestData, description string, locationName string) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_api_management" "test" { + name = "acctestAM-%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + publisher_name = "pub1" + publisher_email = "pub1@email.com" + + sku_name = "Developer_1" +} + +resource "azurerm_api_management_gateway" "test" { + name = "acctestAMGateway-%d" + api_management_id = azurerm_api_management.test.id + description = "%s" + + location_data { + name = "%s" + } +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, description, locationName) +} + +func (ApiManagementGatewayResource) complete(data acceptance.TestData, description string, locationName string) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_api_management" "test" { + name = "acctestAM-%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + publisher_name = "pub1" + publisher_email = "pub1@email.com" + + sku_name = "Developer_1" +} + +resource "azurerm_api_management_gateway" "test" { + name = "acctestAMGateway-%d" + api_management_id = azurerm_api_management.test.id + description = "%s" + + location_data { + name = "%s" + city = "test city" + district = "test district" + region = "test region" + } +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, description, locationName) +} diff --git 
a/azurerm/internal/services/apimanagement/api_management_group_data_source.go b/azurerm/internal/services/apimanagement/api_management_group_data_source.go index f06055b04ba6..9acb71a71cd7 100644 --- a/azurerm/internal/services/apimanagement/api_management_group_data_source.go +++ b/azurerm/internal/services/apimanagement/api_management_group_data_source.go @@ -4,11 +4,10 @@ import ( "fmt" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" diff --git a/azurerm/internal/services/apimanagement/api_management_group_resource.go b/azurerm/internal/services/apimanagement/api_management_group_resource.go index d9b7192f1414..6cfa2cac4d14 100644 --- a/azurerm/internal/services/apimanagement/api_management_group_resource.go +++ b/azurerm/internal/services/apimanagement/api_management_group_resource.go @@ -5,13 +5,12 @@ import ( "log" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" - "github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2020-12-01/apimanagement" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" diff --git a/azurerm/internal/services/apimanagement/api_management_group_user_resource.go b/azurerm/internal/services/apimanagement/api_management_group_user_resource.go index 72342ece79df..8cf69dee739e 100644 --- a/azurerm/internal/services/apimanagement/api_management_group_user_resource.go +++ b/azurerm/internal/services/apimanagement/api_management_group_user_resource.go @@ -5,12 +5,11 @@ import ( "log" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" diff --git a/azurerm/internal/services/apimanagement/api_management_group_user_resource_test.go b/azurerm/internal/services/apimanagement/api_management_group_user_resource_test.go index 3b363d11a57a..96464e71f0ba 100644 --- a/azurerm/internal/services/apimanagement/api_management_group_user_resource_test.go +++ b/azurerm/internal/services/apimanagement/api_management_group_user_resource_test.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" @@ -57,9 +56,14 @@ func (ApiManagementGroupUserResource) Exists(ctx context.Context, clients *clien groupName := id.Path["groups"] userId := id.Path["users"] - if _, err = clients.ApiManagement.GroupUsersClient.CheckEntityExists(ctx, resourceGroup, serviceName, groupName, userId); err != nil { + resp, err := clients.ApiManagement.GroupUsersClient.CheckEntityExists(ctx, resourceGroup, serviceName, groupName, userId) + if err != nil { return nil, fmt.Errorf("reading ApiManagement Group User (%s): %+v", id, err) } + // the HEAD API not found returns resp 404, but no err + if utils.ResponseWasNotFound(resp) { + return utils.Bool(false), nil + } return utils.Bool(true), nil } diff --git a/azurerm/internal/services/apimanagement/api_management_identity_provider_aad_resource.go b/azurerm/internal/services/apimanagement/api_management_identity_provider_aad_resource.go index b4ecb4bf78b4..889926bd8daa 100644 --- a/azurerm/internal/services/apimanagement/api_management_identity_provider_aad_resource.go +++ 
b/azurerm/internal/services/apimanagement/api_management_identity_provider_aad_resource.go @@ -5,13 +5,12 @@ import ( "log" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" - "github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2020-12-01/apimanagement" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" diff --git a/azurerm/internal/services/apimanagement/api_management_identity_provider_aad_resource_test.go b/azurerm/internal/services/apimanagement/api_management_identity_provider_aad_resource_test.go index 26e6ecb0b2b9..e1a54646d58d 100644 --- a/azurerm/internal/services/apimanagement/api_management_identity_provider_aad_resource_test.go +++ b/azurerm/internal/services/apimanagement/api_management_identity_provider_aad_resource_test.go @@ -6,12 +6,11 @@ import ( "testing" "github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2020-12-01/apimanagement" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) diff --git a/azurerm/internal/services/apimanagement/api_management_identity_provider_aadb2c_resource.go b/azurerm/internal/services/apimanagement/api_management_identity_provider_aadb2c_resource.go index f354c4776bfd..25f43744bb15 100644 --- a/azurerm/internal/services/apimanagement/api_management_identity_provider_aadb2c_resource.go +++ b/azurerm/internal/services/apimanagement/api_management_identity_provider_aadb2c_resource.go @@ -6,7 +6,6 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2020-12-01/apimanagement" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" diff --git a/azurerm/internal/services/apimanagement/api_management_identity_provider_aadb2c_resource_test.go b/azurerm/internal/services/apimanagement/api_management_identity_provider_aadb2c_resource_test.go index 50a763bd07b2..9fe45617c573 100644 --- a/azurerm/internal/services/apimanagement/api_management_identity_provider_aadb2c_resource_test.go +++ b/azurerm/internal/services/apimanagement/api_management_identity_provider_aadb2c_resource_test.go @@ -8,12 +8,11 @@ import ( "testing" "github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2020-12-01/apimanagement" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" - 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) diff --git a/azurerm/internal/services/apimanagement/api_management_identity_provider_facebook_resource.go b/azurerm/internal/services/apimanagement/api_management_identity_provider_facebook_resource.go index d8a384dd677c..c3a28991b5d1 100644 --- a/azurerm/internal/services/apimanagement/api_management_identity_provider_facebook_resource.go +++ b/azurerm/internal/services/apimanagement/api_management_identity_provider_facebook_resource.go @@ -5,13 +5,12 @@ import ( "log" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" - "github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2020-12-01/apimanagement" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" diff --git a/azurerm/internal/services/apimanagement/api_management_identity_provider_facebook_resource_test.go b/azurerm/internal/services/apimanagement/api_management_identity_provider_facebook_resource_test.go index eb43a5ff9faf..50a9a1aad811 100644 --- a/azurerm/internal/services/apimanagement/api_management_identity_provider_facebook_resource_test.go +++ b/azurerm/internal/services/apimanagement/api_management_identity_provider_facebook_resource_test.go @@ -6,12 +6,11 @@ import ( "testing" "github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2020-12-01/apimanagement" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) diff --git a/azurerm/internal/services/apimanagement/api_management_identity_provider_google_resource.go b/azurerm/internal/services/apimanagement/api_management_identity_provider_google_resource.go index 945fe6327733..082d8e54df12 100644 --- a/azurerm/internal/services/apimanagement/api_management_identity_provider_google_resource.go +++ b/azurerm/internal/services/apimanagement/api_management_identity_provider_google_resource.go @@ -5,14 +5,13 @@ import ( "log" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" - 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" - "github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2020-12-01/apimanagement" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" diff --git a/azurerm/internal/services/apimanagement/api_management_identity_provider_google_resource_test.go b/azurerm/internal/services/apimanagement/api_management_identity_provider_google_resource_test.go index 57cd8c932778..9cedda9a6845 100644 --- a/azurerm/internal/services/apimanagement/api_management_identity_provider_google_resource_test.go +++ b/azurerm/internal/services/apimanagement/api_management_identity_provider_google_resource_test.go @@ -6,12 +6,11 @@ import ( "testing" "github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2020-12-01/apimanagement" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) diff --git a/azurerm/internal/services/apimanagement/api_management_identity_provider_microsoft_resource.go b/azurerm/internal/services/apimanagement/api_management_identity_provider_microsoft_resource.go index e094a58b181a..1b68cbef6aba 100644 --- a/azurerm/internal/services/apimanagement/api_management_identity_provider_microsoft_resource.go +++ b/azurerm/internal/services/apimanagement/api_management_identity_provider_microsoft_resource.go @@ -5,13 +5,12 @@ import ( "log" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" - "github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2020-12-01/apimanagement" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" diff --git a/azurerm/internal/services/apimanagement/api_management_identity_provider_microsoft_resource_test.go 
b/azurerm/internal/services/apimanagement/api_management_identity_provider_microsoft_resource_test.go index dc5307411599..cbe9a0e1e907 100644 --- a/azurerm/internal/services/apimanagement/api_management_identity_provider_microsoft_resource_test.go +++ b/azurerm/internal/services/apimanagement/api_management_identity_provider_microsoft_resource_test.go @@ -6,12 +6,11 @@ import ( "testing" "github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2020-12-01/apimanagement" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) diff --git a/azurerm/internal/services/apimanagement/api_management_identity_provider_twitter_resource.go b/azurerm/internal/services/apimanagement/api_management_identity_provider_twitter_resource.go index 132913c46ae1..f940c5f17303 100644 --- a/azurerm/internal/services/apimanagement/api_management_identity_provider_twitter_resource.go +++ b/azurerm/internal/services/apimanagement/api_management_identity_provider_twitter_resource.go @@ -5,13 +5,12 @@ import ( "log" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" - "github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2020-12-01/apimanagement" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" diff --git a/azurerm/internal/services/apimanagement/api_management_identity_provider_twitter_resource_test.go b/azurerm/internal/services/apimanagement/api_management_identity_provider_twitter_resource_test.go index c26d6dc8a4be..d8db27a8a2de 100644 --- a/azurerm/internal/services/apimanagement/api_management_identity_provider_twitter_resource_test.go +++ b/azurerm/internal/services/apimanagement/api_management_identity_provider_twitter_resource_test.go @@ -6,12 +6,11 @@ import ( "testing" "github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2020-12-01/apimanagement" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) diff --git a/azurerm/internal/services/apimanagement/api_management_logger_resource_test.go 
b/azurerm/internal/services/apimanagement/api_management_logger_resource_test.go index e4a3b4310420..9a99bd45c824 100644 --- a/azurerm/internal/services/apimanagement/api_management_logger_resource_test.go +++ b/azurerm/internal/services/apimanagement/api_management_logger_resource_test.go @@ -78,7 +78,7 @@ func TestAccApiManagementLogger_basicApplicationInsights(t *testing.T) { ResourceName: data.ResourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"application_insights.#", "application_insights.0.instrumentation_key"}, + ImportStateVerifyIgnore: []string{"application_insights.#", "application_insights.0.instrumentation_key", "application_insights.0.%"}, }, }) } @@ -104,7 +104,7 @@ func TestAccApiManagementLogger_complete(t *testing.T) { ResourceName: data.ResourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"application_insights.#", "application_insights.0.instrumentation_key"}, + ImportStateVerifyIgnore: []string{"application_insights.#", "application_insights.0.instrumentation_key", "application_insights.0.%"}, }, }) } @@ -233,7 +233,7 @@ resource "azurerm_api_management" "test" { publisher_name = "pub1" publisher_email = "pub1@email.com" - sku_name = "Developer_1" + sku_name = "Consumption_0" } resource "azurerm_api_management_logger" "test" { @@ -291,7 +291,7 @@ resource "azurerm_api_management" "test" { publisher_name = "pub1" publisher_email = "pub1@email.com" - sku_name = "Developer_1" + sku_name = "Consumption_0" } resource "azurerm_api_management_logger" "test" { @@ -331,7 +331,7 @@ resource "azurerm_api_management" "test" { publisher_name = "pub1" publisher_email = "pub1@email.com" - sku_name = "Developer_1" + sku_name = "Consumption_0" } resource "azurerm_api_management_logger" "test" { diff --git a/azurerm/internal/services/apimanagement/api_management_named_value_resource.go b/azurerm/internal/services/apimanagement/api_management_named_value_resource.go index 
21e63ad85ee5..0461e955d381 100644 --- a/azurerm/internal/services/apimanagement/api_management_named_value_resource.go +++ b/azurerm/internal/services/apimanagement/api_management_named_value_resource.go @@ -5,13 +5,13 @@ import ( "log" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" - "github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2020-12-01/apimanagement" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" + keyVaultValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/keyvault/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" @@ -47,11 +47,33 @@ func resourceApiManagementNamedValue() *pluginsdk.Resource { ValidateFunc: validation.StringIsNotEmpty, }, + "value_from_key_vault": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + ExactlyOneOf: []string{"value", "value_from_key_vault"}, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "secret_id": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: keyVaultValidate.NestedItemIdWithOptionalVersion, + }, + "identity_client_id": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.IsUUID, + 
}, + }, + }, + }, + "value": { Type: pluginsdk.TypeString, - Required: true, + Optional: true, Sensitive: true, ValidateFunc: validation.StringIsNotEmpty, + ExactlyOneOf: []string{"value", "value_from_key_vault"}, }, "secret": { @@ -97,10 +119,14 @@ func resourceApiManagementNamedValueCreateUpdate(d *pluginsdk.ResourceData, meta NamedValueCreateContractProperties: &apimanagement.NamedValueCreateContractProperties{ DisplayName: utils.String(d.Get("display_name").(string)), Secret: utils.Bool(d.Get("secret").(bool)), - Value: utils.String(d.Get("value").(string)), + KeyVault: expandApiManagementNamedValueKeyVault(d.Get("value_from_key_vault").([]interface{})), }, } + if v, ok := d.GetOk("value"); ok { + parameters.NamedValueCreateContractProperties.Value = utils.String(v.(string)) + } + if tags, ok := d.GetOk("tags"); ok { parameters.NamedValueCreateContractProperties.Tags = utils.ExpandStringSlice(tags.([]interface{})) } @@ -161,6 +187,9 @@ func resourceApiManagementNamedValueRead(d *pluginsdk.ResourceData, meta interfa if properties.Secret != nil && !*properties.Secret { d.Set("value", properties.Value) } + if err := d.Set("value_from_key_vault", flattenApiManagementNamedValueKeyVault(properties.KeyVault)); err != nil { + return fmt.Errorf("setting `value_from_key_vault`: %+v", err) + } d.Set("tags", properties.Tags) } @@ -188,3 +217,37 @@ func resourceApiManagementNamedValueDelete(d *pluginsdk.ResourceData, meta inter return nil } + +func expandApiManagementNamedValueKeyVault(inputs []interface{}) *apimanagement.KeyVaultContractCreateProperties { + if len(inputs) == 0 { + return nil + } + input := inputs[0].(map[string]interface{}) + + return &apimanagement.KeyVaultContractCreateProperties{ + SecretIdentifier: utils.String(input["secret_id"].(string)), + IdentityClientID: utils.String(input["identity_client_id"].(string)), + } +} + +func flattenApiManagementNamedValueKeyVault(input *apimanagement.KeyVaultContractProperties) []interface{} { + if input == nil { + 
return []interface{}{} + } + + var secretId, clientId string + if input.SecretIdentifier != nil { + secretId = *input.SecretIdentifier + } + + if input.IdentityClientID != nil { + clientId = *input.IdentityClientID + } + + return []interface{}{ + map[string]interface{}{ + "secret_id": secretId, + "identity_client_id": clientId, + }, + } +} diff --git a/azurerm/internal/services/apimanagement/api_management_named_value_resource_test.go b/azurerm/internal/services/apimanagement/api_management_named_value_resource_test.go index afe161490a8c..481c1d721c28 100644 --- a/azurerm/internal/services/apimanagement/api_management_named_value_resource_test.go +++ b/azurerm/internal/services/apimanagement/api_management_named_value_resource_test.go @@ -31,6 +31,50 @@ func TestAccApiManagementNamedValue_basic(t *testing.T) { }) } +func TestAccApiManagementNamedValue_keyVault(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_api_management_named_value", "test") + r := ApiManagementNamedValueResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.keyVault(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccApiManagementNamedValue_keyVaultUpdate(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_api_management_named_value", "test") + r := ApiManagementNamedValueResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.keyVault(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.keyVaultUpdate(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.keyVaultUpdateToValue(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + func 
TestAccApiManagementNamedValue_update(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_api_management_named_value", "test") r := ApiManagementNamedValueResource{} @@ -70,7 +114,7 @@ func (ApiManagementNamedValueResource) Exists(ctx context.Context, clients *clie return utils.Bool(resp.ID != nil), nil } -func (ApiManagementNamedValueResource) basic(data acceptance.TestData) string { +func (ApiManagementNamedValueResource) template(data acceptance.TestData) string { return fmt.Sprintf(` provider "azurerm" { features {} @@ -88,8 +132,14 @@ resource "azurerm_api_management" "test" { publisher_name = "pub1" publisher_email = "pub1@email.com" - sku_name = "Developer_1" + sku_name = "Consumption_0" } +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger) +} + +func (r ApiManagementNamedValueResource) basic(data acceptance.TestData) string { + return fmt.Sprintf(` +%s resource "azurerm_api_management_named_value" "test" { name = "acctestAMProperty-%d" @@ -99,38 +149,186 @@ resource "azurerm_api_management_named_value" "test" { value = "Test Value" tags = ["tag1", "tag2"] } -`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger) +`, r.template(data), data.RandomInteger, data.RandomInteger) +} + +func (r ApiManagementNamedValueResource) update(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_api_management_named_value" "test" { + name = "acctestAMProperty-%d" + resource_group_name = azurerm_resource_group.test.name + api_management_name = azurerm_api_management.test.name + display_name = "TestProperty2%d" + value = "Test Value2" + secret = true + tags = ["tag3", "tag4"] +} +`, r.template(data), data.RandomInteger, data.RandomInteger) } -func (ApiManagementNamedValueResource) update(data acceptance.TestData) string { +func (r ApiManagementNamedValueResource) keyVaultTemplate(data acceptance.TestData) string { return fmt.Sprintf(` provider "azurerm" { - features {} + 
features { + key_vault { + purge_soft_delete_on_destroy = true + } + } } resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "%s" + name = "acctestRG-Apim-%[1]d" + location = "%[2]s" +} + +resource "azurerm_user_assigned_identity" "test" { + name = "acctestUAI-%[1]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name } resource "azurerm_api_management" "test" { - name = "acctestAM-%d" + name = "acctestAM-%[1]d" location = azurerm_resource_group.test.location resource_group_name = azurerm_resource_group.test.name publisher_name = "pub1" publisher_email = "pub1@email.com" - sku_name = "Developer_1" + sku_name = "Consumption_0" + + identity { + type = "UserAssigned" + identity_ids = [ + azurerm_user_assigned_identity.test.id, + ] + } +} + +data "azurerm_client_config" "current" {} + +resource "azurerm_key_vault" "test" { + name = "acctestKV-%[3]s" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + tenant_id = data.azurerm_client_config.current.tenant_id + sku_name = "standard" +} + +resource "azurerm_key_vault_access_policy" "test" { + key_vault_id = azurerm_key_vault.test.id + tenant_id = data.azurerm_client_config.current.tenant_id + object_id = data.azurerm_client_config.current.object_id + certificate_permissions = [ + "Create", + "Delete", + "Deleteissuers", + "Get", + "Getissuers", + "Import", + "List", + "Listissuers", + "Managecontacts", + "Manageissuers", + "Setissuers", + "Update", + "Purge", + ] + secret_permissions = [ + "Get", + "Delete", + "List", + "Purge", + "Recover", + "Set", + ] +} + +resource "azurerm_key_vault_access_policy" "test2" { + key_vault_id = azurerm_key_vault.test.id + tenant_id = azurerm_user_assigned_identity.test.tenant_id + object_id = azurerm_user_assigned_identity.test.principal_id + secret_permissions = [ + "Get", + "List", + ] +} + +resource "azurerm_key_vault_secret" "test" { + name = 
"secret-%[3]s" + value = "rick-and-morty" + key_vault_id = azurerm_key_vault.test.id + + depends_on = [azurerm_key_vault_access_policy.test] +} + +resource "azurerm_key_vault_secret" "test2" { + name = "secret2-%[3]s" + value = "rick-and-morty2" + key_vault_id = azurerm_key_vault.test.id + + depends_on = [azurerm_key_vault_access_policy.test] +} + +`, data.RandomInteger, data.Locations.Primary, data.RandomString) +} + +func (r ApiManagementNamedValueResource) keyVault(data acceptance.TestData) string { + return fmt.Sprintf(` +%[1]s + +resource "azurerm_api_management_named_value" "test" { + name = "acctestAMProperty-%[2]d" + resource_group_name = azurerm_resource_group.test.name + api_management_name = azurerm_api_management.test.name + display_name = "TestKeyVault%[2]d" + secret = true + value_from_key_vault { + secret_id = azurerm_key_vault_secret.test.id + identity_client_id = azurerm_user_assigned_identity.test.client_id + } + + tags = ["tag1", "tag2"] + + depends_on = [azurerm_key_vault_access_policy.test2] +} +`, r.keyVaultTemplate(data), data.RandomInteger) } +func (r ApiManagementNamedValueResource) keyVaultUpdate(data acceptance.TestData) string { + return fmt.Sprintf(` +%[1]s + resource "azurerm_api_management_named_value" "test" { - name = "acctestAMProperty-%d" - resource_group_name = azurerm_api_management.test.resource_group_name + name = "acctestAMProperty-%[2]d" + resource_group_name = azurerm_resource_group.test.name api_management_name = azurerm_api_management.test.name - display_name = "TestProperty2%d" - value = "Test Value2" + display_name = "TestKeyVault%[2]d" secret = true - tags = ["tag3", "tag4"] + value_from_key_vault { + secret_id = azurerm_key_vault_secret.test2.id + identity_client_id = azurerm_user_assigned_identity.test.client_id + } + tags = ["tag3", "tag4"] + + depends_on = [azurerm_key_vault_access_policy.test2] +} +`, r.keyVaultTemplate(data), data.RandomInteger) +} + +func (r ApiManagementNamedValueResource) 
keyVaultUpdateToValue(data acceptance.TestData) string { + return fmt.Sprintf(` +%[1]s + +resource "azurerm_api_management_named_value" "test" { + name = "acctestAMProperty-%[2]d" + resource_group_name = azurerm_resource_group.test.name + api_management_name = azurerm_api_management.test.name + display_name = "TestKeyVault%[2]d" + secret = false + value = "Key Vault to Value" + tags = ["tag5", "tag6"] } -`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger) +`, r.keyVaultTemplate(data), data.RandomInteger) } diff --git a/azurerm/internal/services/apimanagement/api_management_openid_connect_provider_resource.go b/azurerm/internal/services/apimanagement/api_management_openid_connect_provider_resource.go index 0087de2e5529..a863f85d279c 100644 --- a/azurerm/internal/services/apimanagement/api_management_openid_connect_provider_resource.go +++ b/azurerm/internal/services/apimanagement/api_management_openid_connect_provider_resource.go @@ -5,13 +5,12 @@ import ( "log" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" - "github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2020-12-01/apimanagement" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" diff --git a/azurerm/internal/services/apimanagement/api_management_openid_connect_provider_resource_test.go b/azurerm/internal/services/apimanagement/api_management_openid_connect_provider_resource_test.go index 0b8c7e4b6759..614c8949bcd9 100644 --- a/azurerm/internal/services/apimanagement/api_management_openid_connect_provider_resource_test.go +++ b/azurerm/internal/services/apimanagement/api_management_openid_connect_provider_resource_test.go @@ -151,7 +151,7 @@ resource "azurerm_api_management" "test" { resource_group_name = azurerm_resource_group.test.name publisher_name = "pub1" publisher_email = "pub1@email.com" - sku_name = "Developer_1" + sku_name = "Consumption_0" } `, data.RandomInteger, data.Locations.Primary, data.RandomInteger) } diff --git a/azurerm/internal/services/apimanagement/api_management_policy_resource_test.go b/azurerm/internal/services/apimanagement/api_management_policy_resource_test.go index 01677d7088ca..b7db0cc97bb7 100644 --- a/azurerm/internal/services/apimanagement/api_management_policy_resource_test.go +++ b/azurerm/internal/services/apimanagement/api_management_policy_resource_test.go @@ -101,12 +101,12 @@ resource "azurerm_api_management" "test" { resource_group_name = azurerm_resource_group.test.name publisher_name = "pub1" publisher_email = "pub1@email.com" - sku_name = "Developer_1" + sku_name = "Consumption_0" } resource "azurerm_api_management_policy" "test" { api_management_id = azurerm_api_management.test.id - xml_link = "https://raw.githubusercontent.com/terraform-providers/terraform-provider-azurerm/master/azurerm/internal/services/apimanagement/tests/testdata/api_management_policy_test.xml" + xml_link = "https://raw.githubusercontent.com/terraform-providers/terraform-provider-azurerm/master/azurerm/internal/services/apimanagement/testdata/api_management_policy_test.xml" } `, data.RandomInteger, data.Locations.Primary) } @@ -128,7 
+128,7 @@ resource "azurerm_api_management" "test" { resource_group_name = azurerm_resource_group.test.name publisher_name = "pub1" publisher_email = "pub1@email.com" - sku_name = "Developer_1" + sku_name = "Consumption_0" } resource "azurerm_api_management_policy" "test" { diff --git a/azurerm/internal/services/apimanagement/api_management_product_api_resource.go b/azurerm/internal/services/apimanagement/api_management_product_api_resource.go index 69f7de16edb5..2fb348a47b0b 100644 --- a/azurerm/internal/services/apimanagement/api_management_product_api_resource.go +++ b/azurerm/internal/services/apimanagement/api_management_product_api_resource.go @@ -5,12 +5,11 @@ import ( "log" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" diff --git a/azurerm/internal/services/apimanagement/api_management_product_api_resource_test.go b/azurerm/internal/services/apimanagement/api_management_product_api_resource_test.go index 0a85976464d3..55a77acde09c 100644 --- a/azurerm/internal/services/apimanagement/api_management_product_api_resource_test.go +++ 
b/azurerm/internal/services/apimanagement/api_management_product_api_resource_test.go @@ -81,7 +81,7 @@ resource "azurerm_api_management" "test" { publisher_name = "pub1" publisher_email = "pub1@email.com" - sku_name = "Developer_1" + sku_name = "Consumption_0" } resource "azurerm_api_management_product" "test" { diff --git a/azurerm/internal/services/apimanagement/api_management_product_data_source_test.go b/azurerm/internal/services/apimanagement/api_management_product_data_source_test.go index 6c9b7200f743..232c7a92d82a 100644 --- a/azurerm/internal/services/apimanagement/api_management_product_data_source_test.go +++ b/azurerm/internal/services/apimanagement/api_management_product_data_source_test.go @@ -46,7 +46,7 @@ resource "azurerm_api_management" "test" { name = "acctestAM-%d" publisher_name = "pub1" publisher_email = "pub1@email.com" - sku_name = "Developer_1" + sku_name = "Consumption_0" location = azurerm_resource_group.test.location resource_group_name = azurerm_resource_group.test.name } diff --git a/azurerm/internal/services/apimanagement/api_management_product_group_resource.go b/azurerm/internal/services/apimanagement/api_management_product_group_resource.go index 2e4c58d2e577..d26680b65a8b 100644 --- a/azurerm/internal/services/apimanagement/api_management_product_group_resource.go +++ b/azurerm/internal/services/apimanagement/api_management_product_group_resource.go @@ -5,12 +5,11 @@ import ( "log" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" diff --git a/azurerm/internal/services/apimanagement/api_management_product_policy_resource.go b/azurerm/internal/services/apimanagement/api_management_product_policy_resource.go index c34ec2439fb5..e5ecf605b0bb 100644 --- a/azurerm/internal/services/apimanagement/api_management_product_policy_resource.go +++ b/azurerm/internal/services/apimanagement/api_management_product_policy_resource.go @@ -6,13 +6,12 @@ import ( "log" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" - "github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2020-12-01/apimanagement" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" diff --git 
a/azurerm/internal/services/apimanagement/api_management_product_policy_resource_test.go b/azurerm/internal/services/apimanagement/api_management_product_policy_resource_test.go index 6055088389e1..f1cd75c5540e 100644 --- a/azurerm/internal/services/apimanagement/api_management_product_policy_resource_test.go +++ b/azurerm/internal/services/apimanagement/api_management_product_policy_resource_test.go @@ -112,7 +112,7 @@ resource "azurerm_api_management" "test" { resource_group_name = azurerm_resource_group.test.name publisher_name = "pub1" publisher_email = "pub1@email.com" - sku_name = "Developer_1" + sku_name = "Consumption_0" } resource "azurerm_api_management_product" "test" { @@ -163,7 +163,7 @@ resource "azurerm_api_management" "test" { resource_group_name = azurerm_resource_group.test.name publisher_name = "pub1" publisher_email = "pub1@email.com" - sku_name = "Developer_1" + sku_name = "Consumption_0" } resource "azurerm_api_management_product" "test" { diff --git a/azurerm/internal/services/apimanagement/api_management_product_resource.go b/azurerm/internal/services/apimanagement/api_management_product_resource.go index 0c59191db95e..af4fc65a6cfd 100644 --- a/azurerm/internal/services/apimanagement/api_management_product_resource.go +++ b/azurerm/internal/services/apimanagement/api_management_product_resource.go @@ -5,13 +5,12 @@ import ( "log" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" - "github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2020-12-01/apimanagement" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" diff --git a/azurerm/internal/services/apimanagement/api_management_product_resource_test.go b/azurerm/internal/services/apimanagement/api_management_product_resource_test.go index bb3aa748d047..a1da8e7c5781 100644 --- a/azurerm/internal/services/apimanagement/api_management_product_resource_test.go +++ b/azurerm/internal/services/apimanagement/api_management_product_resource_test.go @@ -190,7 +190,7 @@ resource "azurerm_api_management" "test" { publisher_name = "pub1" publisher_email = "pub1@email.com" - sku_name = "Developer_1" + sku_name = "Consumption_0" } resource "azurerm_api_management_product" "test" { @@ -238,7 +238,7 @@ resource "azurerm_api_management" "test" { publisher_name = "pub1" publisher_email = "pub1@email.com" - sku_name = "Developer_1" + sku_name = "Consumption_0" } resource "azurerm_api_management_product" "test" { @@ -272,7 +272,7 @@ resource "azurerm_api_management" "test" { publisher_name = "pub1" publisher_email = "pub1@email.com" - sku_name = "Developer_1" + sku_name = "Consumption_0" } resource "azurerm_api_management_product" "test" { @@ -306,7 +306,7 @@ resource "azurerm_api_management" "test" { publisher_name = "pub1" publisher_email = "pub1@email.com" - sku_name = "Developer_1" + sku_name = "Consumption_0" } resource "azurerm_api_management_product" "test" { @@ -341,7 +341,7 @@ resource "azurerm_api_management" "test" { resource_group_name = azurerm_resource_group.test.name publisher_name = "pub1" publisher_email = "pub1@email.com" - sku_name = "Developer_1" + 
sku_name = "Consumption_0" } resource "azurerm_api_management_product" "test" { diff --git a/azurerm/internal/services/apimanagement/api_management_property_resource.go b/azurerm/internal/services/apimanagement/api_management_property_resource.go index e6ed23092633..22e2f7072dad 100644 --- a/azurerm/internal/services/apimanagement/api_management_property_resource.go +++ b/azurerm/internal/services/apimanagement/api_management_property_resource.go @@ -5,14 +5,13 @@ import ( "log" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/migration" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" - "github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2020-12-01/apimanagement" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/migration" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" diff --git a/azurerm/internal/services/apimanagement/api_management_property_resource_test.go b/azurerm/internal/services/apimanagement/api_management_property_resource_test.go index 66e345157b71..d611b8aa79b0 100644 --- 
a/azurerm/internal/services/apimanagement/api_management_property_resource_test.go +++ b/azurerm/internal/services/apimanagement/api_management_property_resource_test.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" @@ -101,7 +100,7 @@ resource "azurerm_api_management" "test" { publisher_name = "pub1" publisher_email = "pub1@email.com" - sku_name = "Developer_1" + sku_name = "Consumption_0" } resource "azurerm_api_management_property" "test" { @@ -133,7 +132,7 @@ resource "azurerm_api_management" "test" { publisher_name = "pub1" publisher_email = "pub1@email.com" - sku_name = "Developer_1" + sku_name = "Consumption_0" } resource "azurerm_api_management_property" "test" { diff --git a/azurerm/internal/services/apimanagement/api_management_redis_cache_resource.go b/azurerm/internal/services/apimanagement/api_management_redis_cache_resource.go new file mode 100644 index 000000000000..208224f846c3 --- /dev/null +++ b/azurerm/internal/services/apimanagement/api_management_redis_cache_resource.go @@ -0,0 +1,183 @@ +package apimanagement + +import ( + "fmt" + "log" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2020-12-01/apimanagement" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/location" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func resourceApiManagementRedisCache() *pluginsdk.Resource { + return &pluginsdk.Resource{ + Create: resourceApiManagementRedisCacheCreateUpdate, + Read: resourceApiManagementRedisCacheRead, + Update: resourceApiManagementRedisCacheCreateUpdate, + Delete: resourceApiManagementRedisCacheDelete, + + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), + }, + + Importer: pluginsdk.ImporterValidatingResourceId(func(id string) error { + _, err := parse.RedisCacheID(id) + return err + }), + + Schema: map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.ApiManagementChildName, + }, + + "api_management_id": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.ApiManagementID, + }, + + "connection_string": { + Type: pluginsdk.TypeString, + Required: true, + Sensitive: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + + "redis_cache_id": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: azure.ValidateResourceID, + }, + + "description": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + + "cache_location": { + Type: pluginsdk.TypeString, + Optional: true, + Default: "default", + ValidateFunc: validate.RedisCacheLocation, + 
DiffSuppressFunc: location.DiffSuppressFunc, + }, + }, + } +} +func resourceApiManagementRedisCacheCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { + subscriptionId := meta.(*clients.Client).Account.SubscriptionId + client := meta.(*clients.Client).ApiManagement.CacheClient + ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) + defer cancel() + + name := d.Get("name").(string) + apimId, err := parse.ApiManagementID(d.Get("api_management_id").(string)) + if err != nil { + return err + } + id := parse.NewRedisCacheID(subscriptionId, apimId.ResourceGroup, apimId.ServiceName, name) + + if d.IsNewResource() { + existing, err := client.Get(ctx, apimId.ResourceGroup, apimId.ServiceName, name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("checking for existing %q: %+v", id, err) + } + } + if !utils.ResponseWasNotFound(existing.Response) { + return tf.ImportAsExistsError("azurerm_api_management_redis_cache", id.ID()) + } + } + + parameters := apimanagement.CacheContract{ + CacheContractProperties: &apimanagement.CacheContractProperties{ + ConnectionString: utils.String(d.Get("connection_string").(string)), + UseFromLocation: utils.String(location.Normalize(d.Get("cache_location").(string))), + }, + } + + if v, ok := d.GetOk("description"); ok && v.(string) != "" { + parameters.CacheContractProperties.Description = utils.String(v.(string)) + } + + if v, ok := d.GetOk("redis_cache_id"); ok && v.(string) != "" { + parameters.CacheContractProperties.ResourceID = utils.String(meta.(*clients.Client).Account.Environment.ResourceManagerEndpoint + v.(string)) + } + + // NOTE: we use "PUT" (CreateOrUpdate) for updates as well, because `description` is not allowed to be an empty string, so an update that removes `description` cannot be expressed via `PATCH` + if _, err := client.CreateOrUpdate(ctx, apimId.ResourceGroup, apimId.ServiceName, name, parameters, ""); err != nil { + return fmt.Errorf("creating/updating %q: %+v", id, err) + } + + 
d.SetId(id.ID()) + return resourceApiManagementRedisCacheRead(d, meta) +} + +func resourceApiManagementRedisCacheRead(d *pluginsdk.ResourceData, meta interface{}) error { + subscriptionId := meta.(*clients.Client).Account.SubscriptionId + client := meta.(*clients.Client).ApiManagement.CacheClient + ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.RedisCacheID(d.Id()) + if err != nil { + return err + } + + resp, err := client.Get(ctx, id.ResourceGroup, id.ServiceName, id.CacheName) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + log.Printf("[INFO] apimanagement %q does not exist - removing from state", d.Id()) + d.SetId("") + return nil + } + return fmt.Errorf("retrieving %q: %+v", id, err) + } + d.Set("name", id.CacheName) + d.Set("api_management_id", parse.NewApiManagementID(subscriptionId, id.ResourceGroup, id.ServiceName).ID()) + if props := resp.CacheContractProperties; props != nil { + d.Set("description", props.Description) + + cacheId := "" + if props.ResourceID != nil { + cacheId = strings.TrimPrefix(*props.ResourceID, meta.(*clients.Client).Account.Environment.ResourceManagerEndpoint) + } + d.Set("redis_cache_id", cacheId) + d.Set("cache_location", props.UseFromLocation) + } + return nil +} + +func resourceApiManagementRedisCacheDelete(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).ApiManagement.CacheClient + ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.RedisCacheID(d.Id()) + if err != nil { + return err + } + + if _, err := client.Delete(ctx, id.ResourceGroup, id.ServiceName, id.CacheName, "*"); err != nil { + return fmt.Errorf("deleting %q: %+v", id, err) + } + return nil +} diff --git a/azurerm/internal/services/apimanagement/api_management_redis_cache_resource_test.go b/azurerm/internal/services/apimanagement/api_management_redis_cache_resource_test.go new file mode 
100644 index 000000000000..e399d8a1a945 --- /dev/null +++ b/azurerm/internal/services/apimanagement/api_management_redis_cache_resource_test.go @@ -0,0 +1,214 @@ +package apimanagement_test + +import ( + "context" + "fmt" + "testing" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +type ApimanagementRedisCacheResource struct{} + +func TestAccApiManagementRedisCache_basic(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_api_management_redis_cache", "test") + r := ApimanagementRedisCacheResource{} + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep("connection_string"), + }) +} + +func TestAccApiManagementRedisCache_requiresImport(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_api_management_redis_cache", "test") + r := ApimanagementRedisCacheResource{} + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.RequiresImportErrorStep(r.requiresImport), + }) +} + +func TestAccApiManagementRedisCache_complete(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_api_management_redis_cache", "test") + r := ApimanagementRedisCacheResource{} + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.complete(data), + Check: acceptance.ComposeTestCheckFunc( + 
check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep("connection_string"), + }) +} + +func TestAccApiManagementRedisCache_update(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_api_management_redis_cache", "test") + r := ApimanagementRedisCacheResource{} + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep("connection_string"), + { + Config: r.complete(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep("connection_string"), + { + Config: r.update(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep("connection_string"), + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep("connection_string"), + }) +} + +func (r ApimanagementRedisCacheResource) Exists(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { + id, err := parse.RedisCacheID(state.ID) + if err != nil { + return nil, err + } + resp, err := client.ApiManagement.CacheClient.Get(ctx, id.ResourceGroup, id.ServiceName, id.CacheName) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + return utils.Bool(false), nil + } + return nil, fmt.Errorf("retrieving %q %+v", id, err) + } + return utils.Bool(true), nil +} + +func (r ApimanagementRedisCacheResource) template(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-apim-%[1]d" + location = "%[2]s" +} + +resource "azurerm_api_management" "test" { + name = "acctestAM-%[1]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + 
publisher_name = "pub1" + publisher_email = "pub1@email.com" + sku_name = "Consumption_0" +} + +resource "azurerm_redis_cache" "test" { + name = "acctestRedis-%[1]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + capacity = 1 + family = "C" + sku_name = "Basic" + enable_non_ssl_port = false + minimum_tls_version = "1.2" + + redis_configuration { + } +} + +resource "azurerm_redis_cache" "test2" { + name = "acctestRedis2-%[1]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + capacity = 1 + family = "C" + sku_name = "Basic" + enable_non_ssl_port = false + minimum_tls_version = "1.2" + + redis_configuration { + } +} +`, data.RandomInteger, data.Locations.Primary) +} + +func (r ApimanagementRedisCacheResource) basic(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_api_management_redis_cache" "test" { + name = "acctest-Redis-Cache-%d" + api_management_id = azurerm_api_management.test.id + connection_string = azurerm_redis_cache.test.primary_connection_string +} +`, r.template(data), data.RandomInteger) +} + +func (r ApimanagementRedisCacheResource) requiresImport(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_api_management_redis_cache" "import" { + name = azurerm_api_management_redis_cache.test.name + api_management_id = azurerm_api_management.test.id + connection_string = azurerm_redis_cache.test.primary_connection_string +} +`, r.basic(data)) +} + +func (r ApimanagementRedisCacheResource) complete(data acceptance.TestData) string { + template := r.template(data) + return fmt.Sprintf(` +%s + +resource "azurerm_api_management_redis_cache" "test" { + name = "acctest-Redis-Cache-%d" + api_management_id = azurerm_api_management.test.id + connection_string = azurerm_redis_cache.test.primary_connection_string + description = "Redis cache instances" + redis_cache_id = 
azurerm_redis_cache.test.id + cache_location = "%s" +} +`, template, data.RandomInteger, data.Locations.Secondary) +} + +func (r ApimanagementRedisCacheResource) update(data acceptance.TestData) string { + template := r.template(data) + return fmt.Sprintf(` +%s + +resource "azurerm_api_management_redis_cache" "test" { + name = "acctest-Redis-Cache-%d" + api_management_id = azurerm_api_management.test.id + connection_string = azurerm_redis_cache.test2.primary_connection_string + description = "Redis cache Update" + redis_cache_id = azurerm_redis_cache.test2.id + cache_location = "%s" +} +`, template, data.RandomInteger, data.Locations.Ternary) +} diff --git a/azurerm/internal/services/apimanagement/api_management_resource.go b/azurerm/internal/services/apimanagement/api_management_resource.go index ebabd0de34e9..d003b1c015f0 100644 --- a/azurerm/internal/services/apimanagement/api_management_resource.go +++ b/azurerm/internal/services/apimanagement/api_management_resource.go @@ -13,6 +13,7 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/location" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" apimValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/validate" @@ -151,6 +152,24 @@ func resourceApiManagementService() *pluginsdk.Resource { }, }, + "client_certificate_enabled": { + Type: pluginsdk.TypeBool, + Optional: true, + Default: false, + }, + + "gateway_disabled": { + Type: pluginsdk.TypeBool, + Optional: true, + Default: false, + }, + + "min_api_version": { + 
Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + "notification_sender_email": { Type: pluginsdk.TypeString, Optional: true, @@ -162,7 +181,7 @@ func resourceApiManagementService() *pluginsdk.Resource { Optional: true, Elem: &pluginsdk.Resource{ Schema: map[string]*pluginsdk.Schema{ - "location": azure.SchemaLocation(), + "location": location.SchemaWithoutForceNew(), "virtual_network_configuration": { Type: pluginsdk.TypeList, @@ -173,7 +192,6 @@ func resourceApiManagementService() *pluginsdk.Resource { "subnet_id": { Type: pluginsdk.TypeString, Required: true, - ForceNew: true, ValidateFunc: azure.ValidateResourceID, }, }, @@ -485,6 +503,8 @@ func resourceApiManagementService() *pluginsdk.Resource { }, }, + "zones": azure.SchemaZones(), + "gateway_url": { Type: pluginsdk.TypeString, Computed: true, @@ -667,6 +687,33 @@ func resourceApiManagementServiceCreateUpdate(d *pluginsdk.ResourceData, meta in } } + if d.HasChange("client_certificate_enabled") { + enableClientCertificate := d.Get("client_certificate_enabled").(bool) + if enableClientCertificate && sku.Name != apimanagement.SkuTypeConsumption { + return fmt.Errorf("`client_certificate_enabled` is only supported when sku type is `Consumption`") + } + properties.ServiceProperties.EnableClientCertificate = utils.Bool(enableClientCertificate) + } + + gateWayDisabled := d.Get("gateway_disabled").(bool) + if gateWayDisabled && (properties.AdditionalLocations == nil || len(*properties.AdditionalLocations) == 0) { + return fmt.Errorf("`gateway_disabled` is only supported when `additional_location` is set") + } + properties.ServiceProperties.DisableGateway = utils.Bool(gateWayDisabled) + + if v, ok := d.GetOk("min_api_version"); ok { + properties.ServiceProperties.APIVersionConstraint = &apimanagement.APIVersionConstraint{ + MinAPIVersion: utils.String(v.(string)), + } + } + + if v := d.Get("zones").([]interface{}); len(v) > 0 { + if sku.Name != apimanagement.SkuTypePremium { + return fmt.Errorf("`zones` is only
supported when sku type is `Premium`") + } + properties.Zones = azure.ExpandZones(v) + } + future, err := client.CreateOrUpdate(ctx, resourceGroup, name, properties) if err != nil { return fmt.Errorf("creating/updating API Management Service %q (Resource Group %q): %+v", name, resourceGroup, err) @@ -814,6 +861,8 @@ func resourceApiManagementServiceRead(d *pluginsdk.ResourceData, meta interface{ d.Set("public_ip_addresses", props.PublicIPAddresses) d.Set("private_ip_addresses", props.PrivateIPAddresses) d.Set("virtual_network_type", props.VirtualNetworkType) + d.Set("client_certificate_enabled", props.EnableClientCertificate) + d.Set("gateway_disabled", props.DisableGateway) if resp.Sku != nil && resp.Sku.Name != "" { if err := d.Set("security", flattenApiManagementSecurityCustomProperties(props.CustomProperties, resp.Sku.Name == apimanagement.SkuTypeConsumption)); err != nil { @@ -838,6 +887,12 @@ func resourceApiManagementServiceRead(d *pluginsdk.ResourceData, meta interface{ if err := d.Set("virtual_network_configuration", flattenApiManagementVirtualNetworkConfiguration(props.VirtualNetworkConfiguration)); err != nil { return fmt.Errorf("setting `virtual_network_configuration`: %+v", err) } + + var minApiVersion string + if props.APIVersionConstraint != nil && props.APIVersionConstraint.MinAPIVersion != nil { + minApiVersion = *props.APIVersionConstraint.MinAPIVersion + } + d.Set("min_api_version", minApiVersion) } if err := d.Set("sku_name", flattenApiManagementServiceSkuName(resp.Sku)); err != nil { @@ -848,6 +903,8 @@ func resourceApiManagementServiceRead(d *pluginsdk.ResourceData, meta interface{ return fmt.Errorf("setting `policy`: %+v", err) } + d.Set("zones", azure.FlattenZones(resp.Zones)) + if resp.Sku.Name != apimanagement.SkuTypeConsumption { signInSettings, err := signInClient.Get(ctx, resourceGroup, name) if err != nil { diff --git a/azurerm/internal/services/apimanagement/api_management_resource_test.go 
b/azurerm/internal/services/apimanagement/api_management_resource_test.go index e6bfeee8e709..e05779e7f903 100644 --- a/azurerm/internal/services/apimanagement/api_management_resource_test.go +++ b/azurerm/internal/services/apimanagement/api_management_resource_test.go @@ -121,6 +121,46 @@ func TestAccApiManagement_complete(t *testing.T) { }) } +func TestAccApiManagement_completeUpdateAdditionalLocations(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_api_management", "test") + r := ApiManagementResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.complete(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep("certificate", // not returned from API, sensitive + "hostname_configuration.0.portal.0.certificate", // not returned from API, sensitive + "hostname_configuration.0.portal.0.certificate_password", // not returned from API, sensitive + "hostname_configuration.0.developer_portal.0.certificate", // not returned from API, sensitive + "hostname_configuration.0.developer_portal.0.certificate_password", // not returned from API, sensitive + "hostname_configuration.0.proxy.0.certificate", // not returned from API, sensitive + "hostname_configuration.0.proxy.0.certificate_password", // not returned from API, sensitive + "hostname_configuration.0.proxy.1.certificate", // not returned from API, sensitive + "hostname_configuration.0.proxy.1.certificate_password", // not returned from API, sensitive + ), + { + Config: r.completeUpdateAdditionalLocations(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep("certificate", // not returned from API, sensitive + "hostname_configuration.0.portal.0.certificate", // not returned from API, sensitive + "hostname_configuration.0.portal.0.certificate_password", // not returned from API, sensitive + 
"hostname_configuration.0.developer_portal.0.certificate", // not returned from API, sensitive + "hostname_configuration.0.developer_portal.0.certificate_password", // not returned from API, sensitive + "hostname_configuration.0.proxy.0.certificate", // not returned from API, sensitive + "hostname_configuration.0.proxy.0.certificate_password", // not returned from API, sensitive + "hostname_configuration.0.proxy.1.certificate", // not returned from API, sensitive + "hostname_configuration.0.proxy.1.certificate_password", // not returned from API, sensitive + ), + }) +} + func TestAccApiManagement_signInSignUpSettings(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_api_management", "test") r := ApiManagementResource{} @@ -292,6 +332,100 @@ func TestAccApiManagement_consumption(t *testing.T) { }) } +func TestAccApiManagement_clientCertificate(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_api_management", "test") + r := ApiManagementResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.consumption(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.consumptionClientCertificateEnabled(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.consumptionClientCertificateDisabled(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccApiManagement_gatewayDiabled(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_api_management", "test") + r := ApiManagementResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.multipleLocations(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.gatewayDiabled(data), + Check: 
acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.multipleLocations(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccApiManagement_minApiVersion(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_api_management", "test") + r := ApiManagementResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.consumption(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.consumptionMinApiVersion(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.consumptionMinApiVersionUpdate(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.consumption(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + func (ApiManagementResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := parse.ApiManagementID(state.ID) if err != nil { @@ -844,7 +978,7 @@ resource "azurerm_api_management" "test" { hostname_configuration { proxy { - host_name = "api.pluginsdk.io" + host_name = "api.terraform.io" certificate = filebase64("testdata/api_management_api_test.pfx") certificate_password = "terraform" default_ssl_binding = true @@ -852,25 +986,145 @@ resource "azurerm_api_management" "test" { } proxy { - host_name = "api2.pluginsdk.io" + host_name = "api2.terraform.io" certificate = filebase64("testdata/api_management_api2_test.pfx") certificate_password = "terraform" negotiate_client_certificate = true } portal { - host_name = "portal.pluginsdk.io" + host_name = 
"portal.terraform.io" certificate = filebase64("testdata/api_management_portal_test.pfx") certificate_password = "terraform" } developer_portal { - host_name = "developer-portal.pluginsdk.io" + host_name = "developer-portal.terraform.io" certificate = filebase64("testdata/api_management_developer_portal_test.pfx") } } - sku_name = "Premium_1" + sku_name = "Premium_2" + + zones = [1, 2] + + tags = { + "Acceptance" = "Test" + } + + location = azurerm_resource_group.test1.location + resource_group_name = azurerm_resource_group.test1.name +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.Locations.Secondary, data.RandomInteger, data.Locations.Ternary, data.RandomInteger) +} + +func (ApiManagementResource) completeUpdateAdditionalLocations(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test1" { + name = "acctestRG-api1-%d" + location = "%s" +} + +resource "azurerm_resource_group" "test2" { + name = "acctestRG-api2-%d" + location = "%s" +} + +resource "azurerm_resource_group" "test3" { + name = "acctestRG-api3-%d" + location = "%s" +} + +resource "azurerm_api_management" "test" { + name = "acctestAM-%d" + publisher_name = "pub1" + publisher_email = "pub1@email.com" + notification_sender_email = "notification@email.com" + + additional_location { + location = azurerm_resource_group.test2.location + } + + certificate { + encoded_certificate = filebase64("testdata/api_management_api_test.pfx") + certificate_password = "terraform" + store_name = "CertificateAuthority" + } + + certificate { + encoded_certificate = filebase64("testdata/api_management_api_test.pfx") + certificate_password = "terraform" + store_name = "Root" + } + + certificate { + encoded_certificate = filebase64("testdata/api_management_api_test.cer") + store_name = "Root" + } + + certificate { + encoded_certificate = filebase64("testdata/api_management_api_test.cer") + store_name = 
"CertificateAuthority" + } + + protocols { + enable_http2 = true + } + + security { + enable_backend_tls11 = true + enable_backend_ssl30 = true + enable_backend_tls10 = true + enable_frontend_ssl30 = true + enable_frontend_tls10 = true + enable_frontend_tls11 = true + tls_ecdhe_ecdsa_with_aes128_cbc_sha_ciphers_enabled = true + tls_ecdhe_ecdsa_with_aes256_cbc_sha_ciphers_enabled = true + tls_ecdhe_rsa_with_aes128_cbc_sha_ciphers_enabled = true + tls_ecdhe_rsa_with_aes256_cbc_sha_ciphers_enabled = true + tls_rsa_with_aes128_cbc_sha256_ciphers_enabled = true + tls_rsa_with_aes128_cbc_sha_ciphers_enabled = true + tls_rsa_with_aes128_gcm_sha256_ciphers_enabled = true + tls_rsa_with_aes256_cbc_sha256_ciphers_enabled = true + tls_rsa_with_aes256_cbc_sha_ciphers_enabled = true + triple_des_ciphers_enabled = true + } + + hostname_configuration { + proxy { + host_name = "api.terraform.io" + certificate = filebase64("testdata/api_management_api_test.pfx") + certificate_password = "terraform" + default_ssl_binding = true + negotiate_client_certificate = false + } + + proxy { + host_name = "api2.terraform.io" + certificate = filebase64("testdata/api_management_api2_test.pfx") + certificate_password = "terraform" + negotiate_client_certificate = true + } + + portal { + host_name = "portal.terraform.io" + certificate = filebase64("testdata/api_management_portal_test.pfx") + certificate_password = "terraform" + } + + developer_portal { + host_name = "developer-portal.terraform.io" + certificate = filebase64("testdata/api_management_developer_portal_test.pfx") + } + } + + sku_name = "Premium_2" + + zones = [1, 2] tags = { "Acceptance" = "Test" @@ -918,19 +1172,61 @@ resource "azurerm_subnet_network_security_group_association" "test" { network_security_group_id = azurerm_network_security_group.test.id } -resource "azurerm_network_security_rule" "port_3443" { - name = "Port_3443" +resource "azurerm_network_security_rule" "client" { + name = "Client_communication_to_API_Management" 
priority = 100 direction = "Inbound" access = "Allow" protocol = "Tcp" source_port_range = "*" + destination_port_range = "80" + source_address_prefix = "VirtualNetwork" + destination_address_prefix = "VirtualNetwork" + resource_group_name = azurerm_resource_group.test.name + network_security_group_name = azurerm_network_security_group.test.name +} + +resource "azurerm_network_security_rule" "secure_client" { + name = "Secure_Client_communication_to_API_Management" + priority = 110 + direction = "Inbound" + access = "Allow" + protocol = "Tcp" + source_port_range = "*" + destination_port_range = "443" + source_address_prefix = "VirtualNetwork" + destination_address_prefix = "VirtualNetwork" + resource_group_name = azurerm_resource_group.test.name + network_security_group_name = azurerm_network_security_group.test.name +} + +resource "azurerm_network_security_rule" "endpoint" { + name = "Management_endpoint_for_Azure_portal_and_Powershell" + priority = 120 + direction = "Inbound" + access = "Allow" + protocol = "Tcp" + source_port_range = "*" destination_port_range = "3443" source_address_prefix = "ApiManagement" destination_address_prefix = "VirtualNetwork" resource_group_name = azurerm_resource_group.test.name network_security_group_name = azurerm_network_security_group.test.name } + +resource "azurerm_network_security_rule" "authenticate" { + name = "Authenticate_To_Azure_Active_Directory" + priority = 200 + direction = "Outbound" + access = "Allow" + protocol = "Tcp" + source_port_range = "*" + destination_port_ranges = ["80", "443"] + source_address_prefix = "ApiManagement" + destination_address_prefix = "VirtualNetwork" + resource_group_name = azurerm_resource_group.test.name + network_security_group_name = azurerm_network_security_group.test.name +} `, data.RandomInteger, data.Locations.Primary) } @@ -990,13 +1286,41 @@ resource "azurerm_subnet_network_security_group_association" "test2" { network_security_group_id = azurerm_network_security_group.test2.id } 
-resource "azurerm_network_security_rule" "port_3443_2" { - name = "Port_3443" +resource "azurerm_network_security_rule" "client2" { + name = "Client_communication_to_API_Management" priority = 100 direction = "Inbound" access = "Allow" protocol = "Tcp" source_port_range = "*" + destination_port_range = "80" + source_address_prefix = "VirtualNetwork" + destination_address_prefix = "VirtualNetwork" + resource_group_name = azurerm_resource_group.test2.name + network_security_group_name = azurerm_network_security_group.test2.name +} + +resource "azurerm_network_security_rule" "secure_client2" { + name = "Secure_Client_communication_to_API_Management" + priority = 110 + direction = "Inbound" + access = "Allow" + protocol = "Tcp" + source_port_range = "*" + destination_port_range = "443" + source_address_prefix = "VirtualNetwork" + destination_address_prefix = "VirtualNetwork" + resource_group_name = azurerm_resource_group.test2.name + network_security_group_name = azurerm_network_security_group.test2.name +} + +resource "azurerm_network_security_rule" "endpoint2" { + name = "Management_endpoint_for_Azure_portal_and_Powershell" + priority = 120 + direction = "Inbound" + access = "Allow" + protocol = "Tcp" + source_port_range = "*" destination_port_range = "3443" source_address_prefix = "ApiManagement" destination_address_prefix = "VirtualNetwork" @@ -1004,6 +1328,21 @@ resource "azurerm_network_security_rule" "port_3443_2" { network_security_group_name = azurerm_network_security_group.test2.name } +resource "azurerm_network_security_rule" "authenticate2" { + name = "Authenticate_To_Azure_Active_Directory" + priority = 200 + direction = "Outbound" + access = "Allow" + protocol = "Tcp" + source_port_range = "*" + destination_port_ranges = ["80", "443"] + source_address_prefix = "ApiManagement" + destination_address_prefix = "VirtualNetwork" + resource_group_name = azurerm_resource_group.test2.name + network_security_group_name = azurerm_network_security_group.test2.name 
+} + + resource "azurerm_api_management" "test" { name = "acctestAM-%[2]d" location = azurerm_resource_group.test.location @@ -1058,7 +1397,7 @@ resource "azurerm_api_management" "test" { identity { type = "UserAssigned" identity_ids = [ - azurerm_user_assigned_identity.test.principal_id, + azurerm_user_assigned_identity.test.id, ] } } @@ -1152,11 +1491,14 @@ func (ApiManagementResource) identitySystemAssignedUpdateHostnameConfigurationsT provider "azurerm" { features {} } + resource "azurerm_resource_group" "test" { name = "acctestRG-%[1]d" location = "%[2]s" } + data "azurerm_client_config" "current" {} + resource "azurerm_key_vault" "test" { name = "acctestKV-%[4]s" location = azurerm_resource_group.test.location @@ -1164,6 +1506,7 @@ resource "azurerm_key_vault" "test" { tenant_id = data.azurerm_client_config.current.tenant_id sku_name = "standard" } + resource "azurerm_key_vault_access_policy" "test" { key_vault_id = azurerm_key_vault.test.id tenant_id = data.azurerm_client_config.current.tenant_id @@ -1181,6 +1524,7 @@ resource "azurerm_key_vault_access_policy" "test" { "Manageissuers", "Setissuers", "Update", + "Purge", ] secret_permissions = [ "Delete", @@ -1189,6 +1533,7 @@ resource "azurerm_key_vault_access_policy" "test" { "Purge", ] } + resource "azurerm_key_vault_access_policy" "test2" { key_vault_id = azurerm_key_vault.test.id tenant_id = azurerm_api_management.test.identity[0].tenant_id @@ -1198,6 +1543,7 @@ resource "azurerm_key_vault_access_policy" "test2" { "List", ] } + resource "azurerm_key_vault_certificate" "test" { depends_on = [azurerm_key_vault_access_policy.test] name = "acctestKVCert-%[3]d" @@ -1329,6 +1675,149 @@ resource "azurerm_api_management" "test" { `, data.RandomInteger, data.Locations.Primary, data.RandomInteger) } +func (ApiManagementResource) consumptionClientCertificateEnabled(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = 
"acctestRG-%d" + location = "%s" +} + +resource "azurerm_api_management" "test" { + name = "acctestAM-%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + publisher_name = "pub1" + publisher_email = "pub1@email.com" + sku_name = "Consumption_0" + client_certificate_enabled = true +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger) +} + +func (ApiManagementResource) consumptionClientCertificateDisabled(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_api_management" "test" { + name = "acctestAM-%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + publisher_name = "pub1" + publisher_email = "pub1@email.com" + sku_name = "Consumption_0" + client_certificate_enabled = false +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger) +} + +func (ApiManagementResource) multipleLocations(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_api_management" "test" { + name = "acctestAM-%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + publisher_name = "pub1" + publisher_email = "pub1@email.com" + sku_name = "Premium_1" + additional_location { + location = "%s" + } +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.Locations.Secondary) +} + +func (ApiManagementResource) gatewayDiabled(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_api_management" "test" { + name = 
"acctestAM-%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + publisher_name = "pub1" + publisher_email = "pub1@email.com" + sku_name = "Premium_1" + gateway_disabled = true + additional_location { + location = "%s" + } +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.Locations.Secondary) +} + +func (ApiManagementResource) consumptionMinApiVersion(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_api_management" "test" { + name = "acctestAM-%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + publisher_name = "pub1" + publisher_email = "pub1@email.com" + sku_name = "Consumption_0" + min_api_version = "2019-12-01" +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger) +} + +func (ApiManagementResource) consumptionMinApiVersionUpdate(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_api_management" "test" { + name = "acctestAM-%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + publisher_name = "pub1" + publisher_email = "pub1@email.com" + sku_name = "Consumption_0" + min_api_version = "2020-12-01" +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger) +} + func (ApiManagementResource) tenantAccess(data acceptance.TestData) string { return fmt.Sprintf(` provider "azurerm" { diff --git a/azurerm/internal/services/apimanagement/api_management_subscription_resource.go b/azurerm/internal/services/apimanagement/api_management_subscription_resource.go index 6a3208572574..433438ce6a24 100644 --- 
a/azurerm/internal/services/apimanagement/api_management_subscription_resource.go +++ b/azurerm/internal/services/apimanagement/api_management_subscription_resource.go @@ -62,12 +62,20 @@ func resourceApiManagementSubscription() *pluginsdk.Resource { ValidateFunc: validation.StringIsNotEmpty, }, - // TODO this now sets the scope property - either a scope block needs adding or additional properties `api_id` and maybe `all_apis` "product_id": { - Type: pluginsdk.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validate.ProductID, + Type: pluginsdk.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validate.ProductID, + ConflictsWith: []string{"api_id"}, + }, + + "api_id": { + Type: pluginsdk.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validate.ApiID, + ConflictsWith: []string{"product_id"}, }, "state": { @@ -138,14 +146,25 @@ func resourceApiManagementSubscriptionCreateUpdate(d *pluginsdk.ResourceData, me } displayName := d.Get("display_name").(string) - productId := d.Get("product_id").(string) + productId, productSet := d.GetOk("product_id") + apiId, apiSet := d.GetOk("api_id") state := d.Get("state").(string) allowTracing := d.Get("allow_tracing").(bool) + var scope string + switch { + case productSet: + scope = productId.(string) + case apiSet: + scope = apiId.(string) + default: + scope = "all_apis" + } + params := apimanagement.SubscriptionCreateParameters{ SubscriptionCreateParameterProperties: &apimanagement.SubscriptionCreateParameterProperties{ DisplayName: utils.String(displayName), - Scope: utils.String(productId), + Scope: utils.String(scope), State: apimanagement.SubscriptionState(state), AllowTracing: utils.Bool(allowTracing), }, @@ -207,14 +226,22 @@ func resourceApiManagementSubscriptionRead(d *pluginsdk.ResourceData, meta inter d.Set("display_name", props.DisplayName) d.Set("state", string(props.State)) productId := "" + apiId := "" if *props.Scope != "" { + // the scope is either a product or api id 
or "all_apis" constant parseId, err := parse.ProductID(*props.Scope) - if err != nil { - return fmt.Errorf("parsing product id %q: %+v", *props.Scope, err) + if err == nil { + productId = parseId.ID() + } else { + parsedApiId, err := parse.ApiID(*props.Scope) + if err != nil { + return fmt.Errorf("parsing scope into product/ api id %q: %+v", *props.Scope, err) + } + apiId = parsedApiId.ID() } - productId = parseId.ID() } d.Set("product_id", productId) + d.Set("api_id", apiId) d.Set("user_id", props.OwnerID) d.Set("allow_tracing", props.AllowTracing) } diff --git a/azurerm/internal/services/apimanagement/api_management_subscription_resource_test.go b/azurerm/internal/services/apimanagement/api_management_subscription_resource_test.go index 5b53d78f98d8..8c9c59b4c5ec 100644 --- a/azurerm/internal/services/apimanagement/api_management_subscription_resource_test.go +++ b/azurerm/internal/services/apimanagement/api_management_subscription_resource_test.go @@ -5,11 +5,10 @@ import ( "fmt" "testing" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -142,6 +141,24 @@ func TestAccApiManagementSubscription_withoutUser(t *testing.T) { }) } +func TestAccApiManagementSubscription_withApiId(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_api_management_subscription", "test") + r := ApiManagementSubscriptionResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { 
+ Config: r.withApiId(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("api_id").Exists(), + check.That(data.ResourceName).Key("product_id").HasValue(""), + check.That(data.ResourceName).Key("user_id").HasValue(""), + ), + }, + data.ImportStep(), + }) +} + func (ApiManagementSubscriptionResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := parse.SubscriptionID(state.ID) if err != nil { @@ -233,6 +250,30 @@ resource "azurerm_api_management_subscription" "test" { `, r.template(data)) } +func (ApiManagementSubscriptionResource) withApiId(data acceptance.TestData) string { + template := ApiManagementSubscriptionResource{}.template(data) + return fmt.Sprintf(` +%s + +resource "azurerm_api_management_api" "test" { + name = "TestApi" + resource_group_name = azurerm_api_management.test.resource_group_name + api_management_name = azurerm_api_management.test.name + revision = "1" + protocols = ["https"] + display_name = "Test API" + path = "test" +} + +resource "azurerm_api_management_subscription" "test" { + resource_group_name = azurerm_api_management.test.resource_group_name + api_management_name = azurerm_api_management.test.name + api_id = azurerm_api_management_api.test.id + display_name = "Butter Parser API Enterprise Edition" +} +`, template) +} + func (ApiManagementSubscriptionResource) template(data acceptance.TestData) string { return fmt.Sprintf(` provider "azurerm" { diff --git a/azurerm/internal/services/apimanagement/api_management_user_resource.go b/azurerm/internal/services/apimanagement/api_management_user_resource.go index c1aed49945b1..347c90419a48 100644 --- a/azurerm/internal/services/apimanagement/api_management_user_resource.go +++ b/azurerm/internal/services/apimanagement/api_management_user_resource.go @@ -5,13 +5,12 @@ import ( "log" "time" - 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" - "github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2020-12-01/apimanagement" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/schemaz" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" diff --git a/azurerm/internal/services/apimanagement/client/client.go b/azurerm/internal/services/apimanagement/client/client.go index 77419235bb95..035bc88b5346 100644 --- a/azurerm/internal/services/apimanagement/client/client.go +++ b/azurerm/internal/services/apimanagement/client/client.go @@ -11,13 +11,17 @@ type Client struct { ApiPoliciesClient *apimanagement.APIPolicyClient ApiOperationsClient *apimanagement.APIOperationClient ApiOperationPoliciesClient *apimanagement.APIOperationPolicyClient + ApiReleasesClient *apimanagement.APIReleaseClient ApiSchemasClient *apimanagement.APISchemaClient ApiVersionSetClient *apimanagement.APIVersionSetClient AuthorizationServersClient *apimanagement.AuthorizationServerClient BackendClient *apimanagement.BackendClient + CacheClient *apimanagement.CacheClient CertificatesClient *apimanagement.CertificateClient DiagnosticClient *apimanagement.DiagnosticClient EmailTemplateClient 
*apimanagement.EmailTemplateClient + GatewayClient *apimanagement.GatewayClient + GatewayApisClient *apimanagement.GatewayAPIClient GroupClient *apimanagement.GroupClient GroupUsersClient *apimanagement.GroupUserClient IdentityProviderClient *apimanagement.IdentityProviderClient @@ -33,6 +37,7 @@ type Client struct { SignInClient *apimanagement.SignInSettingsClient SignUpClient *apimanagement.SignUpSettingsClient SubscriptionsClient *apimanagement.SubscriptionClient + TagClient *apimanagement.TagClient TenantAccessClient *apimanagement.TenantAccessClient UsersClient *apimanagement.UserClient } @@ -53,6 +58,9 @@ func NewClient(o *common.ClientOptions) *Client { apiOperationPoliciesClient := apimanagement.NewAPIOperationPolicyClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) o.ConfigureClient(&apiOperationPoliciesClient.Client, o.ResourceManagerAuthorizer) + apiReleasesClient := apimanagement.NewAPIReleaseClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) + o.ConfigureClient(&apiReleasesClient.Client, o.ResourceManagerAuthorizer) + apiSchemasClient := apimanagement.NewAPISchemaClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) o.ConfigureClient(&apiSchemasClient.Client, o.ResourceManagerAuthorizer) @@ -65,6 +73,9 @@ func NewClient(o *common.ClientOptions) *Client { backendClient := apimanagement.NewBackendClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) o.ConfigureClient(&backendClient.Client, o.ResourceManagerAuthorizer) + cacheClient := apimanagement.NewCacheClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) + o.ConfigureClient(&cacheClient.Client, o.ResourceManagerAuthorizer) + certificatesClient := apimanagement.NewCertificateClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) o.ConfigureClient(&certificatesClient.Client, o.ResourceManagerAuthorizer) @@ -74,6 +85,12 @@ func NewClient(o *common.ClientOptions) *Client { emailTemplateClient := 
apimanagement.NewEmailTemplateClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) o.ConfigureClient(&emailTemplateClient.Client, o.ResourceManagerAuthorizer) + gatewayClient := apimanagement.NewGatewayClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) + o.ConfigureClient(&gatewayClient.Client, o.ResourceManagerAuthorizer) + + gatewayApisClient := apimanagement.NewGatewayAPIClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) + o.ConfigureClient(&gatewayApisClient.Client, o.ResourceManagerAuthorizer) + groupClient := apimanagement.NewGroupClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) o.ConfigureClient(&groupClient.Client, o.ResourceManagerAuthorizer) @@ -119,6 +136,9 @@ func NewClient(o *common.ClientOptions) *Client { subscriptionsClient := apimanagement.NewSubscriptionClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) o.ConfigureClient(&subscriptionsClient.Client, o.ResourceManagerAuthorizer) + tagClient := apimanagement.NewTagClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) + o.ConfigureClient(&tagClient.Client, o.ResourceManagerAuthorizer) + tenantAccessClient := apimanagement.NewTenantAccessClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) o.ConfigureClient(&tenantAccessClient.Client, o.ResourceManagerAuthorizer) @@ -131,13 +151,17 @@ func NewClient(o *common.ClientOptions) *Client { ApiPoliciesClient: &apiPoliciesClient, ApiOperationsClient: &apiOperationsClient, ApiOperationPoliciesClient: &apiOperationPoliciesClient, + ApiReleasesClient: &apiReleasesClient, ApiSchemasClient: &apiSchemasClient, ApiVersionSetClient: &apiVersionSetClient, AuthorizationServersClient: &authorizationServersClient, BackendClient: &backendClient, + CacheClient: &cacheClient, CertificatesClient: &certificatesClient, DiagnosticClient: &diagnosticClient, EmailTemplateClient: &emailTemplateClient, + GatewayClient: &gatewayClient, + GatewayApisClient: &gatewayApisClient, GroupClient: &groupClient, 
GroupUsersClient: &groupUsersClient, IdentityProviderClient: &identityProviderClient, @@ -153,6 +177,7 @@ func NewClient(o *common.ClientOptions) *Client { SignInClient: &signInClient, SignUpClient: &signUpClient, SubscriptionsClient: &subscriptionsClient, + TagClient: &tagClient, TenantAccessClient: &tenantAccessClient, UsersClient: &usersClient, } diff --git a/azurerm/internal/services/apimanagement/parse/api_release.go b/azurerm/internal/services/apimanagement/parse/api_release.go new file mode 100644 index 000000000000..f5e980516c7e --- /dev/null +++ b/azurerm/internal/services/apimanagement/parse/api_release.go @@ -0,0 +1,81 @@ +package parse + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "fmt" + "strings" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" +) + +type ApiReleaseId struct { + SubscriptionId string + ResourceGroup string + ServiceName string + ApiName string + ReleaseName string +} + +func NewApiReleaseID(subscriptionId, resourceGroup, serviceName, apiName, releaseName string) ApiReleaseId { + return ApiReleaseId{ + SubscriptionId: subscriptionId, + ResourceGroup: resourceGroup, + ServiceName: serviceName, + ApiName: apiName, + ReleaseName: releaseName, + } +} + +func (id ApiReleaseId) String() string { + segments := []string{ + fmt.Sprintf("Release Name %q", id.ReleaseName), + fmt.Sprintf("Api Name %q", id.ApiName), + fmt.Sprintf("Service Name %q", id.ServiceName), + fmt.Sprintf("Resource Group %q", id.ResourceGroup), + } + segmentsStr := strings.Join(segments, " / ") + return fmt.Sprintf("%s: (%s)", "Api Release", segmentsStr) +} + +func (id ApiReleaseId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.ApiManagement/service/%s/apis/%s/releases/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.ServiceName, id.ApiName, id.ReleaseName) +} + +// ApiReleaseID parses a ApiRelease ID into an 
ApiReleaseId struct +func ApiReleaseID(input string) (*ApiReleaseId, error) { + id, err := azure.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := ApiReleaseId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + if resourceId.ServiceName, err = id.PopSegment("service"); err != nil { + return nil, err + } + if resourceId.ApiName, err = id.PopSegment("apis"); err != nil { + return nil, err + } + if resourceId.ReleaseName, err = id.PopSegment("releases"); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} diff --git a/azurerm/internal/services/apimanagement/parse/api_release_test.go b/azurerm/internal/services/apimanagement/parse/api_release_test.go new file mode 100644 index 000000000000..7025346cf062 --- /dev/null +++ b/azurerm/internal/services/apimanagement/parse/api_release_test.go @@ -0,0 +1,144 @@ +package parse + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "testing" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/resourceid" +) + +var _ resourceid.Formatter = ApiReleaseId{} + +func TestApiReleaseIDFormatter(t *testing.T) { + actual := NewApiReleaseID("12345678-1234-9876-4563-123456789012", "resGroup1", "service1", "api1", "release1").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/apis/api1/releases/release1" + if actual != expected { + t.Fatalf("Expected %q but got %q", expected, actual) + } +} + +func TestApiReleaseID(t *testing.T) { + testData := []struct { + Input string + Error 
bool + Expected *ApiReleaseId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/", + Error: true, + }, + + { + // missing ServiceName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/", + Error: true, + }, + + { + // missing value for ServiceName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/", + Error: true, + }, + + { + // missing ApiName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/", + Error: true, + }, + + { + // missing value for ApiName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/apis/", + Error: true, + }, + + { + // missing ReleaseName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/apis/api1/", + Error: true, + }, + + { + // missing value for ReleaseName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/apis/api1/releases/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/apis/api1/releases/release1", + Expected: &ApiReleaseId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroup: "resGroup1", + 
ServiceName: "service1", + ApiName: "api1", + ReleaseName: "release1", + }, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/RESGROUP1/PROVIDERS/MICROSOFT.APIMANAGEMENT/SERVICE/SERVICE1/APIS/API1/RELEASES/RELEASE1", + Error: true, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ApiReleaseID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + if actual.ApiName != v.Expected.ApiName { + t.Fatalf("Expected %q but got %q for ApiName", v.Expected.ApiName, actual.ApiName) + } + if actual.ReleaseName != v.Expected.ReleaseName { + t.Fatalf("Expected %q but got %q for ReleaseName", v.Expected.ReleaseName, actual.ReleaseName) + } + } +} diff --git a/azurerm/internal/services/apimanagement/parse/gateway.go b/azurerm/internal/services/apimanagement/parse/gateway.go new file mode 100644 index 000000000000..28f9385522c7 --- /dev/null +++ b/azurerm/internal/services/apimanagement/parse/gateway.go @@ -0,0 +1,75 @@ +package parse + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "fmt" + "strings" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" +) + +type GatewayId struct { + SubscriptionId string + ResourceGroup string + ServiceName string + Name string +} + +func 
NewGatewayID(subscriptionId, resourceGroup, serviceName, name string) GatewayId { + return GatewayId{ + SubscriptionId: subscriptionId, + ResourceGroup: resourceGroup, + ServiceName: serviceName, + Name: name, + } +} + +func (id GatewayId) String() string { + segments := []string{ + fmt.Sprintf("Name %q", id.Name), + fmt.Sprintf("Service Name %q", id.ServiceName), + fmt.Sprintf("Resource Group %q", id.ResourceGroup), + } + segmentsStr := strings.Join(segments, " / ") + return fmt.Sprintf("%s: (%s)", "Gateway", segmentsStr) +} + +func (id GatewayId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.ApiManagement/service/%s/gateways/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.ServiceName, id.Name) +} + +// GatewayID parses a Gateway ID into an GatewayId struct +func GatewayID(input string) (*GatewayId, error) { + id, err := azure.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := GatewayId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + if resourceId.ServiceName, err = id.PopSegment("service"); err != nil { + return nil, err + } + if resourceId.Name, err = id.PopSegment("gateways"); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} diff --git a/azurerm/internal/services/apimanagement/parse/gateway_api.go b/azurerm/internal/services/apimanagement/parse/gateway_api.go new file mode 100644 index 000000000000..390291ae72a6 --- /dev/null +++ b/azurerm/internal/services/apimanagement/parse/gateway_api.go @@ -0,0 +1,81 @@ +package parse + +// NOTE: this file is generated via 'go:generate' - manual changes will be 
overwritten + +import ( + "fmt" + "strings" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" +) + +type GatewayApiId struct { + SubscriptionId string + ResourceGroup string + ServiceName string + GatewayName string + ApiName string +} + +func NewGatewayApiID(subscriptionId, resourceGroup, serviceName, gatewayName, apiName string) GatewayApiId { + return GatewayApiId{ + SubscriptionId: subscriptionId, + ResourceGroup: resourceGroup, + ServiceName: serviceName, + GatewayName: gatewayName, + ApiName: apiName, + } +} + +func (id GatewayApiId) String() string { + segments := []string{ + fmt.Sprintf("Api Name %q", id.ApiName), + fmt.Sprintf("Gateway Name %q", id.GatewayName), + fmt.Sprintf("Service Name %q", id.ServiceName), + fmt.Sprintf("Resource Group %q", id.ResourceGroup), + } + segmentsStr := strings.Join(segments, " / ") + return fmt.Sprintf("%s: (%s)", "Gateway Api", segmentsStr) +} + +func (id GatewayApiId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.ApiManagement/service/%s/gateways/%s/apis/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.ServiceName, id.GatewayName, id.ApiName) +} + +// GatewayApiID parses a GatewayApi ID into an GatewayApiId struct +func GatewayApiID(input string) (*GatewayApiId, error) { + id, err := azure.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := GatewayApiId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + if resourceId.ServiceName, err = id.PopSegment("service"); err != nil { + return nil, err + } + if resourceId.GatewayName, err = id.PopSegment("gateways"); err != nil { + return nil, err + } + if resourceId.ApiName, err = 
id.PopSegment("apis"); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} diff --git a/azurerm/internal/services/apimanagement/parse/gateway_api_test.go b/azurerm/internal/services/apimanagement/parse/gateway_api_test.go new file mode 100644 index 000000000000..80550acf6012 --- /dev/null +++ b/azurerm/internal/services/apimanagement/parse/gateway_api_test.go @@ -0,0 +1,144 @@ +package parse + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "testing" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/resourceid" +) + +var _ resourceid.Formatter = GatewayApiId{} + +func TestGatewayApiIDFormatter(t *testing.T) { + actual := NewGatewayApiID("12345678-1234-9876-4563-123456789012", "resGroup1", "service1", "gateway1", "api1").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/gateways/gateway1/apis/api1" + if actual != expected { + t.Fatalf("Expected %q but got %q", expected, actual) + } +} + +func TestGatewayApiID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *GatewayApiId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/", + Error: true, + }, + + { + // missing ServiceName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/", + Error: true, + }, + + { + // missing value for ServiceName + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/", + Error: true, + }, + + { + // missing GatewayName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/", + Error: true, + }, + + { + // missing value for GatewayName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/gateways/", + Error: true, + }, + + { + // missing ApiName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/gateways/gateway1/", + Error: true, + }, + + { + // missing value for ApiName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/gateways/gateway1/apis/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/gateways/gateway1/apis/api1", + Expected: &GatewayApiId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroup: "resGroup1", + ServiceName: "service1", + GatewayName: "gateway1", + ApiName: "api1", + }, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/RESGROUP1/PROVIDERS/MICROSOFT.APIMANAGEMENT/SERVICE/SERVICE1/GATEWAYS/GATEWAY1/APIS/API1", + Error: true, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := GatewayApiID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", 
v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + if actual.GatewayName != v.Expected.GatewayName { + t.Fatalf("Expected %q but got %q for GatewayName", v.Expected.GatewayName, actual.GatewayName) + } + if actual.ApiName != v.Expected.ApiName { + t.Fatalf("Expected %q but got %q for ApiName", v.Expected.ApiName, actual.ApiName) + } + } +} diff --git a/azurerm/internal/services/apimanagement/parse/gateway_test.go b/azurerm/internal/services/apimanagement/parse/gateway_test.go new file mode 100644 index 000000000000..9069fb1bb9da --- /dev/null +++ b/azurerm/internal/services/apimanagement/parse/gateway_test.go @@ -0,0 +1,128 @@ +package parse + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "testing" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/resourceid" +) + +var _ resourceid.Formatter = GatewayId{} + +func TestGatewayIDFormatter(t *testing.T) { + actual := NewGatewayID("12345678-1234-9876-4563-123456789012", "resGroup1", "service1", "gateway1").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/gateways/gateway1" + if actual != expected { + t.Fatalf("Expected %q but got %q", expected, actual) + } +} + +func TestGatewayID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *GatewayId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/", + Error: true, + }, + + { + // missing ServiceName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/", + Error: true, + }, + + { + // missing value for ServiceName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/", + Error: true, + }, + + { + // missing Name + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/", + Error: true, + }, + + { + // missing value for Name + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/gateways/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/gateways/gateway1", + Expected: &GatewayId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroup: "resGroup1", + ServiceName: "service1", + Name: "gateway1", + }, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/RESGROUP1/PROVIDERS/MICROSOFT.APIMANAGEMENT/SERVICE/SERVICE1/GATEWAYS/GATEWAY1", + Error: true, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := GatewayID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if 
actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + if actual.Name != v.Expected.Name { + t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + } + } +} diff --git a/azurerm/internal/services/apimanagement/parse/operation_tag.go b/azurerm/internal/services/apimanagement/parse/operation_tag.go new file mode 100644 index 000000000000..a2ec10dac29c --- /dev/null +++ b/azurerm/internal/services/apimanagement/parse/operation_tag.go @@ -0,0 +1,87 @@ +package parse + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "fmt" + "strings" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" +) + +type OperationTagId struct { + SubscriptionId string + ResourceGroup string + ServiceName string + ApiName string + OperationName string + TagName string +} + +func NewOperationTagID(subscriptionId, resourceGroup, serviceName, apiName, operationName, tagName string) OperationTagId { + return OperationTagId{ + SubscriptionId: subscriptionId, + ResourceGroup: resourceGroup, + ServiceName: serviceName, + ApiName: apiName, + OperationName: operationName, + TagName: tagName, + } +} + +func (id OperationTagId) String() string { + segments := []string{ + fmt.Sprintf("Tag Name %q", id.TagName), + fmt.Sprintf("Operation Name %q", id.OperationName), + fmt.Sprintf("Api Name %q", id.ApiName), + fmt.Sprintf("Service Name %q", id.ServiceName), + fmt.Sprintf("Resource Group %q", id.ResourceGroup), + } + segmentsStr := strings.Join(segments, " / ") + return fmt.Sprintf("%s: (%s)", "Operation Tag", segmentsStr) +} + +func (id OperationTagId) ID() string { + fmtString := 
"/subscriptions/%s/resourceGroups/%s/providers/Microsoft.ApiManagement/service/%s/apis/%s/operations/%s/tags/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.ServiceName, id.ApiName, id.OperationName, id.TagName) +} + +// OperationTagID parses a OperationTag ID into an OperationTagId struct +func OperationTagID(input string) (*OperationTagId, error) { + id, err := azure.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := OperationTagId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + if resourceId.ServiceName, err = id.PopSegment("service"); err != nil { + return nil, err + } + if resourceId.ApiName, err = id.PopSegment("apis"); err != nil { + return nil, err + } + if resourceId.OperationName, err = id.PopSegment("operations"); err != nil { + return nil, err + } + if resourceId.TagName, err = id.PopSegment("tags"); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} diff --git a/azurerm/internal/services/apimanagement/parse/operation_tag_test.go b/azurerm/internal/services/apimanagement/parse/operation_tag_test.go new file mode 100644 index 000000000000..8d2b641a2fb6 --- /dev/null +++ b/azurerm/internal/services/apimanagement/parse/operation_tag_test.go @@ -0,0 +1,160 @@ +package parse + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "testing" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/resourceid" +) + +var _ resourceid.Formatter = OperationTagId{} + +func TestOperationTagIDFormatter(t *testing.T) { + actual := 
NewOperationTagID("12345678-1234-9876-4563-123456789012", "resGroup1", "service1", "api1", "operation1", "tag1").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/apis/api1/operations/operation1/tags/tag1" + if actual != expected { + t.Fatalf("Expected %q but got %q", expected, actual) + } +} + +func TestOperationTagID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *OperationTagId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/", + Error: true, + }, + + { + // missing ServiceName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/", + Error: true, + }, + + { + // missing value for ServiceName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/", + Error: true, + }, + + { + // missing ApiName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/", + Error: true, + }, + + { + // missing value for ApiName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/apis/", + Error: true, + }, + + { + // missing OperationName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/apis/api1/", + Error: true, + }, + + { + // missing value for OperationName + 
Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/apis/api1/operations/", + Error: true, + }, + + { + // missing TagName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/apis/api1/operations/operation1/", + Error: true, + }, + + { + // missing value for TagName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/apis/api1/operations/operation1/tags/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/apis/api1/operations/operation1/tags/tag1", + Expected: &OperationTagId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroup: "resGroup1", + ServiceName: "service1", + ApiName: "api1", + OperationName: "operation1", + TagName: "tag1", + }, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/RESGROUP1/PROVIDERS/MICROSOFT.APIMANAGEMENT/SERVICE/SERVICE1/APIS/API1/OPERATIONS/OPERATION1/TAGS/TAG1", + Error: true, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := OperationTagID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q 
for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + if actual.ApiName != v.Expected.ApiName { + t.Fatalf("Expected %q but got %q for ApiName", v.Expected.ApiName, actual.ApiName) + } + if actual.OperationName != v.Expected.OperationName { + t.Fatalf("Expected %q but got %q for OperationName", v.Expected.OperationName, actual.OperationName) + } + if actual.TagName != v.Expected.TagName { + t.Fatalf("Expected %q but got %q for TagName", v.Expected.TagName, actual.TagName) + } + } +} diff --git a/azurerm/internal/services/apimanagement/parse/redis_cache.go b/azurerm/internal/services/apimanagement/parse/redis_cache.go new file mode 100644 index 000000000000..8ee299db3379 --- /dev/null +++ b/azurerm/internal/services/apimanagement/parse/redis_cache.go @@ -0,0 +1,75 @@ +package parse + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "fmt" + "strings" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" +) + +type RedisCacheId struct { + SubscriptionId string + ResourceGroup string + ServiceName string + CacheName string +} + +func NewRedisCacheID(subscriptionId, resourceGroup, serviceName, cacheName string) RedisCacheId { + return RedisCacheId{ + SubscriptionId: subscriptionId, + ResourceGroup: resourceGroup, + ServiceName: serviceName, + CacheName: cacheName, + } +} + +func (id RedisCacheId) String() string { + segments := []string{ + fmt.Sprintf("Cache Name %q", id.CacheName), + fmt.Sprintf("Service Name %q", id.ServiceName), + fmt.Sprintf("Resource Group %q", id.ResourceGroup), + } + segmentsStr := strings.Join(segments, " / ") + return fmt.Sprintf("%s: (%s)", "Redis Cache", segmentsStr) +} + +func (id RedisCacheId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.ApiManagement/service/%s/caches/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.ServiceName, id.CacheName) +} + +// RedisCacheID parses a 
RedisCache ID into an RedisCacheId struct +func RedisCacheID(input string) (*RedisCacheId, error) { + id, err := azure.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := RedisCacheId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + if resourceId.ServiceName, err = id.PopSegment("service"); err != nil { + return nil, err + } + if resourceId.CacheName, err = id.PopSegment("caches"); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} diff --git a/azurerm/internal/services/apimanagement/parse/redis_cache_test.go b/azurerm/internal/services/apimanagement/parse/redis_cache_test.go new file mode 100644 index 000000000000..44c95512ccfc --- /dev/null +++ b/azurerm/internal/services/apimanagement/parse/redis_cache_test.go @@ -0,0 +1,128 @@ +package parse + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "testing" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/resourceid" +) + +var _ resourceid.Formatter = RedisCacheId{} + +func TestRedisCacheIDFormatter(t *testing.T) { + actual := NewRedisCacheID("12345678-1234-9876-4563-123456789012", "resGroup1", "service1", "redisCache1").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/caches/redisCache1" + if actual != expected { + t.Fatalf("Expected %q but got %q", expected, actual) + } +} + +func TestRedisCacheID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *RedisCacheId + }{ + + { + // empty + Input: "", + Error: true, + }, + 
+ { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/", + Error: true, + }, + + { + // missing ServiceName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/", + Error: true, + }, + + { + // missing value for ServiceName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/", + Error: true, + }, + + { + // missing CacheName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/", + Error: true, + }, + + { + // missing value for CacheName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/caches/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/caches/redisCache1", + Expected: &RedisCacheId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroup: "resGroup1", + ServiceName: "service1", + CacheName: "redisCache1", + }, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/RESGROUP1/PROVIDERS/MICROSOFT.APIMANAGEMENT/SERVICE/SERVICE1/CACHES/REDISCACHE1", + Error: true, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := RedisCacheID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + 
t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + if actual.CacheName != v.Expected.CacheName { + t.Fatalf("Expected %q but got %q for CacheName", v.Expected.CacheName, actual.CacheName) + } + } +} diff --git a/azurerm/internal/services/apimanagement/registration.go b/azurerm/internal/services/apimanagement/registration.go index 33df5d6e44b1..718e5906bbf3 100644 --- a/azurerm/internal/services/apimanagement/registration.go +++ b/azurerm/internal/services/apimanagement/registration.go @@ -24,6 +24,7 @@ func (r Registration) SupportedDataSources() map[string]*pluginsdk.Resource { "azurerm_api_management": dataSourceApiManagementService(), "azurerm_api_management_api": dataSourceApiManagementApi(), "azurerm_api_management_api_version_set": dataSourceApiManagementApiVersionSet(), + "azurerm_api_management_gateway": dataSourceApiManagementGateway(), "azurerm_api_management_group": dataSourceApiManagementGroup(), "azurerm_api_management_product": dataSourceApiManagementProduct(), "azurerm_api_management_user": dataSourceApiManagementUser(), @@ -37,8 +38,10 @@ func (r Registration) SupportedResources() map[string]*pluginsdk.Resource { "azurerm_api_management_api": resourceApiManagementApi(), "azurerm_api_management_api_diagnostic": resourceApiManagementApiDiagnostic(), "azurerm_api_management_api_operation": resourceApiManagementApiOperation(), + "azurerm_api_management_api_operation_tag": resourceApiManagementApiOperationTag(), "azurerm_api_management_api_operation_policy": 
resourceApiManagementApiOperationPolicy(), "azurerm_api_management_api_policy": resourceApiManagementApiPolicy(), + "azurerm_api_management_api_release": resourceApiManagementApiRelease(), "azurerm_api_management_api_schema": resourceApiManagementApiSchema(), "azurerm_api_management_api_version_set": resourceApiManagementApiVersionSet(), "azurerm_api_management_authorization_server": resourceApiManagementAuthorizationServer(), @@ -47,6 +50,8 @@ func (r Registration) SupportedResources() map[string]*pluginsdk.Resource { "azurerm_api_management_custom_domain": resourceApiManagementCustomDomain(), "azurerm_api_management_diagnostic": resourceApiManagementDiagnostic(), "azurerm_api_management_email_template": resourceApiManagementEmailTemplate(), + "azurerm_api_management_gateway": resourceApiManagementGateway(), + "azurerm_api_management_gateway_api": resourceApiManagementGatewayApi(), "azurerm_api_management_group": resourceApiManagementGroup(), "azurerm_api_management_group_user": resourceApiManagementGroupUser(), "azurerm_api_management_identity_provider_aad": resourceApiManagementIdentityProviderAAD(), @@ -64,6 +69,7 @@ func (r Registration) SupportedResources() map[string]*pluginsdk.Resource { "azurerm_api_management_product_group": resourceApiManagementProductGroup(), "azurerm_api_management_product_policy": resourceApiManagementProductPolicy(), "azurerm_api_management_property": resourceApiManagementProperty(), + "azurerm_api_management_redis_cache": resourceApiManagementRedisCache(), "azurerm_api_management_subscription": resourceApiManagementSubscription(), "azurerm_api_management_user": resourceApiManagementUser(), } diff --git a/azurerm/internal/services/apimanagement/resourceids.go b/azurerm/internal/services/apimanagement/resourceids.go index 92beb576d3dc..9c534cc468fe 100644 --- a/azurerm/internal/services/apimanagement/resourceids.go +++ b/azurerm/internal/services/apimanagement/resourceids.go @@ -10,10 +10,13 @@ package apimanagement //go:generate go 
run ../../tools/generator-resource-id/main.go -path=./ -name=ApiVersionSet -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/apiVersionSets/apiVersionSet1 //go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=AuthorizationServer -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/authorizationServers/authorizationserver1 //go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=Backend -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/backends/backend1 +//go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=RedisCache -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/caches/redisCache1 //go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=Certificate -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/certificates/certificate1 //go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=CustomDomain -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/customDomains/customdomain //go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=Diagnostic -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/diagnostics/diagnostic1 //go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=EmailTemplate -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/templates/template1 +//go:generate go run 
../../tools/generator-resource-id/main.go -path=./ -name=Gateway -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/gateways/gateway1 +//go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=GatewayApi -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/gateways/gateway1/apis/api1 //go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=Group -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/groups/group1 //go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=GroupUser -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/groups/group1/users/user1 //go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=IdentityProvider -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/identityProviders/identityProvider1 @@ -28,3 +31,5 @@ package apimanagement //go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=Property -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/namedValues/namedvalue1 //go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=Subscription -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/subscriptions/subscription1 //go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=User -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/users/user1 +//go:generate go run 
../../tools/generator-resource-id/main.go -path=./ -name=OperationTag -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/apis/api1/operations/operation1/tags/tag1 +//go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=ApiRelease -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/apis/api1/releases/release1 diff --git a/azurerm/internal/services/apimanagement/schema.go b/azurerm/internal/services/apimanagement/schema.go index d74023d46919..e9bbfb902736 100644 --- a/azurerm/internal/services/apimanagement/schema.go +++ b/azurerm/internal/services/apimanagement/schema.go @@ -3,9 +3,8 @@ package apimanagement import ( keyVaultValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/keyvault/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/suppress" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" ) func apiManagementResourceHostnameSchema() map[string]*pluginsdk.Schema { diff --git a/azurerm/internal/services/apimanagement/schemaz/api_management.go b/azurerm/internal/services/apimanagement/schemaz/api_management.go index 06df10b3a1d0..ac84ffe2a698 100644 --- a/azurerm/internal/services/apimanagement/schemaz/api_management.go +++ b/azurerm/internal/services/apimanagement/schemaz/api_management.go @@ -110,19 +110,19 @@ func SchemaApiManagementOperationRepresentation() *pluginsdk.Schema { } } -func ExpandApiManagementOperationRepresentation(input []interface{}) (*[]apimanagement.RepresentationContract, error) { +func ExpandApiManagementOperationRepresentation(d *pluginsdk.ResourceData, 
schemaPath string, input []interface{}) (*[]apimanagement.RepresentationContract, error) { if len(input) == 0 { return &[]apimanagement.RepresentationContract{}, nil } outputs := make([]apimanagement.RepresentationContract, 0) - for _, v := range input { + for i, v := range input { vs := v.(map[string]interface{}) contentType := vs["content_type"].(string) formParametersRaw := vs["form_parameter"].([]interface{}) - formParameters := ExpandApiManagementOperationParameterContract(formParametersRaw) + formParameters := ExpandApiManagementOperationParameterContract(d, fmt.Sprintf("%s.%d.form_parameter", schemaPath, i), formParametersRaw) sample := vs["sample"].(string) schemaId := vs["schema_id"].(string) typeName := vs["type_name"].(string) @@ -233,20 +233,19 @@ func SchemaApiManagementOperationParameterContract() *pluginsdk.Schema { } } -func ExpandApiManagementOperationParameterContract(input []interface{}) *[]apimanagement.ParameterContract { +func ExpandApiManagementOperationParameterContract(d *pluginsdk.ResourceData, schemaPath string, input []interface{}) *[]apimanagement.ParameterContract { if len(input) == 0 { return &[]apimanagement.ParameterContract{} } outputs := make([]apimanagement.ParameterContract, 0) - for _, v := range input { + for i, v := range input { vs := v.(map[string]interface{}) name := vs["name"].(string) description := vs["description"].(string) paramType := vs["type"].(string) - defaultValue := vs["default_value"].(string) required := vs["required"].(bool) valuesRaw := vs["values"].(*pluginsdk.Set).List() @@ -255,9 +254,16 @@ func ExpandApiManagementOperationParameterContract(input []interface{}) *[]apima Description: utils.String(description), Type: utils.String(paramType), Required: utils.Bool(required), - DefaultValue: utils.String(defaultValue), + DefaultValue: nil, Values: utils.ExpandStringSlice(valuesRaw), } + + // DefaultValue must be included in Values, else it returns error + // when DefaultValue is unset, we need to set it nil + 
// "" is a valid DefaultValue + if v, ok := d.GetOk(fmt.Sprintf("%s.%d.default_value", schemaPath, i)); ok { + output.DefaultValue = utils.String(v.(string)) + } outputs = append(outputs, output) } diff --git a/azurerm/internal/services/apimanagement/testdata/api_management_api_schema_swagger.json b/azurerm/internal/services/apimanagement/testdata/api_management_api_schema_swagger.json new file mode 100644 index 000000000000..66d0d15d10be --- /dev/null +++ b/azurerm/internal/services/apimanagement/testdata/api_management_api_schema_swagger.json @@ -0,0 +1 @@ +{"schema-bug-example":{"properties":{"Field2":{"description":"Field2","type":"string"},"field1":{"description":"Field1","type":"string"}},"required":["field1","Field2"],"type":"object"}} diff --git a/azurerm/internal/services/apimanagement/validate/api_release_id.go b/azurerm/internal/services/apimanagement/validate/api_release_id.go new file mode 100644 index 000000000000..a6e3296c3a3e --- /dev/null +++ b/azurerm/internal/services/apimanagement/validate/api_release_id.go @@ -0,0 +1,23 @@ +package validate + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "fmt" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" +) + +func ApiReleaseID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := parse.ApiReleaseID(v); err != nil { + errors = append(errors, err) + } + + return +} diff --git a/azurerm/internal/services/apimanagement/validate/api_release_id_test.go b/azurerm/internal/services/apimanagement/validate/api_release_id_test.go new file mode 100644 index 000000000000..079789cf3597 --- /dev/null +++ b/azurerm/internal/services/apimanagement/validate/api_release_id_test.go @@ -0,0 +1,100 @@ +package validate + +// NOTE: this file is generated via 
'go:generate' - manual changes will be overwritten + +import "testing" + +func TestApiReleaseID(t *testing.T) { + cases := []struct { + Input string + Valid bool + }{ + + { + // empty + Input: "", + Valid: false, + }, + + { + // missing SubscriptionId + Input: "/", + Valid: false, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Valid: false, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/", + Valid: false, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/", + Valid: false, + }, + + { + // missing ServiceName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/", + Valid: false, + }, + + { + // missing value for ServiceName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/", + Valid: false, + }, + + { + // missing ApiName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/", + Valid: false, + }, + + { + // missing value for ApiName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/apis/", + Valid: false, + }, + + { + // missing ReleaseName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/apis/api1/", + Valid: false, + }, + + { + // missing value for ReleaseName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/apis/api1/releases/", + Valid: false, + }, + + { + // valid + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/apis/api1/releases/release1", + Valid: true, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/RESGROUP1/PROVIDERS/MICROSOFT.APIMANAGEMENT/SERVICE/SERVICE1/APIS/API1/RELEASES/RELEASE1", + Valid: false, + }, + } + for _, tc := range cases { + t.Logf("[DEBUG] Testing Value %s", tc.Input) + _, errors := ApiReleaseID(tc.Input, "test") + valid := len(errors) == 0 + + if tc.Valid != valid { + t.Fatalf("Expected %t but got %t", tc.Valid, valid) + } + } +} diff --git a/azurerm/internal/services/apimanagement/validate/gateway_api_id.go b/azurerm/internal/services/apimanagement/validate/gateway_api_id.go new file mode 100644 index 000000000000..8c8f25723f2b --- /dev/null +++ b/azurerm/internal/services/apimanagement/validate/gateway_api_id.go @@ -0,0 +1,23 @@ +package validate + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "fmt" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" +) + +func GatewayApiID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := parse.GatewayApiID(v); err != nil { + errors = append(errors, err) + } + + return +} diff --git a/azurerm/internal/services/apimanagement/validate/gateway_api_id_test.go b/azurerm/internal/services/apimanagement/validate/gateway_api_id_test.go new file mode 100644 index 000000000000..82ce16f87847 --- /dev/null +++ b/azurerm/internal/services/apimanagement/validate/gateway_api_id_test.go @@ -0,0 +1,100 @@ +package validate + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import "testing" + +func TestGatewayApiID(t *testing.T) { + cases 
:= []struct { + Input string + Valid bool + }{ + + { + // empty + Input: "", + Valid: false, + }, + + { + // missing SubscriptionId + Input: "/", + Valid: false, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Valid: false, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/", + Valid: false, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/", + Valid: false, + }, + + { + // missing ServiceName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/", + Valid: false, + }, + + { + // missing value for ServiceName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/", + Valid: false, + }, + + { + // missing GatewayName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/", + Valid: false, + }, + + { + // missing value for GatewayName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/gateways/", + Valid: false, + }, + + { + // missing ApiName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/gateways/gateway1/", + Valid: false, + }, + + { + // missing value for ApiName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/gateways/gateway1/apis/", + Valid: false, + }, + + { + // valid + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/gateways/gateway1/apis/api1", + Valid: true, + }, + + { + // upper-cased + Input: 
"/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/RESGROUP1/PROVIDERS/MICROSOFT.APIMANAGEMENT/SERVICE/SERVICE1/GATEWAYS/GATEWAY1/APIS/API1", + Valid: false, + }, + } + for _, tc := range cases { + t.Logf("[DEBUG] Testing Value %s", tc.Input) + _, errors := GatewayApiID(tc.Input, "test") + valid := len(errors) == 0 + + if tc.Valid != valid { + t.Fatalf("Expected %t but got %t", tc.Valid, valid) + } + } +} diff --git a/azurerm/internal/services/apimanagement/validate/gateway_id.go b/azurerm/internal/services/apimanagement/validate/gateway_id.go new file mode 100644 index 000000000000..4a25dcca4669 --- /dev/null +++ b/azurerm/internal/services/apimanagement/validate/gateway_id.go @@ -0,0 +1,23 @@ +package validate + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "fmt" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" +) + +func GatewayID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := parse.GatewayID(v); err != nil { + errors = append(errors, err) + } + + return +} diff --git a/azurerm/internal/services/apimanagement/validate/gateway_id_test.go b/azurerm/internal/services/apimanagement/validate/gateway_id_test.go new file mode 100644 index 000000000000..934e0073e617 --- /dev/null +++ b/azurerm/internal/services/apimanagement/validate/gateway_id_test.go @@ -0,0 +1,88 @@ +package validate + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import "testing" + +func TestGatewayID(t *testing.T) { + cases := []struct { + Input string + Valid bool + }{ + + { + // empty + Input: "", + Valid: false, + }, + + { + // missing SubscriptionId + Input: "/", + Valid: false, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + 
Valid: false, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/", + Valid: false, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/", + Valid: false, + }, + + { + // missing ServiceName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/", + Valid: false, + }, + + { + // missing value for ServiceName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/", + Valid: false, + }, + + { + // missing Name + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/", + Valid: false, + }, + + { + // missing value for Name + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/gateways/", + Valid: false, + }, + + { + // valid + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/gateways/gateway1", + Valid: true, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/RESGROUP1/PROVIDERS/MICROSOFT.APIMANAGEMENT/SERVICE/SERVICE1/GATEWAYS/GATEWAY1", + Valid: false, + }, + } + for _, tc := range cases { + t.Logf("[DEBUG] Testing Value %s", tc.Input) + _, errors := GatewayID(tc.Input, "test") + valid := len(errors) == 0 + + if tc.Valid != valid { + t.Fatalf("Expected %t but got %t", tc.Valid, valid) + } + } +} diff --git a/azurerm/internal/services/apimanagement/validate/operation_tag_id.go b/azurerm/internal/services/apimanagement/validate/operation_tag_id.go new file mode 100644 index 000000000000..6c6e5609ca1d --- /dev/null +++ b/azurerm/internal/services/apimanagement/validate/operation_tag_id.go @@ -0,0 +1,23 @@ 
+package validate + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "fmt" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" +) + +func OperationTagID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := parse.OperationTagID(v); err != nil { + errors = append(errors, err) + } + + return +} diff --git a/azurerm/internal/services/apimanagement/validate/operation_tag_id_test.go b/azurerm/internal/services/apimanagement/validate/operation_tag_id_test.go new file mode 100644 index 000000000000..36bf41b4327e --- /dev/null +++ b/azurerm/internal/services/apimanagement/validate/operation_tag_id_test.go @@ -0,0 +1,112 @@ +package validate + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import "testing" + +func TestOperationTagID(t *testing.T) { + cases := []struct { + Input string + Valid bool + }{ + + { + // empty + Input: "", + Valid: false, + }, + + { + // missing SubscriptionId + Input: "/", + Valid: false, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Valid: false, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/", + Valid: false, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/", + Valid: false, + }, + + { + // missing ServiceName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/", + Valid: false, + }, + + { + // missing value for ServiceName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/", + Valid: false, + }, + + { + // missing ApiName + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/", + Valid: false, + }, + + { + // missing value for ApiName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/apis/", + Valid: false, + }, + + { + // missing OperationName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/apis/api1/", + Valid: false, + }, + + { + // missing value for OperationName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/apis/api1/operations/", + Valid: false, + }, + + { + // missing TagName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/apis/api1/operations/operation1/", + Valid: false, + }, + + { + // missing value for TagName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/apis/api1/operations/operation1/tags/", + Valid: false, + }, + + { + // valid + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/apis/api1/operations/operation1/tags/tag1", + Valid: true, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/RESGROUP1/PROVIDERS/MICROSOFT.APIMANAGEMENT/SERVICE/SERVICE1/APIS/API1/OPERATIONS/OPERATION1/TAGS/TAG1", + Valid: false, + }, + } + for _, tc := range cases { + t.Logf("[DEBUG] Testing Value %s", tc.Input) + _, errors := OperationTagID(tc.Input, "test") + valid := len(errors) == 0 + + if tc.Valid != valid { + t.Fatalf("Expected %t but got %t", tc.Valid, valid) + } + } +} diff --git 
a/azurerm/internal/services/apimanagement/validate/redis_cache_id.go b/azurerm/internal/services/apimanagement/validate/redis_cache_id.go new file mode 100644 index 000000000000..9fc80ddcda5c --- /dev/null +++ b/azurerm/internal/services/apimanagement/validate/redis_cache_id.go @@ -0,0 +1,23 @@ +package validate + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "fmt" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/apimanagement/parse" +) + +func RedisCacheID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := parse.RedisCacheID(v); err != nil { + errors = append(errors, err) + } + + return +} diff --git a/azurerm/internal/services/apimanagement/validate/redis_cache_id_test.go b/azurerm/internal/services/apimanagement/validate/redis_cache_id_test.go new file mode 100644 index 000000000000..ef1bcd778e8e --- /dev/null +++ b/azurerm/internal/services/apimanagement/validate/redis_cache_id_test.go @@ -0,0 +1,88 @@ +package validate + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import "testing" + +func TestRedisCacheID(t *testing.T) { + cases := []struct { + Input string + Valid bool + }{ + + { + // empty + Input: "", + Valid: false, + }, + + { + // missing SubscriptionId + Input: "/", + Valid: false, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Valid: false, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/", + Valid: false, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/", + Valid: false, + }, + + { + // missing ServiceName + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/", + Valid: false, + }, + + { + // missing value for ServiceName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/", + Valid: false, + }, + + { + // missing CacheName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/", + Valid: false, + }, + + { + // missing value for CacheName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/caches/", + Valid: false, + }, + + { + // valid + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/caches/redisCache1", + Valid: true, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/RESGROUP1/PROVIDERS/MICROSOFT.APIMANAGEMENT/SERVICE/SERVICE1/CACHES/REDISCACHE1", + Valid: false, + }, + } + for _, tc := range cases { + t.Logf("[DEBUG] Testing Value %s", tc.Input) + _, errors := RedisCacheID(tc.Input, "test") + valid := len(errors) == 0 + + if tc.Valid != valid { + t.Fatalf("Expected %t but got %t", tc.Valid, valid) + } + } +} diff --git a/azurerm/internal/services/apimanagement/validate/redis_cache_location.go b/azurerm/internal/services/apimanagement/validate/redis_cache_location.go new file mode 100644 index 000000000000..f151fc697a2c --- /dev/null +++ b/azurerm/internal/services/apimanagement/validate/redis_cache_location.go @@ -0,0 +1,21 @@ +package validate + +import ( + "fmt" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/location" +) + +func RedisCacheLocation(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, 
fmt.Errorf("expected %q to be a string", key)) + return + } + + if v == "default" { + return warnings, errors + } + + return location.EnhancedValidate(v, key) +} diff --git a/azurerm/internal/services/appconfiguration/app_configuration_data_source.go b/azurerm/internal/services/appconfiguration/app_configuration_data_source.go index bbb397c293ff..80df1c9bea48 100644 --- a/azurerm/internal/services/appconfiguration/app_configuration_data_source.go +++ b/azurerm/internal/services/appconfiguration/app_configuration_data_source.go @@ -4,14 +4,15 @@ import ( "fmt" "time" + "github.com/hashicorp/go-azure-helpers/response" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/appconfiguration/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/location" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/appconfiguration/sdk/configurationstores" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/appconfiguration/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) func dataSourceAppConfiguration() *pluginsdk.Resource { @@ -145,7 +146,7 @@ func dataSourceAppConfiguration() *pluginsdk.Resource { } func dataSourceAppConfigurationRead(d *pluginsdk.ResourceData, meta interface{}) error { - client := meta.(*clients.Client).AppConfiguration.AppConfigurationsClient + client := meta.(*clients.Client).AppConfiguration.ConfigurationStoresClient subscriptionId := 
meta.(*clients.Client).Account.SubscriptionId ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() @@ -153,41 +154,39 @@ func dataSourceAppConfigurationRead(d *pluginsdk.ResourceData, meta interface{}) name := d.Get("name").(string) resourceGroup := d.Get("resource_group_name").(string) - resp, err := client.Get(ctx, resourceGroup, name) + id := configurationstores.NewConfigurationStoreID(subscriptionId, resourceGroup, name) + resp, err := client.Get(ctx, id) if err != nil { - if utils.ResponseWasNotFound(resp.Response) { - return fmt.Errorf("App Configuration %q was not found in Resource Group %q", name, resourceGroup) + if response.WasNotFound(resp.HttpResponse) { + return fmt.Errorf("%s was not found", id) } - return fmt.Errorf("Error retrieving App Configuration %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("retrieving %s: %+v", id, err) } - resultPage, err := client.ListKeys(ctx, resourceGroup, name, "") + resultPage, err := client.ListKeysComplete(ctx, id) if err != nil { - return fmt.Errorf("Failed to receive access keys for App Configuration %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("retrieving access keys for %s: %+v", id, err) } - d.SetId(parse.NewConfigurationStoreID(subscriptionId, resourceGroup, name).ID()) + d.SetId(id.ID()) - if location := resp.Location; location != nil { - d.Set("location", azure.NormalizeLocation(*location)) - } + if model := resp.Model; model != nil { + d.Set("location", location.Normalize(model.Location)) + d.Set("sku", model.Sku.Name) - skuName := "" - if resp.Sku != nil && resp.Sku.Name != nil { - skuName = *resp.Sku.Name - } - d.Set("sku", skuName) + if props := model.Properties; props != nil { + d.Set("endpoint", props.Endpoint) + } - if props := resp.ConfigurationStoreProperties; props != nil { - d.Set("endpoint", props.Endpoint) - } + accessKeys := flattenAppConfigurationAccessKeys(resultPage.Items) + d.Set("primary_read_key", 
accessKeys.primaryReadKey) + d.Set("primary_write_key", accessKeys.primaryWriteKey) + d.Set("secondary_read_key", accessKeys.secondaryReadKey) + d.Set("secondary_write_key", accessKeys.secondaryWriteKey) - accessKeys := flattenAppConfigurationAccessKeys(resultPage.Values()) - d.Set("primary_read_key", accessKeys.primaryReadKey) - d.Set("primary_write_key", accessKeys.primaryWriteKey) - d.Set("secondary_read_key", accessKeys.secondaryReadKey) - d.Set("secondary_write_key", accessKeys.secondaryWriteKey) + return tags.FlattenAndSet(d, flattenTags(model.Tags)) + } - return tags.FlattenAndSet(d, resp.Tags) + return nil } diff --git a/azurerm/internal/services/appconfiguration/app_configuration_resource.go b/azurerm/internal/services/appconfiguration/app_configuration_resource.go index b9c04c4c398d..fa91fa90edcc 100644 --- a/azurerm/internal/services/appconfiguration/app_configuration_resource.go +++ b/azurerm/internal/services/appconfiguration/app_configuration_resource.go @@ -11,13 +11,13 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/appconfiguration/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/location" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/appconfiguration/sdk/configurationstores" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/appconfiguration/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) func resourceAppConfiguration() *pluginsdk.Resource { @@ -35,7 +35,7 @@ func resourceAppConfiguration() *pluginsdk.Resource { }, Importer: pluginsdk.ImporterValidatingResourceId(func(id string) error { - _, err := parse.ConfigurationStoreID(id) + _, err := configurationstores.ConfigurationStoreID(id) return err }), @@ -195,7 +195,7 @@ func resourceAppConfiguration() *pluginsdk.Resource { } func resourceAppConfigurationCreate(d *pluginsdk.ResourceData, meta interface{}) error { - client := meta.(*clients.Client).AppConfiguration.AppConfigurationsClient + client := meta.(*clients.Client).AppConfiguration.ConfigurationStoresClient subscriptionId := meta.(*clients.Client).Account.SubscriptionId ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -204,161 +204,128 @@ func resourceAppConfigurationCreate(d *pluginsdk.ResourceData, meta interface{}) name := d.Get("name").(string) resourceGroup := d.Get("resource_group_name").(string) - resourceId := parse.NewConfigurationStoreID(subscriptionId, resourceGroup, name).ID() - existing, err := client.Get(ctx, resourceGroup, name) + resourceId := configurationstores.NewConfigurationStoreID(subscriptionId, resourceGroup, name) + existing, err := client.Get(ctx, resourceId) if err != nil { - if !utils.ResponseWasNotFound(existing.Response) { - return fmt.Errorf("Error checking for presence of existing App Configuration %q (Resource Group %q): %s", name, resourceGroup, err) + if !response.WasNotFound(existing.HttpResponse) { + return fmt.Errorf("checking for presence of existing %s: %+v", resourceId, err) } } - if !utils.ResponseWasNotFound(existing.Response) { - return tf.ImportAsExistsError("azurerm_app_configuration", resourceId) + if !response.WasNotFound(existing.HttpResponse) { + return 
tf.ImportAsExistsError("azurerm_app_configuration", resourceId.ID()) } - parameters := appconfiguration.ConfigurationStore{ - Location: utils.String(azure.NormalizeLocation(d.Get("location").(string))), - Sku: &appconfiguration.Sku{ - Name: utils.String(d.Get("sku").(string)), + parameters := configurationstores.ConfigurationStore{ + Location: azure.NormalizeLocation(d.Get("location").(string)), + Sku: configurationstores.Sku{ + Name: d.Get("sku").(string), }, - Tags: tags.Expand(d.Get("tags").(map[string]interface{})), + Tags: expandTags(d.Get("tags").(map[string]interface{})), } parameters.Identity = expandAppConfigurationIdentity(d.Get("identity").([]interface{})) - future, err := client.Create(ctx, resourceGroup, name, parameters) - if err != nil { - return fmt.Errorf("Error creating App Configuration %q (Resource Group %q): %+v", name, resourceGroup, err) - } - - if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { - return fmt.Errorf("Error waiting for creation of App Configuration %q (Resource Group %q): %+v", name, resourceGroup, err) + if err := client.CreateThenPoll(ctx, resourceId, parameters); err != nil { + return fmt.Errorf("creating %s: %+v", resourceId, err) } - d.SetId(resourceId) + d.SetId(resourceId.ID()) return resourceAppConfigurationRead(d, meta) } func resourceAppConfigurationUpdate(d *pluginsdk.ResourceData, meta interface{}) error { - client := meta.(*clients.Client).AppConfiguration.AppConfigurationsClient - ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) + client := meta.(*clients.Client).AppConfiguration.ConfigurationStoresClient + ctx, cancel := timeouts.ForUpdate(meta.(*clients.Client).StopContext, d) defer cancel() log.Printf("[INFO] preparing arguments for Azure ARM App Configuration update.") - id, err := parse.ConfigurationStoreID(d.Id()) + id, err := configurationstores.ConfigurationStoreID(d.Id()) if err != nil { return err } - parameters := 
appconfiguration.ConfigurationStoreUpdateParameters{ - Sku: &appconfiguration.Sku{ - Name: utils.String(d.Get("sku").(string)), + parameters := configurationstores.ConfigurationStoreUpdateParameters{ + Sku: &configurationstores.Sku{ + Name: d.Get("sku").(string), }, - Tags: tags.Expand(d.Get("tags").(map[string]interface{})), + Tags: expandTags(d.Get("tags").(map[string]interface{})), } if d.HasChange("identity") { parameters.Identity = expandAppConfigurationIdentity(d.Get("identity").([]interface{})) } - future, err := client.Update(ctx, id.ResourceGroup, id.Name, parameters) - if err != nil { - return fmt.Errorf("Error updating App Configuration %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err) - } - - if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { - return fmt.Errorf("Error waiting for update of App Configuration %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err) - } - - read, err := client.Get(ctx, id.ResourceGroup, id.Name) - if err != nil { - return fmt.Errorf("Error retrieving App Configuration %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err) - } - if read.ID == nil { - return fmt.Errorf("Cannot read App Configuration %s (resource Group %q) ID", id.Name, id.ResourceGroup) + if err := client.UpdateThenPoll(ctx, *id, parameters); err != nil { + return fmt.Errorf("updating %s: %+v", *id, err) } - d.SetId(*read.ID) - return resourceAppConfigurationRead(d, meta) } func resourceAppConfigurationRead(d *pluginsdk.ResourceData, meta interface{}) error { - client := meta.(*clients.Client).AppConfiguration.AppConfigurationsClient + client := meta.(*clients.Client).AppConfiguration.ConfigurationStoresClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := parse.ConfigurationStoreID(d.Id()) + id, err := configurationstores.ConfigurationStoreID(d.Id()) if err != nil { return err } - resp, err := client.Get(ctx, id.ResourceGroup, id.Name) + resp, err := 
client.Get(ctx, *id) if err != nil { - if utils.ResponseWasNotFound(resp.Response) { - log.Printf("[DEBUG] App Configuration %q was not found in Resource Group %q - removing from state!", id.Name, id.ResourceGroup) + if response.WasNotFound(resp.HttpResponse) { + log.Printf("[DEBUG] %s was not found - removing from state!", *id) d.SetId("") return nil } - return fmt.Errorf("Error making Read request on App Configuration %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err) + return fmt.Errorf("retrieving %s: %+v", *id, err) } - resultPage, err := client.ListKeys(ctx, id.ResourceGroup, id.Name, "") + resultPage, err := client.ListKeysComplete(ctx, *id) if err != nil { - return fmt.Errorf("Failed to receive access keys for App Configuration %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err) + return fmt.Errorf("retrieving access keys for %s: %+v", *id, err) } d.Set("name", id.Name) d.Set("resource_group_name", id.ResourceGroup) - if location := resp.Location; location != nil { - d.Set("location", azure.NormalizeLocation(*location)) - } - skuName := "" - if resp.Sku != nil && resp.Sku.Name != nil { - skuName = *resp.Sku.Name - } - d.Set("sku", skuName) + if model := resp.Model; model != nil { + d.Set("location", location.Normalize(model.Location)) + d.Set("sku", model.Sku.Name) - if props := resp.ConfigurationStoreProperties; props != nil { - d.Set("endpoint", props.Endpoint) - } + if props := model.Properties; props != nil { + d.Set("endpoint", props.Endpoint) + } + + accessKeys := flattenAppConfigurationAccessKeys(resultPage.Items) + d.Set("primary_read_key", accessKeys.primaryReadKey) + d.Set("primary_write_key", accessKeys.primaryWriteKey) + d.Set("secondary_read_key", accessKeys.secondaryReadKey) + d.Set("secondary_write_key", accessKeys.secondaryWriteKey) - accessKeys := flattenAppConfigurationAccessKeys(resultPage.Values()) - d.Set("primary_read_key", accessKeys.primaryReadKey) - d.Set("primary_write_key", accessKeys.primaryWriteKey) - 
d.Set("secondary_read_key", accessKeys.secondaryReadKey) - d.Set("secondary_write_key", accessKeys.secondaryWriteKey) + if err := d.Set("identity", flattenAppConfigurationIdentity(model.Identity)); err != nil { + return fmt.Errorf("setting `identity`: %+v", err) + } - if err := d.Set("identity", flattenAppConfigurationIdentity(resp.Identity)); err != nil { - return fmt.Errorf("Error setting `identity`: %+v", err) + return tags.FlattenAndSet(d, flattenTags(model.Tags)) } - return tags.FlattenAndSet(d, resp.Tags) + return nil } func resourceAppConfigurationDelete(d *pluginsdk.ResourceData, meta interface{}) error { - client := meta.(*clients.Client).AppConfiguration.AppConfigurationsClient + client := meta.(*clients.Client).AppConfiguration.ConfigurationStoresClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := parse.ConfigurationStoreID(d.Id()) + id, err := configurationstores.ConfigurationStoreID(d.Id()) if err != nil { return err } - fut, err := client.Delete(ctx, id.ResourceGroup, id.Name) - if err != nil { - if response.WasNotFound(fut.Response()) { - return nil - } - return fmt.Errorf("Error deleting App Configuration %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err) - } - - if err = fut.WaitForCompletionRef(ctx, client.Client); err != nil { - if response.WasNotFound(fut.Response()) { - return nil - } - return fmt.Errorf("Error deleting App Configuration %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err) + if err := client.DeleteThenPoll(ctx, *id); err != nil { + return fmt.Errorf("deleting %s: %+v", *id, err) } return nil @@ -371,7 +338,7 @@ type flattenedAccessKeys struct { secondaryWriteKey []interface{} } -func flattenAppConfigurationAccessKeys(values []appconfiguration.APIKey) flattenedAccessKeys { +func flattenAppConfigurationAccessKeys(values []configurationstores.AccessKey) flattenedAccessKeys { result := flattenedAccessKeys{ primaryReadKey: make([]interface{}, 0), 
primaryWriteKey: make([]interface{}, 0), @@ -408,7 +375,7 @@ func flattenAppConfigurationAccessKeys(values []appconfiguration.APIKey) flatten return result } -func flattenAppConfigurationAccessKey(input appconfiguration.APIKey) []interface{} { +func flattenAppConfigurationAccessKey(input configurationstores.AccessKey) []interface{} { connectionString := "" if input.ConnectionString != nil { @@ -434,37 +401,44 @@ func flattenAppConfigurationAccessKey(input appconfiguration.APIKey) []interface } } -func expandAppConfigurationIdentity(identities []interface{}) *appconfiguration.ResourceIdentity { - if len(identities) == 0 { - return &appconfiguration.ResourceIdentity{ - Type: appconfiguration.IdentityTypeNone, +func expandAppConfigurationIdentity(identities []interface{}) *configurationstores.ResourceIdentity { + var out = func(in configurationstores.IdentityType) *configurationstores.ResourceIdentity { + return &configurationstores.ResourceIdentity{ + Type: &in, } } - identity := identities[0].(map[string]interface{}) - identityType := appconfiguration.IdentityType(identity["type"].(string)) - return &appconfiguration.ResourceIdentity{ - Type: identityType, + + if len(identities) == 0 { + return out(configurationstores.IdentityTypeNone) } + identity := identities[0].(map[string]interface{}) + identityType := configurationstores.IdentityType(identity["type"].(string)) + return out(identityType) } -func flattenAppConfigurationIdentity(identity *appconfiguration.ResourceIdentity) []interface{} { - if identity == nil || identity.Type == appconfiguration.IdentityTypeNone { +func flattenAppConfigurationIdentity(identity *configurationstores.ResourceIdentity) []interface{} { + if identity == nil || identity.Type == nil || *identity.Type == configurationstores.IdentityTypeNone { return []interface{}{} } + identityType := "" + if identity.Type != nil { + identityType = string(*identity.Type) + } + principalId := "" - if identity.PrincipalID != nil { - principalId = 
*identity.PrincipalID + if identity.PrincipalId != nil { + principalId = *identity.PrincipalId } tenantId := "" - if identity.TenantID != nil { - tenantId = *identity.TenantID + if identity.TenantId != nil { + tenantId = *identity.TenantId } return []interface{}{ map[string]interface{}{ - "type": string(identity.Type), + "type": identityType, "principal_id": principalId, "tenant_id": tenantId, }, diff --git a/azurerm/internal/services/appconfiguration/app_configuration_resource_test.go b/azurerm/internal/services/appconfiguration/app_configuration_resource_test.go index bbe7831ee033..f9f3bdacd946 100644 --- a/azurerm/internal/services/appconfiguration/app_configuration_resource_test.go +++ b/azurerm/internal/services/appconfiguration/app_configuration_resource_test.go @@ -5,11 +5,10 @@ import ( "fmt" "testing" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/appconfiguration/parse" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/appconfiguration/sdk/configurationstores" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -145,17 +144,17 @@ func TestAccAppConfiguration_update(t *testing.T) { } func (t AppConfigurationResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { - id, err := parse.ConfigurationStoreID(state.ID) + id, err := configurationstores.ConfigurationStoreID(state.ID) if err != nil { return nil, err } - resp, err := clients.AppConfiguration.AppConfigurationsClient.Get(ctx, id.ResourceGroup, id.Name) + resp, err := 
clients.AppConfiguration.ConfigurationStoresClient.Get(ctx, *id) if err != nil { return nil, fmt.Errorf("retrieving App Configuration %q (resource group: %q): %+v", id.Name, id.ResourceGroup, err) } - return utils.Bool(resp.ConfigurationStoreProperties != nil), nil + return utils.Bool(resp.Model != nil), nil } func (AppConfigurationResource) free(data acceptance.TestData) string { diff --git a/azurerm/internal/services/appconfiguration/client/client.go b/azurerm/internal/services/appconfiguration/client/client.go index 4f1f9c4cfdce..45f1d21d6347 100644 --- a/azurerm/internal/services/appconfiguration/client/client.go +++ b/azurerm/internal/services/appconfiguration/client/client.go @@ -1,19 +1,19 @@ package client import ( - appconf "github.com/Azure/azure-sdk-for-go/services/appconfiguration/mgmt/2020-06-01/appconfiguration" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/common" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/appconfiguration/sdk/configurationstores" ) type Client struct { - AppConfigurationsClient *appconf.ConfigurationStoresClient + ConfigurationStoresClient *configurationstores.ConfigurationStoresClient } func NewClient(o *common.ClientOptions) *Client { - AppConfigurationsClient := appconf.NewConfigurationStoresClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) - o.ConfigureClient(&AppConfigurationsClient.Client, o.ResourceManagerAuthorizer) + configurationStores := configurationstores.NewConfigurationStoresClientWithBaseURI(o.ResourceManagerEndpoint) + o.ConfigureClient(&configurationStores.Client, o.ResourceManagerAuthorizer) return &Client{ - AppConfigurationsClient: &AppConfigurationsClient, + ConfigurationStoresClient: &configurationStores, } } diff --git a/azurerm/internal/services/appconfiguration/parse/configuration_store.go b/azurerm/internal/services/appconfiguration/parse/configuration_store.go deleted file mode 100644 index bb7e30ae6738..000000000000 
--- a/azurerm/internal/services/appconfiguration/parse/configuration_store.go +++ /dev/null @@ -1,69 +0,0 @@ -package parse - -// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten - -import ( - "fmt" - "strings" - - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" -) - -type ConfigurationStoreId struct { - SubscriptionId string - ResourceGroup string - Name string -} - -func NewConfigurationStoreID(subscriptionId, resourceGroup, name string) ConfigurationStoreId { - return ConfigurationStoreId{ - SubscriptionId: subscriptionId, - ResourceGroup: resourceGroup, - Name: name, - } -} - -func (id ConfigurationStoreId) String() string { - segments := []string{ - fmt.Sprintf("Name %q", id.Name), - fmt.Sprintf("Resource Group %q", id.ResourceGroup), - } - segmentsStr := strings.Join(segments, " / ") - return fmt.Sprintf("%s: (%s)", "Configuration Store", segmentsStr) -} - -func (id ConfigurationStoreId) ID() string { - fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.AppConfiguration/configurationStores/%s" - return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.Name) -} - -// ConfigurationStoreID parses a ConfigurationStore ID into an ConfigurationStoreId struct -func ConfigurationStoreID(input string) (*ConfigurationStoreId, error) { - id, err := azure.ParseAzureResourceID(input) - if err != nil { - return nil, err - } - - resourceId := ConfigurationStoreId{ - SubscriptionId: id.SubscriptionID, - ResourceGroup: id.ResourceGroup, - } - - if resourceId.SubscriptionId == "" { - return nil, fmt.Errorf("ID was missing the 'subscriptions' element") - } - - if resourceId.ResourceGroup == "" { - return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") - } - - if resourceId.Name, err = id.PopSegment("configurationStores"); err != nil { - return nil, err - } - - if err := id.ValidateNoEmptySegments(input); err != nil { - return nil, err - } - - return &resourceId, 
nil -} diff --git a/azurerm/internal/services/appconfiguration/parse/configuration_store_test.go b/azurerm/internal/services/appconfiguration/parse/configuration_store_test.go deleted file mode 100644 index e889ea1c25d7..000000000000 --- a/azurerm/internal/services/appconfiguration/parse/configuration_store_test.go +++ /dev/null @@ -1,113 +0,0 @@ -package parse_test - -// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten - -import ( - "testing" - - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/resourceid" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/appconfiguration/parse" -) - -var _ resourceid.Formatter = parse.ConfigurationStoreId{} - -func TestConfigurationStoreIDFormatter(t *testing.T) { - actual := parse.NewConfigurationStoreID("12345678-1234-9876-4563-123456789012", "group1", "store1").ID() - expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/group1/providers/Microsoft.AppConfiguration/configurationStores/store1" - if actual != expected { - t.Fatalf("Expected %q but got %q", expected, actual) - } -} - -func TestConfigurationStoreID(t *testing.T) { - testData := []struct { - Input string - Error bool - Expected *parse.ConfigurationStoreId - }{ - - { - // empty - Input: "", - Error: true, - }, - - { - // missing SubscriptionId - Input: "/", - Error: true, - }, - - { - // missing value for SubscriptionId - Input: "/subscriptions/", - Error: true, - }, - - { - // missing ResourceGroup - Input: "/subscriptions/12345678-1234-9876-4563-123456789012/", - Error: true, - }, - - { - // missing value for ResourceGroup - Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/", - Error: true, - }, - - { - // missing Name - Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/group1/providers/Microsoft.AppConfiguration/", - Error: true, - }, - - { - // missing value for Name - Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/group1/providers/Microsoft.AppConfiguration/configurationStores/", - Error: true, - }, - - { - // valid - Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/group1/providers/Microsoft.AppConfiguration/configurationStores/store1", - Expected: &parse.ConfigurationStoreId{ - SubscriptionId: "12345678-1234-9876-4563-123456789012", - ResourceGroup: "group1", - Name: "store1", - }, - }, - - { - // upper-cased - Input: "/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/GROUP1/PROVIDERS/MICROSOFT.APPCONFIGURATION/CONFIGURATIONSTORES/STORE1", - Error: true, - }, - } - - for _, v := range testData { - t.Logf("[DEBUG] Testing %q", v.Input) - - actual, err := parse.ConfigurationStoreID(v.Input) - if err != nil { - if v.Error { - continue - } - - t.Fatalf("Expect a value but got an error: %s", err) - } - if v.Error { - t.Fatal("Expect an error but didn't get one") - } - - if actual.SubscriptionId != v.Expected.SubscriptionId { - t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) - } - if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) - } - if actual.Name != v.Expected.Name { - t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) - } - } -} diff --git a/azurerm/internal/services/appconfiguration/resourceids.go b/azurerm/internal/services/appconfiguration/resourceids.go deleted file mode 100644 index b49b87985aba..000000000000 --- a/azurerm/internal/services/appconfiguration/resourceids.go +++ /dev/null @@ -1,3 +0,0 @@ -package appconfiguration - -//go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=ConfigurationStore -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/group1/providers/Microsoft.AppConfiguration/configurationStores/store1 diff --git 
a/azurerm/internal/services/appconfiguration/sdk/configurationstores/client.go b/azurerm/internal/services/appconfiguration/sdk/configurationstores/client.go new file mode 100644 index 000000000000..ac4377b23a6d --- /dev/null +++ b/azurerm/internal/services/appconfiguration/sdk/configurationstores/client.go @@ -0,0 +1,15 @@ +package configurationstores + +import "github.com/Azure/go-autorest/autorest" + +type ConfigurationStoresClient struct { + Client autorest.Client + baseUri string +} + +func NewConfigurationStoresClientWithBaseURI(endpoint string) ConfigurationStoresClient { + return ConfigurationStoresClient{ + Client: autorest.NewClientWithUserAgent(userAgent()), + baseUri: endpoint, + } +} diff --git a/azurerm/internal/services/appconfiguration/sdk/configurationstores/constants.go b/azurerm/internal/services/appconfiguration/sdk/configurationstores/constants.go new file mode 100644 index 000000000000..0503eacb9e91 --- /dev/null +++ b/azurerm/internal/services/appconfiguration/sdk/configurationstores/constants.go @@ -0,0 +1,44 @@ +package configurationstores + +type ActionsRequired string + +const ( + ActionsRequiredNone ActionsRequired = "None" + ActionsRequiredRecreate ActionsRequired = "Recreate" +) + +type ConnectionStatus string + +const ( + ConnectionStatusApproved ConnectionStatus = "Approved" + ConnectionStatusDisconnected ConnectionStatus = "Disconnected" + ConnectionStatusPending ConnectionStatus = "Pending" + ConnectionStatusRejected ConnectionStatus = "Rejected" +) + +type IdentityType string + +const ( + IdentityTypeNone IdentityType = "None" + IdentityTypeSystemAssigned IdentityType = "SystemAssigned" + IdentityTypeSystemAssignedUserAssigned IdentityType = "SystemAssigned, UserAssigned" + IdentityTypeUserAssigned IdentityType = "UserAssigned" +) + +type ProvisioningState string + +const ( + ProvisioningStateCanceled ProvisioningState = "Canceled" + ProvisioningStateCreating ProvisioningState = "Creating" + ProvisioningStateDeleting 
ProvisioningState = "Deleting" + ProvisioningStateFailed ProvisioningState = "Failed" + ProvisioningStateSucceeded ProvisioningState = "Succeeded" + ProvisioningStateUpdating ProvisioningState = "Updating" +) + +type PublicNetworkAccess string + +const ( + PublicNetworkAccessDisabled PublicNetworkAccess = "Disabled" + PublicNetworkAccessEnabled PublicNetworkAccess = "Enabled" +) diff --git a/azurerm/internal/services/appconfiguration/sdk/configurationstores/id_configurationstore.go b/azurerm/internal/services/appconfiguration/sdk/configurationstores/id_configurationstore.go new file mode 100644 index 000000000000..8f4a4d81efa9 --- /dev/null +++ b/azurerm/internal/services/appconfiguration/sdk/configurationstores/id_configurationstore.go @@ -0,0 +1,108 @@ +package configurationstores + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +type ConfigurationStoreId struct { + SubscriptionId string + ResourceGroup string + Name string +} + +func NewConfigurationStoreID(subscriptionId, resourceGroup, name string) ConfigurationStoreId { + return ConfigurationStoreId{ + SubscriptionId: subscriptionId, + ResourceGroup: resourceGroup, + Name: name, + } +} + +func (id ConfigurationStoreId) String() string { + segments := []string{ + fmt.Sprintf("Name %q", id.Name), + fmt.Sprintf("Resource Group %q", id.ResourceGroup), + } + segmentsStr := strings.Join(segments, " / ") + return fmt.Sprintf("%s: (%s)", "Configuration Store", segmentsStr) +} + +func (id ConfigurationStoreId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.AppConfiguration/configurationStores/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.Name) +} + +// ConfigurationStoreID parses a ConfigurationStore ID into an ConfigurationStoreId struct +func ConfigurationStoreID(input string) (*ConfigurationStoreId, error) { + id, err := resourceids.ParseAzureResourceID(input) + if err != nil { + return 
nil, err + } + + resourceId := ConfigurationStoreId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + if resourceId.Name, err = id.PopSegment("configurationStores"); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} + +// ConfigurationStoreIDInsensitively parses an ConfigurationStore ID into an ConfigurationStoreId struct, insensitively +// This should only be used to parse an ID for rewriting to a consistent casing, +// the ConfigurationStoreID method should be used instead for validation etc. +func ConfigurationStoreIDInsensitively(input string) (*ConfigurationStoreId, error) { + id, err := resourceids.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := ConfigurationStoreId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + // find the correct casing for the 'configurationStores' segment + configurationStoresKey := "configurationStores" + for key := range id.Path { + if strings.EqualFold(key, configurationStoresKey) { + configurationStoresKey = key + break + } + } + if resourceId.Name, err = id.PopSegment(configurationStoresKey); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} diff --git a/azurerm/internal/services/appconfiguration/sdk/configurationstores/id_configurationstore_test.go 
b/azurerm/internal/services/appconfiguration/sdk/configurationstores/id_configurationstore_test.go new file mode 100644 index 000000000000..b666b3ca1147 --- /dev/null +++ b/azurerm/internal/services/appconfiguration/sdk/configurationstores/id_configurationstore_test.go @@ -0,0 +1,227 @@ +package configurationstores + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.Id = ConfigurationStoreId{} + +func TestConfigurationStoreIDFormatter(t *testing.T) { + actual := NewConfigurationStoreID("{subscriptionId}", "{resourceGroupName}", "{configStoreName}").ID() + expected := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppConfiguration/configurationStores/{configStoreName}" + if actual != expected { + t.Fatalf("Expected %q but got %q", expected, actual) + } +} + +func TestConfigurationStoreID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ConfigurationStoreId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/{subscriptionId}/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/{subscriptionId}/resourceGroups/", + Error: true, + }, + + { + // missing Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppConfiguration/", + Error: true, + }, + + { + // missing value for Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppConfiguration/configurationStores/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppConfiguration/configurationStores/{configStoreName}", + Expected: 
&ConfigurationStoreId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + Name: "{configStoreName}", + }, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/{SUBSCRIPTIONID}/RESOURCEGROUPS/{RESOURCEGROUPNAME}/PROVIDERS/MICROSOFT.APPCONFIGURATION/CONFIGURATIONSTORES/{CONFIGSTORENAME}", + Error: true, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ConfigurationStoreID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.Name != v.Expected.Name { + t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + } + } +} + +func TestConfigurationStoreIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ConfigurationStoreId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/{subscriptionId}/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/{subscriptionId}/resourceGroups/", + Error: true, + }, + + { + // missing Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppConfiguration/", + Error: true, + }, + + { + // missing value for Name + Input: 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppConfiguration/configurationStores/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppConfiguration/configurationStores/{configStoreName}", + Expected: &ConfigurationStoreId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + Name: "{configStoreName}", + }, + }, + + { + // lower-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppConfiguration/configurationstores/{configStoreName}", + Expected: &ConfigurationStoreId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + Name: "{configStoreName}", + }, + }, + + { + // upper-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppConfiguration/CONFIGURATIONSTORES/{configStoreName}", + Expected: &ConfigurationStoreId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + Name: "{configStoreName}", + }, + }, + + { + // mixed-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppConfiguration/CoNfIgUrAtIoNsToReS/{configStoreName}", + Expected: &ConfigurationStoreId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + Name: "{configStoreName}", + }, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ConfigurationStoreIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if 
actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.Name != v.Expected.Name { + t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + } + } +} diff --git a/azurerm/internal/services/appconfiguration/sdk/configurationstores/method_create_autorest.go b/azurerm/internal/services/appconfiguration/sdk/configurationstores/method_create_autorest.go new file mode 100644 index 000000000000..78b3ada60b0b --- /dev/null +++ b/azurerm/internal/services/appconfiguration/sdk/configurationstores/method_create_autorest.go @@ -0,0 +1,75 @@ +package configurationstores + +import ( + "context" + "fmt" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/hashicorp/go-azure-helpers/polling" +) + +type CreateResponse struct { + Poller polling.LongRunningPoller + HttpResponse *http.Response +} + +// Create ... 
+func (c ConfigurationStoresClient) Create(ctx context.Context, id ConfigurationStoreId, input ConfigurationStore) (result CreateResponse, err error) { + req, err := c.preparerForCreate(ctx, id, input) + if err != nil { + err = autorest.NewErrorWithError(err, "configurationstores.ConfigurationStoresClient", "Create", nil, "Failure preparing request") + return + } + + result, err = c.senderForCreate(ctx, req) + if err != nil { + err = autorest.NewErrorWithError(err, "configurationstores.ConfigurationStoresClient", "Create", result.HttpResponse, "Failure sending request") + return + } + + return +} + +// CreateThenPoll performs Create then polls until it's completed +func (c ConfigurationStoresClient) CreateThenPoll(ctx context.Context, id ConfigurationStoreId, input ConfigurationStore) error { + result, err := c.Create(ctx, id, input) + if err != nil { + return fmt.Errorf("performing Create: %+v", err) + } + + if err := result.Poller.PollUntilDone(); err != nil { + return fmt.Errorf("polling after Create: %+v", err) + } + + return nil +} + +// preparerForCreate prepares the Create request. +func (c ConfigurationStoresClient) preparerForCreate(ctx context.Context, id ConfigurationStoreId, input ConfigurationStore) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithJSON(input), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// senderForCreate sends the Create request. The method will close the +// http.Response Body if it receives an error. 
+func (c ConfigurationStoresClient) senderForCreate(ctx context.Context, req *http.Request) (future CreateResponse, err error) { + var resp *http.Response + resp, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + return + } + future.Poller, err = polling.NewLongRunningPollerFromResponse(ctx, resp, c.Client) + return +} diff --git a/azurerm/internal/services/appconfiguration/sdk/configurationstores/method_delete_autorest.go b/azurerm/internal/services/appconfiguration/sdk/configurationstores/method_delete_autorest.go new file mode 100644 index 000000000000..91970e504773 --- /dev/null +++ b/azurerm/internal/services/appconfiguration/sdk/configurationstores/method_delete_autorest.go @@ -0,0 +1,73 @@ +package configurationstores + +import ( + "context" + "fmt" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/hashicorp/go-azure-helpers/polling" +) + +type DeleteResponse struct { + Poller polling.LongRunningPoller + HttpResponse *http.Response +} + +// Delete ... 
+func (c ConfigurationStoresClient) Delete(ctx context.Context, id ConfigurationStoreId) (result DeleteResponse, err error) { + req, err := c.preparerForDelete(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "configurationstores.ConfigurationStoresClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = c.senderForDelete(ctx, req) + if err != nil { + err = autorest.NewErrorWithError(err, "configurationstores.ConfigurationStoresClient", "Delete", result.HttpResponse, "Failure sending request") + return + } + + return +} + +// DeleteThenPoll performs Delete then polls until it's completed +func (c ConfigurationStoresClient) DeleteThenPoll(ctx context.Context, id ConfigurationStoreId) error { + result, err := c.Delete(ctx, id) + if err != nil { + return fmt.Errorf("performing Delete: %+v", err) + } + + if err := result.Poller.PollUntilDone(); err != nil { + return fmt.Errorf("polling after Delete: %+v", err) + } + + return nil +} + +// preparerForDelete prepares the Delete request. +func (c ConfigurationStoresClient) preparerForDelete(ctx context.Context, id ConfigurationStoreId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// senderForDelete sends the Delete request. The method will close the +// http.Response Body if it receives an error. 
+func (c ConfigurationStoresClient) senderForDelete(ctx context.Context, req *http.Request) (future DeleteResponse, err error) { + var resp *http.Response + resp, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + return + } + future.Poller, err = polling.NewLongRunningPollerFromResponse(ctx, resp, c.Client) + return +} diff --git a/azurerm/internal/services/appconfiguration/sdk/configurationstores/method_get_autorest.go b/azurerm/internal/services/appconfiguration/sdk/configurationstores/method_get_autorest.go new file mode 100644 index 000000000000..ea7ed1dddfbb --- /dev/null +++ b/azurerm/internal/services/appconfiguration/sdk/configurationstores/method_get_autorest.go @@ -0,0 +1,64 @@ +package configurationstores + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type GetResponse struct { + HttpResponse *http.Response + Model *ConfigurationStore +} + +// Get ... +func (c ConfigurationStoresClient) Get(ctx context.Context, id ConfigurationStoreId) (result GetResponse, err error) { + req, err := c.preparerForGet(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "configurationstores.ConfigurationStoresClient", "Get", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "configurationstores.ConfigurationStoresClient", "Get", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForGet(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "configurationstores.ConfigurationStoresClient", "Get", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForGet prepares the Get request. 
+func (c ConfigurationStoresClient) preparerForGet(ctx context.Context, id ConfigurationStoreId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForGet handles the response to the Get request. The method always +// closes the http.Response Body. +func (c ConfigurationStoresClient) responderForGet(resp *http.Response) (result GetResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/azurerm/internal/services/appconfiguration/sdk/configurationstores/method_listkeys_autorest.go b/azurerm/internal/services/appconfiguration/sdk/configurationstores/method_listkeys_autorest.go new file mode 100644 index 000000000000..d312a3437be5 --- /dev/null +++ b/azurerm/internal/services/appconfiguration/sdk/configurationstores/method_listkeys_autorest.go @@ -0,0 +1,196 @@ +package configurationstores + +import ( + "context" + "fmt" + "net/http" + "net/url" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type ListKeysResponse struct { + HttpResponse *http.Response + Model *[]AccessKey + + nextLink *string + nextPageFunc func(ctx context.Context, nextLink string) (ListKeysResponse, error) +} + +type ListKeysCompleteResult struct { + Items []AccessKey +} + +func (r ListKeysResponse) HasMore() bool { + return r.nextLink != nil +} + +func (r ListKeysResponse) LoadMore(ctx context.Context) (resp ListKeysResponse, err error) { + if !r.HasMore() { + err = fmt.Errorf("no more pages returned") + 
return + } + return r.nextPageFunc(ctx, *r.nextLink) +} + +type AccessKeyPredicate struct { + // TODO: implement me +} + +func (p AccessKeyPredicate) Matches(input AccessKey) bool { + // TODO: implement me + // if p.Name != nil && input.Name != *p.Name { + // return false + // } + + return true +} + +// ListKeys ... +func (c ConfigurationStoresClient) ListKeys(ctx context.Context, id ConfigurationStoreId) (resp ListKeysResponse, err error) { + req, err := c.preparerForListKeys(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "configurationstores.ConfigurationStoresClient", "ListKeys", nil, "Failure preparing request") + return + } + + resp.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "configurationstores.ConfigurationStoresClient", "ListKeys", resp.HttpResponse, "Failure sending request") + return + } + + resp, err = c.responderForListKeys(resp.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "configurationstores.ConfigurationStoresClient", "ListKeys", resp.HttpResponse, "Failure responding to request") + return + } + return +} + +// ListKeysCompleteMatchingPredicate retrieves all of the results into a single object +func (c ConfigurationStoresClient) ListKeysComplete(ctx context.Context, id ConfigurationStoreId) (ListKeysCompleteResult, error) { + return c.ListKeysCompleteMatchingPredicate(ctx, id, AccessKeyPredicate{}) +} + +// ListKeysCompleteMatchingPredicate retrieves all of the results and then applied the predicate +func (c ConfigurationStoresClient) ListKeysCompleteMatchingPredicate(ctx context.Context, id ConfigurationStoreId, predicate AccessKeyPredicate) (resp ListKeysCompleteResult, err error) { + items := make([]AccessKey, 0) + + page, err := c.ListKeys(ctx, id) + if err != nil { + err = fmt.Errorf("loading the initial page: %+v", err) + return + } + if page.Model != nil { + for _, v := range *page.Model { + if 
predicate.Matches(v) { + items = append(items, v) + } + } + } + + for page.HasMore() { + page, err = page.LoadMore(ctx) + if err != nil { + err = fmt.Errorf("loading the next page: %+v", err) + return + } + + if page.Model != nil { + for _, v := range *page.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + } + + out := ListKeysCompleteResult{ + Items: items, + } + return out, nil +} + +// preparerForListKeys prepares the ListKeys request. +func (c ConfigurationStoresClient) preparerForListKeys(ctx context.Context, id ConfigurationStoreId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/listKeys", id.ID())), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// preparerForListKeysWithNextLink prepares the ListKeys request with the given nextLink token. +func (c ConfigurationStoresClient) preparerForListKeysWithNextLink(ctx context.Context, nextLink string) (*http.Request, error) { + uri, err := url.Parse(nextLink) + if err != nil { + return nil, fmt.Errorf("parsing nextLink %q: %+v", nextLink, err) + } + queryParameters := map[string]interface{}{} + for k, v := range uri.Query() { + if len(v) == 0 { + continue + } + val := v[0] + val = autorest.Encode("query", val) + queryParameters[k] = val + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(uri.Path), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForListKeys handles the response to the ListKeys request. The method always +// closes the http.Response Body. 
+func (c ConfigurationStoresClient) responderForListKeys(resp *http.Response) (result ListKeysResponse, err error) { + type page struct { + Values []AccessKey `json:"value"` + NextLink *string `json:"nextLink"` + } + var respObj page + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&respObj), + autorest.ByClosing()) + result.HttpResponse = resp + result.Model = &respObj.Values + result.nextLink = respObj.NextLink + if respObj.NextLink != nil { + result.nextPageFunc = func(ctx context.Context, nextLink string) (result ListKeysResponse, err error) { + req, err := c.preparerForListKeysWithNextLink(ctx, nextLink) + if err != nil { + err = autorest.NewErrorWithError(err, "configurationstores.ConfigurationStoresClient", "ListKeys", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "configurationstores.ConfigurationStoresClient", "ListKeys", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForListKeys(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "configurationstores.ConfigurationStoresClient", "ListKeys", result.HttpResponse, "Failure responding to request") + return + } + + return + } + } + return +} diff --git a/azurerm/internal/services/appconfiguration/sdk/configurationstores/method_update_autorest.go b/azurerm/internal/services/appconfiguration/sdk/configurationstores/method_update_autorest.go new file mode 100644 index 000000000000..810ebbff1b64 --- /dev/null +++ b/azurerm/internal/services/appconfiguration/sdk/configurationstores/method_update_autorest.go @@ -0,0 +1,75 @@ +package configurationstores + +import ( + "context" + "fmt" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/hashicorp/go-azure-helpers/polling" 
+) + +type UpdateResponse struct { + Poller polling.LongRunningPoller + HttpResponse *http.Response +} + +// Update ... +func (c ConfigurationStoresClient) Update(ctx context.Context, id ConfigurationStoreId, input ConfigurationStoreUpdateParameters) (result UpdateResponse, err error) { + req, err := c.preparerForUpdate(ctx, id, input) + if err != nil { + err = autorest.NewErrorWithError(err, "configurationstores.ConfigurationStoresClient", "Update", nil, "Failure preparing request") + return + } + + result, err = c.senderForUpdate(ctx, req) + if err != nil { + err = autorest.NewErrorWithError(err, "configurationstores.ConfigurationStoresClient", "Update", result.HttpResponse, "Failure sending request") + return + } + + return +} + +// UpdateThenPoll performs Update then polls until it's completed +func (c ConfigurationStoresClient) UpdateThenPoll(ctx context.Context, id ConfigurationStoreId, input ConfigurationStoreUpdateParameters) error { + result, err := c.Update(ctx, id, input) + if err != nil { + return fmt.Errorf("performing Update: %+v", err) + } + + if err := result.Poller.PollUntilDone(); err != nil { + return fmt.Errorf("polling after Update: %+v", err) + } + + return nil +} + +// preparerForUpdate prepares the Update request. +func (c ConfigurationStoresClient) preparerForUpdate(ctx context.Context, id ConfigurationStoreId, input ConfigurationStoreUpdateParameters) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithJSON(input), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// senderForUpdate sends the Update request. The method will close the +// http.Response Body if it receives an error. 
+func (c ConfigurationStoresClient) senderForUpdate(ctx context.Context, req *http.Request) (future UpdateResponse, err error) { + var resp *http.Response + resp, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + return + } + future.Poller, err = polling.NewLongRunningPollerFromResponse(ctx, resp, c.Client) + return +} diff --git a/azurerm/internal/services/appconfiguration/sdk/configurationstores/model_accesskey.go b/azurerm/internal/services/appconfiguration/sdk/configurationstores/model_accesskey.go new file mode 100644 index 000000000000..4a9dc9adb1f3 --- /dev/null +++ b/azurerm/internal/services/appconfiguration/sdk/configurationstores/model_accesskey.go @@ -0,0 +1,25 @@ +package configurationstores + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/formatting" +) + +type AccessKey struct { + ConnectionString *string `json:"connectionString,omitempty"` + ID *string `json:"id,omitempty"` + LastModified *string `json:"lastModified,omitempty"` + Name *string `json:"name,omitempty"` + ReadOnly *bool `json:"readOnly,omitempty"` + Value *string `json:"value,omitempty"` +} + +func (o AccessKey) ListLastModifiedAsTime() (*time.Time, error) { + return formatting.ParseAsDateFormat(o.LastModified, "2006-01-02T15:04:05Z07:00") +} + +func (o AccessKey) SetLastModifiedAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastModified = &formatted +} diff --git a/azurerm/internal/services/appconfiguration/sdk/configurationstores/model_configurationstore.go b/azurerm/internal/services/appconfiguration/sdk/configurationstores/model_configurationstore.go new file mode 100644 index 000000000000..ed42f14301e6 --- /dev/null +++ b/azurerm/internal/services/appconfiguration/sdk/configurationstores/model_configurationstore.go @@ -0,0 +1,12 @@ +package configurationstores + +type ConfigurationStore struct { + Id *string `json:"id,omitempty"` + Identity *ResourceIdentity `json:"identity,omitempty"` + 
Location string `json:"location"` + Name *string `json:"name,omitempty"` + Properties *ConfigurationStoreProperties `json:"properties,omitempty"` + Sku Sku `json:"sku"` + Tags *map[string]string `json:"tags,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/azurerm/internal/services/appconfiguration/sdk/configurationstores/model_configurationstoreproperties.go b/azurerm/internal/services/appconfiguration/sdk/configurationstores/model_configurationstoreproperties.go new file mode 100644 index 000000000000..5beb569402f8 --- /dev/null +++ b/azurerm/internal/services/appconfiguration/sdk/configurationstores/model_configurationstoreproperties.go @@ -0,0 +1,25 @@ +package configurationstores + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/formatting" +) + +type ConfigurationStoreProperties struct { + CreationDate *string `json:"creationDate,omitempty"` + Encryption *EncryptionProperties `json:"encryption,omitempty"` + Endpoint *string `json:"endpoint,omitempty"` + PrivateEndpointConnections *[]PrivateEndpointConnection `json:"privateEndpointConnections,omitempty"` + ProvisioningState *ProvisioningState `json:"provisioningState,omitempty"` + PublicNetworkAccess *PublicNetworkAccess `json:"publicNetworkAccess,omitempty"` +} + +func (o ConfigurationStoreProperties) ListCreationDateAsTime() (*time.Time, error) { + return formatting.ParseAsDateFormat(o.CreationDate, "2006-01-02T15:04:05Z07:00") +} + +func (o ConfigurationStoreProperties) SetCreationDateAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.CreationDate = &formatted +} diff --git a/azurerm/internal/services/appconfiguration/sdk/configurationstores/model_configurationstorepropertiesupdateparameters.go b/azurerm/internal/services/appconfiguration/sdk/configurationstores/model_configurationstorepropertiesupdateparameters.go new file mode 100644 index 000000000000..96ada82f5c6d --- /dev/null +++ 
b/azurerm/internal/services/appconfiguration/sdk/configurationstores/model_configurationstorepropertiesupdateparameters.go @@ -0,0 +1,6 @@ +package configurationstores + +type ConfigurationStorePropertiesUpdateParameters struct { + Encryption *EncryptionProperties `json:"encryption,omitempty"` + PublicNetworkAccess *PublicNetworkAccess `json:"publicNetworkAccess,omitempty"` +} diff --git a/azurerm/internal/services/appconfiguration/sdk/configurationstores/model_configurationstoreupdateparameters.go b/azurerm/internal/services/appconfiguration/sdk/configurationstores/model_configurationstoreupdateparameters.go new file mode 100644 index 000000000000..86f2dcff447e --- /dev/null +++ b/azurerm/internal/services/appconfiguration/sdk/configurationstores/model_configurationstoreupdateparameters.go @@ -0,0 +1,8 @@ +package configurationstores + +type ConfigurationStoreUpdateParameters struct { + Identity *ResourceIdentity `json:"identity,omitempty"` + Properties *ConfigurationStorePropertiesUpdateParameters `json:"properties,omitempty"` + Sku *Sku `json:"sku,omitempty"` + Tags *map[string]string `json:"tags,omitempty"` +} diff --git a/azurerm/internal/services/appconfiguration/sdk/configurationstores/model_encryptionproperties.go b/azurerm/internal/services/appconfiguration/sdk/configurationstores/model_encryptionproperties.go new file mode 100644 index 000000000000..e53ec95700fc --- /dev/null +++ b/azurerm/internal/services/appconfiguration/sdk/configurationstores/model_encryptionproperties.go @@ -0,0 +1,5 @@ +package configurationstores + +type EncryptionProperties struct { + KeyVaultProperties *KeyVaultProperties `json:"keyVaultProperties,omitempty"` +} diff --git a/azurerm/internal/services/appconfiguration/sdk/configurationstores/model_keyvaultproperties.go b/azurerm/internal/services/appconfiguration/sdk/configurationstores/model_keyvaultproperties.go new file mode 100644 index 000000000000..d013eaf7a730 --- /dev/null +++ 
b/azurerm/internal/services/appconfiguration/sdk/configurationstores/model_keyvaultproperties.go @@ -0,0 +1,6 @@ +package configurationstores + +type KeyVaultProperties struct { + IdentityClientId *string `json:"identityClientId,omitempty"` + KeyIdentifier *string `json:"keyIdentifier,omitempty"` +} diff --git a/azurerm/internal/services/appconfiguration/sdk/configurationstores/model_privateendpoint.go b/azurerm/internal/services/appconfiguration/sdk/configurationstores/model_privateendpoint.go new file mode 100644 index 000000000000..91860fa0d079 --- /dev/null +++ b/azurerm/internal/services/appconfiguration/sdk/configurationstores/model_privateendpoint.go @@ -0,0 +1,5 @@ +package configurationstores + +type PrivateEndpoint struct { + Id *string `json:"id,omitempty"` +} diff --git a/azurerm/internal/services/appconfiguration/sdk/configurationstores/model_privateendpointconnection.go b/azurerm/internal/services/appconfiguration/sdk/configurationstores/model_privateendpointconnection.go new file mode 100644 index 000000000000..4f64ddb79d0d --- /dev/null +++ b/azurerm/internal/services/appconfiguration/sdk/configurationstores/model_privateendpointconnection.go @@ -0,0 +1,8 @@ +package configurationstores + +type PrivateEndpointConnection struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties *PrivateEndpointConnectionProperties `json:"properties,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/azurerm/internal/services/appconfiguration/sdk/configurationstores/model_privateendpointconnectionproperties.go b/azurerm/internal/services/appconfiguration/sdk/configurationstores/model_privateendpointconnectionproperties.go new file mode 100644 index 000000000000..85fa5c76899e --- /dev/null +++ b/azurerm/internal/services/appconfiguration/sdk/configurationstores/model_privateendpointconnectionproperties.go @@ -0,0 +1,7 @@ +package configurationstores + +type PrivateEndpointConnectionProperties struct { + 
PrivateEndpoint *PrivateEndpoint `json:"privateEndpoint,omitempty"` + PrivateLinkServiceConnectionState PrivateLinkServiceConnectionState `json:"privateLinkServiceConnectionState"` + ProvisioningState *ProvisioningState `json:"provisioningState,omitempty"` +} diff --git a/azurerm/internal/services/appconfiguration/sdk/configurationstores/model_privatelinkserviceconnectionstate.go b/azurerm/internal/services/appconfiguration/sdk/configurationstores/model_privatelinkserviceconnectionstate.go new file mode 100644 index 000000000000..e42c185e0842 --- /dev/null +++ b/azurerm/internal/services/appconfiguration/sdk/configurationstores/model_privatelinkserviceconnectionstate.go @@ -0,0 +1,7 @@ +package configurationstores + +type PrivateLinkServiceConnectionState struct { + ActionsRequired *ActionsRequired `json:"actionsRequired,omitempty"` + Description *string `json:"description,omitempty"` + Status *ConnectionStatus `json:"status,omitempty"` +} diff --git a/azurerm/internal/services/appconfiguration/sdk/configurationstores/model_resourceidentity.go b/azurerm/internal/services/appconfiguration/sdk/configurationstores/model_resourceidentity.go new file mode 100644 index 000000000000..59b9a84556d6 --- /dev/null +++ b/azurerm/internal/services/appconfiguration/sdk/configurationstores/model_resourceidentity.go @@ -0,0 +1,7 @@ +package configurationstores + +type ResourceIdentity struct { + PrincipalId *string `json:"principalId,omitempty"` + TenantId *string `json:"tenantId,omitempty"` + Type *IdentityType `json:"type,omitempty"` +} diff --git a/azurerm/internal/services/appconfiguration/sdk/configurationstores/model_sku.go b/azurerm/internal/services/appconfiguration/sdk/configurationstores/model_sku.go new file mode 100644 index 000000000000..6a4664e6af21 --- /dev/null +++ b/azurerm/internal/services/appconfiguration/sdk/configurationstores/model_sku.go @@ -0,0 +1,5 @@ +package configurationstores + +type Sku struct { + Name string `json:"name"` +} diff --git 
a/azurerm/internal/services/appconfiguration/sdk/configurationstores/version.go b/azurerm/internal/services/appconfiguration/sdk/configurationstores/version.go new file mode 100644 index 000000000000..e5aea194ad34 --- /dev/null +++ b/azurerm/internal/services/appconfiguration/sdk/configurationstores/version.go @@ -0,0 +1,9 @@ +package configurationstores + +import "fmt" + +const defaultApiVersion = "2020-06-01" + +func userAgent() string { + return fmt.Sprintf("pandora/configurationstores/%s", defaultApiVersion) +} diff --git a/azurerm/internal/services/appconfiguration/transform.go b/azurerm/internal/services/appconfiguration/transform.go new file mode 100644 index 000000000000..84a1fb62887f --- /dev/null +++ b/azurerm/internal/services/appconfiguration/transform.go @@ -0,0 +1,26 @@ +package appconfiguration + +import "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" + +func flattenTags(input *map[string]string) map[string]*string { + output := make(map[string]*string) + if input == nil { + return output + } + + for k, v := range *input { + output[k] = utils.String(v) + } + + return output +} + +func expandTags(input map[string]interface{}) *map[string]string { + output := make(map[string]string) + + for k, v := range input { + output[k] = v.(string) + } + + return &output +} diff --git a/azurerm/internal/services/appconfiguration/validate/configuration_store_id.go b/azurerm/internal/services/appconfiguration/validate/configuration_store_id.go deleted file mode 100644 index 9ae4995e247e..000000000000 --- a/azurerm/internal/services/appconfiguration/validate/configuration_store_id.go +++ /dev/null @@ -1,23 +0,0 @@ -package validate - -// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten - -import ( - "fmt" - - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/appconfiguration/parse" -) - -func ConfigurationStoreID(input interface{}, key string) (warnings []string, errors 
[]error) { - v, ok := input.(string) - if !ok { - errors = append(errors, fmt.Errorf("expected %q to be a string", key)) - return - } - - if _, err := parse.ConfigurationStoreID(v); err != nil { - errors = append(errors, err) - } - - return -} diff --git a/azurerm/internal/services/appconfiguration/validate/configuration_store_id_test.go b/azurerm/internal/services/appconfiguration/validate/configuration_store_id_test.go deleted file mode 100644 index b1a52db37d5d..000000000000 --- a/azurerm/internal/services/appconfiguration/validate/configuration_store_id_test.go +++ /dev/null @@ -1,80 +0,0 @@ -package validate_test - -// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten - -import ( - "testing" - - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/appconfiguration/validate" -) - -func TestConfigurationStoreID(t *testing.T) { - cases := []struct { - Input string - Valid bool - }{ - - { - // empty - Input: "", - Valid: false, - }, - - { - // missing SubscriptionId - Input: "/", - Valid: false, - }, - - { - // missing value for SubscriptionId - Input: "/subscriptions/", - Valid: false, - }, - - { - // missing ResourceGroup - Input: "/subscriptions/12345678-1234-9876-4563-123456789012/", - Valid: false, - }, - - { - // missing value for ResourceGroup - Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/", - Valid: false, - }, - - { - // missing Name - Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/group1/providers/Microsoft.AppConfiguration/", - Valid: false, - }, - - { - // missing value for Name - Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/group1/providers/Microsoft.AppConfiguration/configurationStores/", - Valid: false, - }, - - { - // valid - Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/group1/providers/Microsoft.AppConfiguration/configurationStores/store1", - Valid: true, - }, - - { 
- // upper-cased - Input: "/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/GROUP1/PROVIDERS/MICROSOFT.APPCONFIGURATION/CONFIGURATIONSTORES/STORE1", - Valid: false, - }, - } - for _, tc := range cases { - t.Logf("[DEBUG] Testing Value %s", tc.Input) - _, errors := validate.ConfigurationStoreID(tc.Input, "test") - valid := len(errors) == 0 - - if tc.Valid != valid { - t.Fatalf("Expected %t but got %t", tc.Valid, valid) - } - } -} diff --git a/azurerm/internal/services/applicationinsights/application_insights_smart_detection_rule_resource.go b/azurerm/internal/services/applicationinsights/application_insights_smart_detection_rule_resource.go index f4908743e982..0f90a3e404d4 100644 --- a/azurerm/internal/services/applicationinsights/application_insights_smart_detection_rule_resource.go +++ b/azurerm/internal/services/applicationinsights/application_insights_smart_detection_rule_resource.go @@ -6,11 +6,10 @@ import ( "strings" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/applicationinsights/parse" - "github.com/Azure/azure-sdk-for-go/services/appinsights/mgmt/2015-05-01/insights" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/applicationinsights/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" diff --git a/azurerm/internal/services/applicationinsights/application_insights_smart_detection_rule_resource_test.go b/azurerm/internal/services/applicationinsights/application_insights_smart_detection_rule_resource_test.go index b63a7046e642..5b3e3b0acb20 100644 --- 
a/azurerm/internal/services/applicationinsights/application_insights_smart_detection_rule_resource_test.go +++ b/azurerm/internal/services/applicationinsights/application_insights_smart_detection_rule_resource_test.go @@ -6,11 +6,10 @@ import ( "net/http" "testing" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/applicationinsights/parse" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/applicationinsights/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) diff --git a/azurerm/internal/services/authorization/parse/role_assignment.go b/azurerm/internal/services/authorization/parse/role_assignment.go index cd32e7a26200..c60de5e5ed26 100644 --- a/azurerm/internal/services/authorization/parse/role_assignment.go +++ b/azurerm/internal/services/authorization/parse/role_assignment.go @@ -8,13 +8,16 @@ import ( ) type RoleAssignmentId struct { - SubscriptionID string - ResourceGroup string - ManagementGroup string - Name string + SubscriptionID string + ResourceGroup string + ManagementGroup string + ResourceScope string + ResourceProvider string + Name string + TenantId string } -func NewRoleAssignmentID(subscriptionId, resourceGroup, managementGroup, name string) (*RoleAssignmentId, error) { +func NewRoleAssignmentID(subscriptionId, resourceGroup, resourceProvider, resourceScope, managementGroup, name, tenantId string) (*RoleAssignmentId, error) { if subscriptionId == "" && resourceGroup == "" && managementGroup == "" { return nil, fmt.Errorf("one of subscriptionId, resourceGroup, or managementGroup must be 
provided") } @@ -32,14 +35,24 @@ func NewRoleAssignmentID(subscriptionId, resourceGroup, managementGroup, name st } return &RoleAssignmentId{ - SubscriptionID: subscriptionId, - ResourceGroup: resourceGroup, - ManagementGroup: managementGroup, - Name: name, + SubscriptionID: subscriptionId, + ResourceGroup: resourceGroup, + ResourceProvider: resourceProvider, + ResourceScope: resourceScope, + ManagementGroup: managementGroup, + Name: name, + TenantId: tenantId, }, nil } -func (id RoleAssignmentId) ID() string { +// in general case, the id format does not change +// for cross tenant scenario, add the tenantId info +func (id RoleAssignmentId) AzureResourceID() string { + if id.ResourceScope != "" { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/%s/%s/providers/Microsoft.Authorization/roleAssignments/%s" + return fmt.Sprintf(fmtString, id.SubscriptionID, id.ResourceGroup, id.ResourceProvider, id.ResourceScope, id.Name) + } + if id.ManagementGroup != "" { fmtString := "/providers/Microsoft.Management/managementGroups/%s/providers/Microsoft.Authorization/roleAssignments/%s" return fmt.Sprintf(fmtString, id.ManagementGroup, id.Name) @@ -54,6 +67,17 @@ func (id RoleAssignmentId) ID() string { return fmt.Sprintf(fmtString, id.SubscriptionID, id.Name) } +func (id RoleAssignmentId) ID() string { + return ConstructRoleAssignmentId(id.AzureResourceID(), id.TenantId) +} + +func ConstructRoleAssignmentId(azureResourceId, tenantId string) string { + if tenantId == "" { + return azureResourceId + } + return fmt.Sprintf("%s|%s", azureResourceId, tenantId) +} + func RoleAssignmentID(input string) (*RoleAssignmentId, error) { if len(input) == 0 { return nil, fmt.Errorf("Role Assignment ID is empty string") @@ -61,6 +85,12 @@ func RoleAssignmentID(input string) (*RoleAssignmentId, error) { roleAssignmentId := RoleAssignmentId{} + parts := strings.Split(input, "|") + if len(parts) == 2 { + roleAssignmentId.TenantId = parts[1] + input = parts[0] + } + switch { case 
strings.HasPrefix(input, "/subscriptions/"): id, err := azure.ParseAzureResourceID(input) @@ -69,6 +99,15 @@ func RoleAssignmentID(input string) (*RoleAssignmentId, error) { } roleAssignmentId.SubscriptionID = id.SubscriptionID roleAssignmentId.ResourceGroup = id.ResourceGroup + if id.Provider != "Microsoft.Authorization" && id.Provider != "" { + roleAssignmentId.ResourceProvider = id.Provider + // logic to save resource scope + result := strings.Split(input, "/providers/") + if len(result) == 3 { + roleAssignmentId.ResourceScope = strings.TrimPrefix(result[1], fmt.Sprintf("%s/", id.Provider)) + } + } + if roleAssignmentId.Name, err = id.PopSegment("roleAssignments"); err != nil { return nil, err } diff --git a/azurerm/internal/services/authorization/parse/role_assignment_test.go b/azurerm/internal/services/authorization/parse/role_assignment_test.go index c040acd2689c..0917eeb90840 100644 --- a/azurerm/internal/services/authorization/parse/role_assignment_test.go +++ b/azurerm/internal/services/authorization/parse/role_assignment_test.go @@ -10,55 +10,89 @@ var _ resourceid.Formatter = RoleAssignmentId{} func TestRoleAssignmentIDFormatter(t *testing.T) { testData := []struct { - SubscriptionId string - ResourceGroup string - ManagementGroup string - Name string - Expected string + SubscriptionId string + ResourceGroup string + ResourceProvider string + ResourceScope string + ManagementGroup string + Name string + TenantId string + Expected string }{ { SubscriptionId: "", ResourceGroup: "", + ResourceScope: "", ManagementGroup: "", Name: "23456781-2349-8764-5631-234567890121", + TenantId: "", }, { SubscriptionId: "12345678-1234-9876-4563-123456789012", ResourceGroup: "group1", + ResourceScope: "", ManagementGroup: "managementGroup1", Name: "23456781-2349-8764-5631-234567890121", + TenantId: "", }, { SubscriptionId: "12345678-1234-9876-4563-123456789012", ResourceGroup: "", + ResourceScope: "", ManagementGroup: "managementGroup1", Name: 
"23456781-2349-8764-5631-234567890121", + TenantId: "", }, { SubscriptionId: "12345678-1234-9876-4563-123456789012", ResourceGroup: "", + ResourceScope: "", ManagementGroup: "", Name: "23456781-2349-8764-5631-234567890121", + TenantId: "", Expected: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.Authorization/roleAssignments/23456781-2349-8764-5631-234567890121", }, { SubscriptionId: "12345678-1234-9876-4563-123456789012", ResourceGroup: "group1", + ResourceScope: "", ManagementGroup: "", Name: "23456781-2349-8764-5631-234567890121", + TenantId: "", Expected: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/group1/providers/Microsoft.Authorization/roleAssignments/23456781-2349-8764-5631-234567890121", }, { SubscriptionId: "", ResourceGroup: "", + ResourceScope: "", ManagementGroup: "12345678-1234-9876-4563-123456789012", Name: "23456781-2349-8764-5631-234567890121", + TenantId: "", Expected: "/providers/Microsoft.Management/managementGroups/12345678-1234-9876-4563-123456789012/providers/Microsoft.Authorization/roleAssignments/23456781-2349-8764-5631-234567890121", }, + { + SubscriptionId: "", + ResourceGroup: "", + ResourceScope: "", + ManagementGroup: "12345678-1234-9876-4563-123456789012", + Name: "23456781-2349-8764-5631-234567890121", + TenantId: "34567812-3456-7653-6742-345678901234", + Expected: "/providers/Microsoft.Management/managementGroups/12345678-1234-9876-4563-123456789012/providers/Microsoft.Authorization/roleAssignments/23456781-2349-8764-5631-234567890121|34567812-3456-7653-6742-345678901234", + }, + { + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroup: "group1", + ResourceProvider: "Microsoft.Storage", + ResourceScope: "storageAccounts/nameStorageAccount", + ManagementGroup: "", + Name: "23456781-2349-8764-5631-234567890121", + TenantId: "34567812-3456-7653-6742-345678901234", + Expected: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/group1/providers/Microsoft.Storage/storageAccounts/nameStorageAccount/providers/Microsoft.Authorization/roleAssignments/23456781-2349-8764-5631-234567890121|34567812-3456-7653-6742-345678901234", + }, } for _, v := range testData { t.Logf("testing %+v", v) - actual, err := NewRoleAssignmentID(v.SubscriptionId, v.ResourceGroup, v.ManagementGroup, v.Name) + actual, err := NewRoleAssignmentID(v.SubscriptionId, v.ResourceGroup, v.ResourceProvider, v.ResourceScope, v.ManagementGroup, v.Name, v.TenantId) if err != nil { if v.Expected == "" { continue @@ -125,6 +159,7 @@ func TestRoleAssignmentID(t *testing.T) { Expected: &RoleAssignmentId{ SubscriptionID: "12345678-1234-9876-4563-123456789012", ResourceGroup: "", + ResourceScope: "", ManagementGroup: "", Name: "23456781-2349-8764-5631-234567890121", }, @@ -151,6 +186,40 @@ func TestRoleAssignmentID(t *testing.T) { Name: "23456781-2349-8764-5631-234567890121", }, }, + { + Input: "/providers/Microsoft.Management/managementGroups/managementGroup1/providers/Microsoft.Authorization/roleAssignments/23456781-2349-8764-5631-234567890121|34567812-3456-7653-6742-345678901234", + Expected: &RoleAssignmentId{ + SubscriptionID: "", + ResourceGroup: "", + ManagementGroup: "managementGroup1", + Name: "23456781-2349-8764-5631-234567890121", + TenantId: "34567812-3456-7653-6742-345678901234", + }, + }, + { + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/group1/providers/Microsoft.Storage/storageAccounts/nameStorageAccount/providers/Microsoft.Authorization/roleAssignments/23456781-2349-8764-5631-234567890121|34567812-3456-7653-6742-345678901234", + Expected: &RoleAssignmentId{ + SubscriptionID: "12345678-1234-9876-4563-123456789012", + ResourceGroup: "group1", + ResourceProvider: "Microsoft.Storage", + ResourceScope: "storageAccounts/nameStorageAccount", + ManagementGroup: "", + Name: "23456781-2349-8764-5631-234567890121", + TenantId: 
"34567812-3456-7653-6742-345678901234", + }, + }, + { + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/group1/providers/Microsoft.AppPlatform/Spring/spring1/apps/app1/providers/Microsoft.Authorization/roleAssignments/23456781-2349-8764-5631-234567890121|34567812-3456-7653-6742-345678901234", + Expected: &RoleAssignmentId{ + SubscriptionID: "12345678-1234-9876-4563-123456789012", + ResourceGroup: "group1", + ResourceProvider: "Microsoft.AppPlatform", + ResourceScope: "Spring/spring1/apps/app1", + ManagementGroup: "", + Name: "23456781-2349-8764-5631-234567890121", + TenantId: "34567812-3456-7653-6742-345678901234", + }, + }, } for _, v := range testData { @@ -174,15 +243,23 @@ func TestRoleAssignmentID(t *testing.T) { } if actual.SubscriptionID != v.Expected.SubscriptionID { - t.Fatalf("Expected %q but got %q for Role Assignment Name", v.Expected.SubscriptionID, actual.SubscriptionID) + t.Fatalf("Expected %q but got %q for Role Assignment Subscription ID", v.Expected.SubscriptionID, actual.SubscriptionID) } if actual.ResourceGroup != v.Expected.ResourceGroup { - t.Fatalf("Expected %q but got %q for Role Assignment Name", v.Expected.ResourceGroup, actual.ResourceGroup) + t.Fatalf("Expected %q but got %q for Role Assignment Resource Group", v.Expected.ResourceGroup, actual.ResourceGroup) + } + + if actual.ResourceProvider != v.Expected.ResourceProvider { + t.Fatalf("Expected %q but got %q for Role Assignment Resource Provider", v.Expected.ResourceProvider, actual.ResourceProvider) + } + + if actual.ResourceScope != v.Expected.ResourceScope { + t.Fatalf("Expected %q but got %q for Role Assignment Resource Scope", v.Expected.ResourceScope, actual.ResourceScope) } if actual.ManagementGroup != v.Expected.ManagementGroup { - t.Fatalf("Expected %q but got %q for Role Assignment Name", v.Expected.ManagementGroup, actual.ManagementGroup) + t.Fatalf("Expected %q but got %q for Role Assignment Management Group", v.Expected.ManagementGroup, 
actual.ManagementGroup) } } } diff --git a/azurerm/internal/services/authorization/role_assignment_resource.go b/azurerm/internal/services/authorization/role_assignment_resource.go index c26146f4da24..79bfe53ca606 100644 --- a/azurerm/internal/services/authorization/role_assignment_resource.go +++ b/azurerm/internal/services/authorization/role_assignment_resource.go @@ -8,10 +8,12 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2020-04-01-preview/authorization" + "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-11-01/subscriptions" "github.com/hashicorp/go-uuid" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/authorization/parse" billingValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/billing/validate" managementGroupValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/managementgroup/validate" resourceValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/validate" @@ -96,6 +98,13 @@ func resourceArmRoleAssignment() *pluginsdk.Resource { Computed: true, }, + "delegated_managed_identity_resource_id": { + Type: pluginsdk.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: azure.ValidateResourceID, + }, + "description": { Type: pluginsdk.TypeString, Optional: true, @@ -128,6 +137,8 @@ func resourceArmRoleAssignment() *pluginsdk.Resource { func resourceArmRoleAssignmentCreate(d *pluginsdk.ResourceData, meta interface{}) error { roleAssignmentsClient := meta.(*clients.Client).Authorization.RoleAssignmentsClient roleDefinitionsClient := 
meta.(*clients.Client).Authorization.RoleDefinitionsClient + subscriptionClient := meta.(*clients.Client).Subscription.Client + subscriptionId := meta.(*clients.Client).Account.SubscriptionId ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -163,7 +174,17 @@ func resourceArmRoleAssignmentCreate(d *pluginsdk.ResourceData, meta interface{} name = uuid } - existing, err := roleAssignmentsClient.Get(ctx, scope, name, "") + tenantId := "" + delegatedManagedIdentityResourceID := d.Get("delegated_managed_identity_resource_id").(string) + if len(delegatedManagedIdentityResourceID) > 0 { + var err error + tenantId, err = getTenantIdBySubscriptionId(ctx, subscriptionClient, subscriptionId) + if err != nil { + return err + } + } + + existing, err := roleAssignmentsClient.Get(ctx, scope, name, tenantId) if err != nil { if !utils.ResponseWasNotFound(existing.Response) { return fmt.Errorf("Error checking for presence of existing Role Assignment ID for %q (Scope %q): %+v", name, scope, err) @@ -182,6 +203,10 @@ func resourceArmRoleAssignmentCreate(d *pluginsdk.ResourceData, meta interface{} }, } + if len(delegatedManagedIdentityResourceID) > 0 { + properties.RoleAssignmentProperties.DelegatedManagedIdentityResourceID = utils.String(delegatedManagedIdentityResourceID) + } + condition := d.Get("condition").(string) conditionVersion := d.Get("condition_version").(string) @@ -197,11 +222,11 @@ func resourceArmRoleAssignmentCreate(d *pluginsdk.ResourceData, meta interface{} properties.RoleAssignmentProperties.PrincipalType = authorization.ServicePrincipal } - if err := pluginsdk.Retry(d.Timeout(pluginsdk.TimeoutCreate), retryRoleAssignmentsClient(d, scope, name, properties, meta)); err != nil { + if err := pluginsdk.Retry(d.Timeout(pluginsdk.TimeoutCreate), retryRoleAssignmentsClient(d, scope, name, properties, meta, tenantId)); err != nil { return err } - read, err := roleAssignmentsClient.Get(ctx, scope, name, "") + read, err := 
roleAssignmentsClient.Get(ctx, scope, name, tenantId) if err != nil { return err } @@ -209,7 +234,7 @@ func resourceArmRoleAssignmentCreate(d *pluginsdk.ResourceData, meta interface{} return fmt.Errorf("Cannot read Role Assignment ID for %q (Scope %q)", name, scope) } - d.SetId(*read.ID) + d.SetId(parse.ConstructRoleAssignmentId(*read.ID, tenantId)) return resourceArmRoleAssignmentRead(d, meta) } @@ -219,7 +244,11 @@ func resourceArmRoleAssignmentRead(d *pluginsdk.ResourceData, meta interface{}) ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() - resp, err := client.GetByID(ctx, d.Id(), "") + id, err := parse.RoleAssignmentID(d.Id()) + if err != nil { + return err + } + resp, err := client.GetByID(ctx, id.AzureResourceID(), id.TenantId) if err != nil { if utils.ResponseWasNotFound(resp.Response) { log.Printf("[DEBUG] Role Assignment ID %q was not found - removing from state", d.Id()) @@ -237,6 +266,7 @@ func resourceArmRoleAssignmentRead(d *pluginsdk.ResourceData, meta interface{}) d.Set("role_definition_id", props.RoleDefinitionID) d.Set("principal_id", props.PrincipalID) d.Set("principal_type", props.PrincipalType) + d.Set("delegated_managed_identity_resource_id", props.DelegatedManagedIdentityResourceID) d.Set("description", props.Description) d.Set("condition", props.Condition) d.Set("condition_version", props.ConditionVersion) @@ -267,7 +297,7 @@ func resourceArmRoleAssignmentDelete(d *pluginsdk.ResourceData, meta interface{} return err } - resp, err := client.Delete(ctx, id.scope, id.name, "") + resp, err := client.Delete(ctx, id.scope, id.name, id.tenantId) if err != nil { if !utils.ResponseWasNotFound(resp.Response) { return err @@ -277,7 +307,8 @@ func resourceArmRoleAssignmentDelete(d *pluginsdk.ResourceData, meta interface{} return nil } -func retryRoleAssignmentsClient(d *pluginsdk.ResourceData, scope string, name string, properties authorization.RoleAssignmentCreateParameters, meta interface{}) func() 
*pluginsdk.RetryError { +//lintignore:R006 +func retryRoleAssignmentsClient(d *pluginsdk.ResourceData, scope string, name string, properties authorization.RoleAssignmentCreateParameters, meta interface{}, tenantId string) func() *pluginsdk.RetryError { return func() *pluginsdk.RetryError { roleAssignmentsClient := meta.(*clients.Client).Authorization.RoleAssignmentsClient ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) @@ -306,13 +337,13 @@ func retryRoleAssignmentsClient(d *pluginsdk.ResourceData, scope string, name st Target: []string{ "ready", }, - Refresh: roleAssignmentCreateStateRefreshFunc(ctx, roleAssignmentsClient, *resp.ID), + Refresh: roleAssignmentCreateStateRefreshFunc(ctx, roleAssignmentsClient, *resp.ID, tenantId), MinTimeout: 5 * time.Second, ContinuousTargetOccurence: 5, Timeout: d.Timeout(pluginsdk.TimeoutCreate), } - if _, err := stateConf.WaitForState(); err != nil { + if _, err := stateConf.WaitForStateContext(ctx); err != nil { return pluginsdk.NonRetryableError(fmt.Errorf("failed waiting for Role Assignment %q to finish replicating: %+v", name, err)) } @@ -321,27 +352,36 @@ func retryRoleAssignmentsClient(d *pluginsdk.ResourceData, scope string, name st } type roleAssignmentId struct { - scope string - name string + scope string + name string + tenantId string } func parseRoleAssignmentId(input string) (*roleAssignmentId, error) { - segments := strings.Split(input, "/providers/Microsoft.Authorization/roleAssignments/") + tenantId := "" + segments := strings.Split(input, "|") + if len(segments) == 2 { + tenantId = segments[1] + input = segments[0] + } + + segments = strings.Split(input, "/providers/Microsoft.Authorization/roleAssignments/") if len(segments) != 2 { return nil, fmt.Errorf("Expected Role Assignment ID to be in the format `{scope}/providers/Microsoft.Authorization/roleAssignments/{name}` but got %q", input) } // /{scope}/providers/Microsoft.Authorization/roleAssignments/{roleAssignmentName} id := 
roleAssignmentId{ - scope: strings.TrimPrefix(segments[0], "/"), - name: segments[1], + scope: strings.TrimPrefix(segments[0], "/"), + name: segments[1], + tenantId: tenantId, } return &id, nil } -func roleAssignmentCreateStateRefreshFunc(ctx context.Context, client *authorization.RoleAssignmentsClient, roleID string) pluginsdk.StateRefreshFunc { +func roleAssignmentCreateStateRefreshFunc(ctx context.Context, client *authorization.RoleAssignmentsClient, roleID string, tenantId string) pluginsdk.StateRefreshFunc { return func() (interface{}, string, error) { - resp, err := client.GetByID(ctx, roleID, "") + resp, err := client.GetByID(ctx, roleID, tenantId) if err != nil { if utils.ResponseWasNotFound(resp.Response) { return resp, "pending", nil @@ -351,3 +391,14 @@ func roleAssignmentCreateStateRefreshFunc(ctx context.Context, client *authoriza return resp, "ready", nil } } + +func getTenantIdBySubscriptionId(ctx context.Context, client *subscriptions.Client, subscriptionId string) (string, error) { + resp, err := client.Get(ctx, subscriptionId) + if err != nil { + return "", fmt.Errorf("get tenant Id by Subscription %s: %+v", subscriptionId, err) + } + if resp.TenantID == nil { + return "", fmt.Errorf("tenant Id is nil by Subscription %s: %+v", subscriptionId, resp) + } + return *resp.TenantID, nil +} diff --git a/azurerm/internal/services/authorization/role_assignment_resource_test.go b/azurerm/internal/services/authorization/role_assignment_resource_test.go index 58f8fc5c61ab..30d9a18ccd79 100644 --- a/azurerm/internal/services/authorization/role_assignment_resource_test.go +++ b/azurerm/internal/services/authorization/role_assignment_resource_test.go @@ -3,6 +3,7 @@ package authorization_test import ( "context" "fmt" + "os" "testing" "github.com/google/uuid" @@ -127,6 +128,28 @@ func TestAccRoleAssignment_custom(t *testing.T) { }) } +// delegatedManagedIdentityResourceID is used in a cross tenant scenario. 
+// users should set up lighthouse delegation first and then use managing tenant SP to run this test. +func TestAccRoleAssignment_delegatedManagedIdentityResourceID(t *testing.T) { + if os.Getenv("HAS_LIGHTHOUSE_DELEGATION_SETUP") == "" { + t.Skip("Skipping as HAS_LIGHTHOUSE_DELEGATION_SETUP is not specified") + return + } + + data := acceptance.BuildTestData(t, "azurerm_role_assignment", "test") + r := RoleAssignmentResource{} + + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ + { + Config: r.delegatedManagedIdentityResourceID(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + func TestAccRoleAssignment_ServicePrincipal(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_role_assignment", "test") ri := acceptance.RandTimeInt() @@ -213,6 +236,23 @@ func TestAccRoleAssignment_condition(t *testing.T) { }) } +func TestAccRoleAssignment_resourceScoped(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_role_assignment", "test") + id := uuid.New().String() + + r := RoleAssignmentResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.roleResourceScoped(data, id), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep("skip_service_principal_aad_check"), + }) +} + func (r RoleAssignmentResource) Exists(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := parse.RoleAssignmentID(state.ID) if err != nil { @@ -268,6 +308,42 @@ resource "azurerm_role_assignment" "test" { `, id) } +func (RoleAssignmentResource) roleResourceScoped(data acceptance.TestData, id string) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +data "azurerm_client_config" "test" { +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-role-assigment-%d" + location = "%s" +} + +resource "azurerm_storage_account" 
"test" { + name = "unlikely23xst2acct%s" + resource_group_name = azurerm_resource_group.test.name + + location = azurerm_resource_group.test.location + account_tier = "Standard" + account_replication_type = "LRS" + + tags = { + environment = "production" + } +} + +resource "azurerm_role_assignment" "test" { + name = "%s" + scope = azurerm_storage_account.test.id + role_definition_name = "Storage Account Contributor" + principal_id = data.azurerm_client_config.test.object_id +} +`, data.RandomInteger, data.Locations.Primary, data.RandomString, id) +} + func (RoleAssignmentResource) requiresImportConfig(id string) string { return fmt.Sprintf(` %s @@ -364,6 +440,73 @@ resource "azurerm_role_assignment" "test" { `, roleDefinitionId, rInt, roleAssignmentId) } +func (RoleAssignmentResource) delegatedManagedIdentityResourceID(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +data "azurerm_subscription" "primary" { +} + +data "azurerm_client_config" "test" { +} + +resource "azurerm_policy_definition" "test" { + name = "acctestpol-%d" + policy_type = "Custom" + mode = "Indexed" + display_name = "acctestpol-%d" + + policy_rule = < 512 { + errors = append(errors, fmt.Errorf("length should be less than %d", 512)) + return + } + + return +} diff --git a/azurerm/internal/services/bot/validate/bot_channel_registration_description_test.go b/azurerm/internal/services/bot/validate/bot_channel_registration_description_test.go new file mode 100644 index 000000000000..822ee10c5c0e --- /dev/null +++ b/azurerm/internal/services/bot/validate/bot_channel_registration_description_test.go @@ -0,0 +1,38 @@ +package validate + +import ( + "strings" + "testing" +) + +func TestBotChannelRegistrationDescription(t *testing.T) { + testCases := []struct { + Input string + Expected bool + }{ + { + Input: "Test123", + Expected: true, + }, + { + Input: strings.Repeat("t", 511), + Expected: true, + }, + { + Input: strings.Repeat("t", 512), + Expected: 
true, + }, + { + Input: strings.Repeat("t", 513), + Expected: false, + }, + } + + for _, v := range testCases { + _, errors := BotChannelRegistrationDescription(v.Input, "description") + result := len(errors) == 0 + if result != v.Expected { + t.Fatalf("Expected the result to be %t but got %t (and %d errors)", v.Expected, result, len(errors)) + } + } +} diff --git a/azurerm/internal/services/bot/validate/bot_channel_registration_icon_url.go b/azurerm/internal/services/bot/validate/bot_channel_registration_icon_url.go new file mode 100644 index 000000000000..c5faed29ccac --- /dev/null +++ b/azurerm/internal/services/bot/validate/bot_channel_registration_icon_url.go @@ -0,0 +1,21 @@ +package validate + +import ( + "fmt" + "strings" +) + +func BotChannelRegistrationIconUrl(i interface{}, k string) (warnings []string, errors []error) { + v, ok := i.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected type of %s to be string", k)) + return + } + + if !strings.HasSuffix(v, ".png") { + errors = append(errors, fmt.Errorf("only png is supported")) + return + } + + return +} diff --git a/azurerm/internal/services/bot/validate/bot_channel_registration_icon_url_test.go b/azurerm/internal/services/bot/validate/bot_channel_registration_icon_url_test.go new file mode 100644 index 000000000000..471ef93c5292 --- /dev/null +++ b/azurerm/internal/services/bot/validate/bot_channel_registration_icon_url_test.go @@ -0,0 +1,33 @@ +package validate + +import ( + "testing" +) + +func TestBotChannelRegistrationIconUrl(t *testing.T) { + testCases := []struct { + Input string + Expected bool + }{ + { + Input: "test.png", + Expected: true, + }, + { + Input: "http://myicon.png", + Expected: true, + }, + { + Input: "test.jpg", + Expected: false, + }, + } + + for _, v := range testCases { + _, errors := BotChannelRegistrationIconUrl(v.Input, "icon_url") + result := len(errors) == 0 + if result != v.Expected { + t.Fatalf("Expected the result to be %t but got %t (and %d errors)", 
v.Expected, result, len(errors)) + } + } +} diff --git a/azurerm/internal/services/bot/validate/bot_name.go b/azurerm/internal/services/bot/validate/bot_name.go new file mode 100644 index 000000000000..36131fb7ffcd --- /dev/null +++ b/azurerm/internal/services/bot/validate/bot_name.go @@ -0,0 +1,31 @@ +package validate + +import ( + "fmt" + "regexp" +) + +func BotName(i interface{}, k string) (warnings []string, errors []error) { + v, ok := i.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected type of %s to be string", k)) + return + } + + if len(v) < 4 { + errors = append(errors, fmt.Errorf("length should be greater than %d", 4)) + return + } + + if len(v) > 42 { + errors = append(errors, fmt.Errorf("length should be less than %d", 42)) + return + } + + if !regexp.MustCompile(`^[a-zA-Z0-9][a-zA-Z0-9_-]*$`).MatchString(v) { + errors = append(errors, fmt.Errorf("%q must start with a letter or digit and may only contain alphanumeric characters, underscores and dashes", k)) + return + } + + return +} diff --git a/azurerm/internal/services/bot/validate/bot_name_test.go b/azurerm/internal/services/bot/validate/bot_name_test.go new file mode 100644 index 000000000000..4af8c6674df1 --- /dev/null +++ b/azurerm/internal/services/bot/validate/bot_name_test.go @@ -0,0 +1,45 @@ +package validate + +import ( + "strings" + "testing" +) + +func TestBotName(t *testing.T) { + testCases := []struct { + Input string + Expected bool + }{ + { + Input: "Test123", + Expected: true, + }, + { + Input: "Test_123", + Expected: true, + }, + { + Input: "Test-123", + Expected: true, + }, + { + Input: strings.Repeat("s", 41), + Expected: true, + }, + { + Input: strings.Repeat("s", 42), + Expected: true, + }, + { + Input: strings.Repeat("s", 43), + Expected: false, + }, + } + for _, v := range testCases { + _, errors := BotName(v.Input, "bot_name") + result := len(errors) == 0 + if result != v.Expected { + t.Fatalf("Expected the result to be %t but got %t (and %d errors)", 
v.Expected, result, len(errors)) + } + } +} diff --git a/azurerm/internal/services/cdn/cdn_endpoint_resource.go b/azurerm/internal/services/cdn/cdn_endpoint_resource.go index 3d79012083e1..fc8a75e5c803 100644 --- a/azurerm/internal/services/cdn/cdn_endpoint_resource.go +++ b/azurerm/internal/services/cdn/cdn_endpoint_resource.go @@ -5,13 +5,12 @@ import ( "log" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cdn/migration" - "github.com/Azure/azure-sdk-for-go/services/cdn/mgmt/2019-04-15/cdn" "github.com/hashicorp/go-azure-helpers/response" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cdn/migration" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cdn/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" diff --git a/azurerm/internal/services/cdn/cdn_profile_data_source.go b/azurerm/internal/services/cdn/cdn_profile_data_source.go index 9bb3188a7092..48d299f5b020 100644 --- a/azurerm/internal/services/cdn/cdn_profile_data_source.go +++ b/azurerm/internal/services/cdn/cdn_profile_data_source.go @@ -4,10 +4,9 @@ import ( "fmt" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cdn/parse" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cdn/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" diff --git a/azurerm/internal/services/cdn/cdn_profile_resource.go b/azurerm/internal/services/cdn/cdn_profile_resource.go index ab1614db7b38..f7e7e4170984 100644 --- a/azurerm/internal/services/cdn/cdn_profile_resource.go +++ b/azurerm/internal/services/cdn/cdn_profile_resource.go @@ -5,13 +5,12 @@ import ( "log" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cdn/migration" - "github.com/Azure/azure-sdk-for-go/services/cdn/mgmt/2019-04-15/cdn" "github.com/hashicorp/go-azure-helpers/response" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cdn/migration" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cdn/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" diff --git a/azurerm/internal/services/cognitive/client/client.go b/azurerm/internal/services/cognitive/client/client.go index 4442734569e0..a172c998fa5d 100644 --- a/azurerm/internal/services/cognitive/client/client.go +++ b/azurerm/internal/services/cognitive/client/client.go @@ -1,19 +1,24 @@ package client import ( - "github.com/Azure/azure-sdk-for-go/services/cognitiveservices/mgmt/2017-04-18/cognitiveservices" + "github.com/Azure/azure-sdk-for-go/services/cognitiveservices/mgmt/2021-04-30/cognitiveservices" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/common" ) type Client struct { - AccountsClient 
*cognitiveservices.AccountsClient + AccountsClient *cognitiveservices.AccountsClient + DeletedAccountsClient *cognitiveservices.DeletedAccountsClient } func NewClient(o *common.ClientOptions) *Client { accountsClient := cognitiveservices.NewAccountsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) o.ConfigureClient(&accountsClient.Client, o.ResourceManagerAuthorizer) + deletedAccountsClient := cognitiveservices.NewDeletedAccountsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) + o.ConfigureClient(&deletedAccountsClient.Client, o.ResourceManagerAuthorizer) + return &Client{ - AccountsClient: &accountsClient, + AccountsClient: &accountsClient, + DeletedAccountsClient: &deletedAccountsClient, } } diff --git a/azurerm/internal/services/cognitive/cognitive_account_data_source.go b/azurerm/internal/services/cognitive/cognitive_account_data_source.go index b2f892955a57..6e395f653e29 100644 --- a/azurerm/internal/services/cognitive/cognitive_account_data_source.go +++ b/azurerm/internal/services/cognitive/cognitive_account_data_source.go @@ -6,6 +6,8 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/location" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cognitive/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" @@ -69,35 +71,28 @@ func dataSourceCognitiveAccount() *pluginsdk.Resource { func dataSourceCognitiveAccountRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Cognitive.AccountsClient + subscriptionId := 
meta.(*clients.Client).Account.SubscriptionId ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() - name := d.Get("name").(string) - resourceGroup := d.Get("resource_group_name").(string) - - resp, err := client.GetProperties(ctx, resourceGroup, name) + id := parse.NewAccountID(subscriptionId, d.Get("resource_group_name").(string), d.Get("name").(string)) + resp, err := client.Get(ctx, id.ResourceGroup, id.Name) if err != nil { if utils.ResponseWasNotFound(resp.Response) { - return fmt.Errorf("Error: Cognitive Services Account %q (Resource Group %q) was not found", name, resourceGroup) + return fmt.Errorf("%s was not found", id) } - return fmt.Errorf("Error reading the state of AzureRM Cognitive Services Account %q: %+v", name, err) + return fmt.Errorf("retrieving %s: %+v", id, err) } - keys, err := client.ListKeys(ctx, resourceGroup, name) + keys, err := client.ListKeys(ctx, id.ResourceGroup, id.Name) if err != nil { - if utils.ResponseWasNotFound(resp.Response) { - return fmt.Errorf("Error: Keys for Cognitive Services Account %q (Resource Group %q) were not found", name, resourceGroup) - } - return fmt.Errorf("Error obtaining keys for Cognitive Services Account %q in Resource Group %q: %v", name, resourceGroup, err) + // TODO: gracefully fail here + return fmt.Errorf("retrieving Keys for %s: %+v", id, err) } - d.SetId(*resp.ID) - - if location := resp.Location; location != nil { - d.Set("location", azure.NormalizeLocation(*location)) - } + d.SetId(id.ID()) + d.Set("location", location.NormalizeNilable(resp.Location)) d.Set("kind", resp.Kind) - if sku := resp.Sku; sku != nil { d.Set("sku_name", sku.Name) } diff --git a/azurerm/internal/services/cognitive/cognitive_account_resource.go b/azurerm/internal/services/cognitive/cognitive_account_resource.go index 58abdbd87313..01e6c1e58001 100644 --- a/azurerm/internal/services/cognitive/cognitive_account_resource.go +++ 
b/azurerm/internal/services/cognitive/cognitive_account_resource.go @@ -6,17 +6,20 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/cognitiveservices/mgmt/2017-04-18/cognitiveservices" - "github.com/hashicorp/go-azure-helpers/response" + "github.com/Azure/azure-sdk-for-go/services/cognitiveservices/mgmt/2021-04-30/cognitiveservices" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" commonValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/location" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/locks" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cognitive/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cognitive/validate" + msiparse "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/msi/parse" + msiValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/msi/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network" networkParse "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/parse" + storageValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/storage/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/set" @@ -83,6 +86,7 @@ func resourceCognitiveAccount() *pluginsdk.Resource { "ImmersiveReader", "LUIS", 
"LUIS.Authoring", + "MetricsAdvisor", "Personalizer", "QnAMaker", "Recommendations", @@ -104,10 +108,95 @@ func resourceCognitiveAccount() *pluginsdk.Resource { }, false), }, - "qna_runtime_endpoint": { + "custom_subdomain_name": { Type: pluginsdk.TypeString, Optional: true, - ValidateFunc: validation.IsURLWithHTTPorHTTPS, + ForceNew: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + + "fqdns": { + Type: pluginsdk.TypeList, + Optional: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + ValidateFunc: validation.StringIsNotEmpty, + }, + }, + + "identity": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "type": { + Type: pluginsdk.TypeString, + Optional: true, + Default: string(cognitiveservices.ResourceIdentityTypeNone), + ValidateFunc: validation.StringInSlice([]string{ + string(cognitiveservices.ResourceIdentityTypeNone), + string(cognitiveservices.ResourceIdentityTypeSystemAssigned), + string(cognitiveservices.ResourceIdentityTypeUserAssigned), + string(cognitiveservices.ResourceIdentityTypeSystemAssignedUserAssigned), + }, false), + }, + + "principal_id": { + Type: pluginsdk.TypeString, + Computed: true, + }, + + "tenant_id": { + Type: pluginsdk.TypeString, + Computed: true, + }, + + "identity_ids": { + Type: pluginsdk.TypeSet, + Optional: true, + MinItems: 1, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + ValidateFunc: msiValidate.UserAssignedIdentityID, + }, + }, + }, + }, + }, + + "local_auth_enabled": { + Type: pluginsdk.TypeBool, + Optional: true, + Default: true, + }, + + "metrics_advisor_aad_client_id": { + Type: pluginsdk.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.IsUUID, + }, + + "metrics_advisor_aad_tenant_id": { + Type: pluginsdk.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.IsUUID, + }, + + "metrics_advisor_super_user_name": { + Type: pluginsdk.TypeString, + Optional: true, 
+ ForceNew: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + + "metrics_advisor_website_name": { + Type: pluginsdk.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringIsNotEmpty, }, "network_acls": { @@ -121,8 +210,8 @@ func resourceCognitiveAccount() *pluginsdk.Resource { Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ - string(cognitiveservices.Allow), - string(cognitiveservices.Deny), + string(cognitiveservices.NetworkRuleActionAllow), + string(cognitiveservices.NetworkRuleActionDeny), }, false), }, "ip_rules": { @@ -137,20 +226,77 @@ func resourceCognitiveAccount() *pluginsdk.Resource { }, Set: set.HashIPv4AddressOrCIDR, }, + // TODO 3.0 - Remove below property "virtual_network_subnet_ids": { - Type: pluginsdk.TypeSet, - Optional: true, - Elem: &pluginsdk.Schema{Type: pluginsdk.TypeString}, + Type: pluginsdk.TypeSet, + Optional: true, + Computed: true, + ConflictsWith: []string{"network_acls.0.virtual_network_rules"}, + Deprecated: "Deprecated in favour of `virtual_network_rules`", + Elem: &pluginsdk.Schema{Type: pluginsdk.TypeString}, + }, + + "virtual_network_rules": { + Type: pluginsdk.TypeSet, + Optional: true, + Computed: true, // TODO -- remove this when deprecation resolves + ConflictsWith: []string{"network_acls.0.virtual_network_subnet_ids"}, + ConfigMode: pluginsdk.SchemaConfigModeAttr, // TODO -- remove in 3.0, because this property is optional and computed, it has to be declared as empty array to remove existed values + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "subnet_id": { + Type: pluginsdk.TypeString, + Required: true, + }, + + "ignore_missing_vnet_service_endpoint": { + Type: pluginsdk.TypeBool, + Optional: true, + Default: false, + }, + }, + }, }, }, }, }, - "custom_subdomain_name": { + "outbound_network_access_restrited": { + Type: pluginsdk.TypeBool, + Optional: true, + Default: false, + }, + + "public_network_access_enabled": { + 
Type: pluginsdk.TypeBool, + Optional: true, + Default: true, + }, + + "qna_runtime_endpoint": { Type: pluginsdk.TypeString, Optional: true, - ForceNew: true, - ValidateFunc: validation.StringIsNotEmpty, + ValidateFunc: validation.IsURLWithHTTPorHTTPS, + }, + + "storage": { + Type: pluginsdk.TypeList, + Optional: true, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "storage_account_id": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: storageValidate.StorageAccountID, + }, + + "identity_client_id": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.IsUUID, + }, + }, + }, }, "tags": tags.Schema(), @@ -177,32 +323,32 @@ func resourceCognitiveAccount() *pluginsdk.Resource { func resourceCognitiveAccountCreate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Cognitive.AccountsClient + subscriptionId := meta.(*clients.Client).Account.SubscriptionId ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() - name := d.Get("name").(string) - resourceGroup := d.Get("resource_group_name").(string) kind := d.Get("kind").(string) + id := parse.NewAccountID(subscriptionId, d.Get("resource_group_name").(string), d.Get("name").(string)) if d.IsNewResource() { - existing, err := client.GetProperties(ctx, resourceGroup, name) + existing, err := client.Get(ctx, id.ResourceGroup, id.Name) if err != nil { if !utils.ResponseWasNotFound(existing.Response) { - return fmt.Errorf("checking for presence of existing Cognitive Account %q (Resource Group %q): %s", name, resourceGroup, err) + return fmt.Errorf("checking for presence of existing %s: %+v", id, err) } } - if existing.ID != nil && *existing.ID != "" { - return tf.ImportAsExistsError("azurerm_cognitive_account", *existing.ID) + if !utils.ResponseWasNotFound(existing.Response) { + return tf.ImportAsExistsError("azurerm_cognitive_account", id.ID()) } } sku, err := 
expandAccountSkuName(d.Get("sku_name").(string)) if err != nil { - return fmt.Errorf("expanding sku_name for Cognitive Account %s (Resource Group %q): %v", name, resourceGroup, err) + return fmt.Errorf("expanding sku_name for %s: %v", id, err) } - networkAcls, subnetIds := expandCognitiveAccountNetworkAcls(d.Get("network_acls").([]interface{})) + networkAcls, subnetIds := expandCognitiveAccountNetworkAcls(d) // also lock on the Virtual Network ID's since modifications in the networking stack are exclusive virtualNetworkNames := make([]string, 0) @@ -218,50 +364,57 @@ func resourceCognitiveAccountCreate(d *pluginsdk.ResourceData, meta interface{}) locks.MultipleByName(&virtualNetworkNames, network.VirtualNetworkResourceName) defer locks.UnlockMultipleByName(&virtualNetworkNames, network.VirtualNetworkResourceName) + publicNetworkAccess := cognitiveservices.PublicNetworkAccessEnabled + if !d.Get("public_network_access_enabled").(bool) { + publicNetworkAccess = cognitiveservices.PublicNetworkAccessDisabled + } + + apiProps, err := expandCognitiveAccountAPIProperties(d) + if err != nil { + return err + } props := cognitiveservices.Account{ Kind: utils.String(kind), Location: utils.String(azure.NormalizeLocation(d.Get("location").(string))), Sku: sku, Properties: &cognitiveservices.AccountProperties{ - APIProperties: &cognitiveservices.AccountAPIProperties{}, - NetworkAcls: networkAcls, - CustomSubDomainName: utils.String(d.Get("custom_subdomain_name").(string)), + APIProperties: apiProps, + NetworkAcls: networkAcls, + CustomSubDomainName: utils.String(d.Get("custom_subdomain_name").(string)), + AllowedFqdnList: utils.ExpandStringSlice(d.Get("fqdns").([]interface{})), + PublicNetworkAccess: publicNetworkAccess, + UserOwnedStorage: expandCognitiveAccountStorage(d.Get("storage").([]interface{})), + RestrictOutboundNetworkAccess: utils.Bool(d.Get("outbound_network_access_restrited").(bool)), + DisableLocalAuth: utils.Bool(!d.Get("local_auth_enabled").(bool)), }, Tags: 
tags.Expand(d.Get("tags").(map[string]interface{})), } - if kind == "QnAMaker" { - if v, ok := d.GetOk("qna_runtime_endpoint"); ok && v != "" { - props.Properties.APIProperties.QnaRuntimeEndpoint = utils.String(v.(string)) - } else { - return fmt.Errorf("the QnAMaker runtime endpoint `qna_runtime_endpoint` is required when kind is set to `QnAMaker`") - } + identityRaw := d.Get("identity").([]interface{}) + identity, err := expandCognitiveAccountIdentity(identityRaw) + if err != nil { + return fmt.Errorf("Error expanding `identity`: %+v", err) } + props.Identity = identity - if _, err := client.Create(ctx, resourceGroup, name, props); err != nil { - return fmt.Errorf("creating Cognitive Services Account %q (Resource Group %q): %+v", name, resourceGroup, err) + if _, err := client.Create(ctx, id.ResourceGroup, id.Name, props); err != nil { + return fmt.Errorf("creating %s: %+v", id, err) } stateConf := &pluginsdk.StateChangeConf{ Pending: []string{"Creating"}, Target: []string{"Succeeded"}, - Refresh: cognitiveAccountStateRefreshFunc(ctx, client, resourceGroup, name), + Refresh: cognitiveAccountStateRefreshFunc(ctx, client, id), MinTimeout: 15 * time.Second, Timeout: d.Timeout(pluginsdk.TimeoutCreate), } - if _, err = stateConf.WaitForState(); err != nil { - return fmt.Errorf("waiting for Cognitive Account (%s) to become available: %s", d.Get("name"), err) + if _, err = stateConf.WaitForStateContext(ctx); err != nil { + return fmt.Errorf("waiting for creation of %s: %+v", id, err) } - read, err := client.GetProperties(ctx, resourceGroup, name) - if err != nil { - return fmt.Errorf("retrieving Cognitive Services Account %q (Resource Group %q): %+v", name, resourceGroup, err) - } - - d.SetId(*read.ID) - + d.SetId(id.ID()) return resourceCognitiveAccountRead(d, meta) } @@ -277,10 +430,10 @@ func resourceCognitiveAccountUpdate(d *pluginsdk.ResourceData, meta interface{}) sku, err := expandAccountSkuName(d.Get("sku_name").(string)) if err != nil { - return 
fmt.Errorf("error expanding sku_name for Cognitive Account %s (Resource Group %q): %v", id.Name, id.ResourceGroup, err) + return fmt.Errorf("error expanding sku_name for %s: %+v", *id, err) } - networkAcls, subnetIds := expandCognitiveAccountNetworkAcls(d.Get("network_acls").([]interface{})) + networkAcls, subnetIds := expandCognitiveAccountNetworkAcls(d) // also lock on the Virtual Network ID's since modifications in the networking stack are exclusive virtualNetworkNames := make([]string, 0) @@ -297,40 +450,52 @@ func resourceCognitiveAccountUpdate(d *pluginsdk.ResourceData, meta interface{}) locks.MultipleByName(&virtualNetworkNames, network.VirtualNetworkResourceName) defer locks.UnlockMultipleByName(&virtualNetworkNames, network.VirtualNetworkResourceName) + publicNetworkAccess := cognitiveservices.PublicNetworkAccessEnabled + if !d.Get("public_network_access_enabled").(bool) { + publicNetworkAccess = cognitiveservices.PublicNetworkAccessDisabled + } + + apiProps, err := expandCognitiveAccountAPIProperties(d) + if err != nil { + return err + } + props := cognitiveservices.Account{ Sku: sku, Properties: &cognitiveservices.AccountProperties{ - APIProperties: &cognitiveservices.AccountAPIProperties{}, - NetworkAcls: networkAcls, - CustomSubDomainName: utils.String(d.Get("custom_subdomain_name").(string)), + APIProperties: apiProps, + NetworkAcls: networkAcls, + CustomSubDomainName: utils.String(d.Get("custom_subdomain_name").(string)), + AllowedFqdnList: utils.ExpandStringSlice(d.Get("fqdns").([]interface{})), + PublicNetworkAccess: publicNetworkAccess, + UserOwnedStorage: expandCognitiveAccountStorage(d.Get("storage").([]interface{})), + RestrictOutboundNetworkAccess: utils.Bool(d.Get("outbound_network_access_restrited").(bool)), + DisableLocalAuth: utils.Bool(!d.Get("local_auth_enabled").(bool)), }, Tags: tags.Expand(d.Get("tags").(map[string]interface{})), } - - if kind := d.Get("kind"); kind == "QnAMaker" { - if v, ok := d.GetOk("qna_runtime_endpoint"); ok && 
v != "" { - props.Properties.APIProperties.QnaRuntimeEndpoint = utils.String(v.(string)) - } else { - return fmt.Errorf("the QnAMaker runtime endpoint `qna_runtime_endpoint` is required when kind is set to `QnAMaker`") - } + identityRaw := d.Get("identity").([]interface{}) + identity, err := expandCognitiveAccountIdentity(identityRaw) + if err != nil { + return fmt.Errorf("Error expanding `identity`: %+v", err) } + props.Identity = identity if _, err = client.Update(ctx, id.ResourceGroup, id.Name, props); err != nil { - return fmt.Errorf("Error updating Cognitive Services Account %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err) + return fmt.Errorf("updating %s: %+v", *id, err) } stateConf := &pluginsdk.StateChangeConf{ Pending: []string{"Accepted"}, Target: []string{"Succeeded"}, - Refresh: cognitiveAccountStateRefreshFunc(ctx, client, id.ResourceGroup, id.Name), + Refresh: cognitiveAccountStateRefreshFunc(ctx, client, *id), MinTimeout: 15 * time.Second, Timeout: d.Timeout(pluginsdk.TimeoutCreate), } - if _, err = stateConf.WaitForState(); err != nil { - return fmt.Errorf("waiting for Cognitive Account (%s) to become available: %s", d.Get("name"), err) + if _, err = stateConf.WaitForStateContext(ctx); err != nil { + return fmt.Errorf("waiting for update of %s: %+v", *id, err) } - return resourceCognitiveAccountRead(d, meta) } @@ -344,47 +509,62 @@ func resourceCognitiveAccountRead(d *pluginsdk.ResourceData, meta interface{}) e return err } - resp, err := client.GetProperties(ctx, id.ResourceGroup, id.Name) + resp, err := client.Get(ctx, id.ResourceGroup, id.Name) if err != nil { if utils.ResponseWasNotFound(resp.Response) { - log.Printf("[DEBUG] Cognitive Services Account %q was not found in Resource Group %q - removing from state!", id.Name, id.ResourceGroup) + log.Printf("[DEBUG] %s was not found", *id) d.SetId("") return nil } - return err + + return fmt.Errorf("retrieving %s: %+v", *id, err) + } + + keys, err := client.ListKeys(ctx, 
id.ResourceGroup, id.Name) + if err != nil { + // note for the resource we shouldn't gracefully fail since we have permission to CRUD it + return fmt.Errorf("listing the Keys for %s: %+v", *id, err) } d.Set("name", id.Name) d.Set("resource_group_name", id.ResourceGroup) d.Set("kind", resp.Kind) - if location := resp.Location; location != nil { - d.Set("location", azure.NormalizeLocation(*location)) - } - + d.Set("location", location.NormalizeNilable(resp.Location)) if sku := resp.Sku; sku != nil { d.Set("sku_name", sku.Name) } + identity, err := flattenCognitiveAccountIdentity(resp.Identity) + if err != nil { + return err + } + d.Set("identity", identity) + if props := resp.Properties; props != nil { if apiProps := props.APIProperties; apiProps != nil { d.Set("qna_runtime_endpoint", apiProps.QnaRuntimeEndpoint) + d.Set("metrics_advisor_aad_client_id", apiProps.AadClientID) + d.Set("metrics_advisor_aad_tenant_id", apiProps.AadTenantID) + d.Set("metrics_advisor_super_user_name", apiProps.SuperUser) + d.Set("metrics_advisor_website_name", apiProps.WebsiteName) } d.Set("endpoint", props.Endpoint) d.Set("custom_subdomain_name", props.CustomSubDomainName) if err := d.Set("network_acls", flattenCognitiveAccountNetworkAcls(props.NetworkAcls)); err != nil { - return fmt.Errorf("setting `network_acls` for Cognitive Account %q: %+v", *resp.Name, err) + return fmt.Errorf("setting `network_acls` for Cognitive Account %q: %+v", id, err) } - } - - keys, err := client.ListKeys(ctx, id.ResourceGroup, id.Name) - if err != nil { - if utils.ResponseWasNotFound(resp.Response) { - log.Printf("[DEBUG] Not able to obtain keys for Cognitive Services Account %q in Resource Group %q - removing from state!", id.Name, id.ResourceGroup) - d.SetId("") - return nil + d.Set("fqdns", utils.FlattenStringSlice(props.AllowedFqdnList)) + d.Set("public_network_access_enabled", props.PublicNetworkAccess == cognitiveservices.PublicNetworkAccessEnabled) + if err := d.Set("storage", 
flattenCognitiveAccountStorage(props.UserOwnedStorage)); err != nil { + return fmt.Errorf("setting `storages` for Cognitive Account %q: %+v", id, err) + } + if props.RestrictOutboundNetworkAccess != nil { + d.Set("outbound_network_access_restrited", props.RestrictOutboundNetworkAccess) + } + if props.DisableLocalAuth != nil { + d.Set("local_auth_enabled", !*props.DisableLocalAuth) } - return fmt.Errorf("Error obtaining keys for Cognitive Services Account %q in Resource Group %q: %v", id.Name, id.ResourceGroup, err) } d.Set("primary_access_key", keys.Key1) @@ -394,7 +574,8 @@ func resourceCognitiveAccountRead(d *pluginsdk.ResourceData, meta interface{}) e } func resourceCognitiveAccountDelete(d *pluginsdk.ResourceData, meta interface{}) error { - client := meta.(*clients.Client).Cognitive.AccountsClient + accountsClient := meta.(*clients.Client).Cognitive.AccountsClient + deletedAccountsClient := meta.(*clients.Client).Cognitive.DeletedAccountsClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() @@ -403,11 +584,33 @@ func resourceCognitiveAccountDelete(d *pluginsdk.ResourceData, meta interface{}) return err } - resp, err := client.Delete(ctx, id.ResourceGroup, id.Name) + // first we need to retrieve it, since we need the location to be able to purge it + log.Printf("[DEBUG] Retrieving %s..", *id) + account, err := accountsClient.Get(ctx, id.ResourceGroup, id.Name) + if err != nil { + return fmt.Errorf("retrieving %s: %+v", *id, err) + } + + log.Printf("[DEBUG] Deleting %s..", *id) + deleteFuture, err := accountsClient.Delete(ctx, id.ResourceGroup, id.Name) if err != nil { - if !response.WasNotFound(resp.Response) { - return fmt.Errorf("Error deleting Cognitive Services Account %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err) + return fmt.Errorf("deleting %s: %+v", *id, err) + } + if err := deleteFuture.WaitForCompletionRef(ctx, accountsClient.Client); err != nil { + return fmt.Errorf("waiting for deletion of %s: 
%+v", *id, err) + } + + if meta.(*clients.Client).Features.CognitiveAccount.PurgeSoftDeleteOnDestroy { + log.Printf("[DEBUG] Purging %s..", *id) + purgeFuture, err := deletedAccountsClient.Purge(ctx, *account.Location, id.ResourceGroup, id.Name) + if err != nil { + return fmt.Errorf("purging %s: %+v", *id, err) + } + if err := purgeFuture.WaitForCompletionRef(ctx, deletedAccountsClient.Client); err != nil { + return fmt.Errorf("waiting for purge of %s: %+v", *id, err) } + } else { + log.Printf("[DEBUG] Skipping Purge of %s", *id) } return nil @@ -417,11 +620,11 @@ func expandAccountSkuName(skuName string) (*cognitiveservices.Sku, error) { var tier cognitiveservices.SkuTier switch skuName[0:1] { case "F": - tier = cognitiveservices.Free + tier = cognitiveservices.SkuTierFree case "S": - tier = cognitiveservices.Standard + tier = cognitiveservices.SkuTierStandard case "P": - tier = cognitiveservices.Premium + tier = cognitiveservices.SkuTierPremium default: return nil, fmt.Errorf("sku_name %s has unknown sku tier %s", skuName, skuName[0:1]) } @@ -432,18 +635,19 @@ func expandAccountSkuName(skuName string) (*cognitiveservices.Sku, error) { }, nil } -func cognitiveAccountStateRefreshFunc(ctx context.Context, client *cognitiveservices.AccountsClient, resourceGroupName string, cognitiveAccountName string) pluginsdk.StateRefreshFunc { +func cognitiveAccountStateRefreshFunc(ctx context.Context, client *cognitiveservices.AccountsClient, id parse.AccountId) pluginsdk.StateRefreshFunc { return func() (interface{}, string, error) { - res, err := client.GetProperties(ctx, resourceGroupName, cognitiveAccountName) + res, err := client.Get(ctx, id.ResourceGroup, id.Name) if err != nil { - return nil, "", fmt.Errorf("issuing read request in cognitiveAccountStateRefreshFunc to Azure ARM for Cognitive Account '%s' (RG: '%s'): %s", cognitiveAccountName, resourceGroupName, err) + return nil, "", fmt.Errorf("polling for %s: %+v", id, err) } return res, 
string(res.Properties.ProvisioningState), nil } } -func expandCognitiveAccountNetworkAcls(input []interface{}) (*cognitiveservices.NetworkRuleSet, []string) { +func expandCognitiveAccountNetworkAcls(d *pluginsdk.ResourceData) (*cognitiveservices.NetworkRuleSet, []string) { + input := d.Get("network_acls").([]interface{}) subnetIds := make([]string, 0) if len(input) == 0 || input[0] == nil { return nil, subnetIds @@ -463,15 +667,30 @@ func expandCognitiveAccountNetworkAcls(input []interface{}) (*cognitiveservices. ipRules = append(ipRules, rule) } - networkRulesRaw := v["virtual_network_subnet_ids"].(*pluginsdk.Set) networkRules := make([]cognitiveservices.VirtualNetworkRule, 0) - for _, v := range networkRulesRaw.List() { - rawId := v.(string) - subnetIds = append(subnetIds, rawId) - rule := cognitiveservices.VirtualNetworkRule{ - ID: utils.String(rawId), + if d.HasChange("network_acls.0.virtual_network_subnet_ids") { + networkRulesRaw := v["virtual_network_subnet_ids"] + for _, v := range networkRulesRaw.(*pluginsdk.Set).List() { + rawId := v.(string) + subnetIds = append(subnetIds, rawId) + rule := cognitiveservices.VirtualNetworkRule{ + ID: utils.String(rawId), + } + networkRules = append(networkRules, rule) + } + } + if d.HasChange("network_acls.0.virtual_network_rules") { + networkRulesRaw := v["virtual_network_rules"] + for _, v := range networkRulesRaw.(*pluginsdk.Set).List() { + value := v.(map[string]interface{}) + subnetId := value["subnet_id"].(string) + subnetIds = append(subnetIds, subnetId) + rule := cognitiveservices.VirtualNetworkRule{ + ID: utils.String(subnetId), + IgnoreMissingVnetServiceEndpoint: utils.Bool(value["ignore_missing_vnet_service_endpoint"].(bool)), + } + networkRules = append(networkRules, rule) } - networkRules = append(networkRules, rule) } ruleSet := cognitiveservices.NetworkRuleSet{ @@ -482,15 +701,104 @@ func expandCognitiveAccountNetworkAcls(input []interface{}) (*cognitiveservices. 
return &ruleSet, subnetIds } +func expandCognitiveAccountStorage(input []interface{}) *[]cognitiveservices.UserOwnedStorage { + if len(input) == 0 { + return nil + } + results := make([]cognitiveservices.UserOwnedStorage, 0) + for _, v := range input { + value := v.(map[string]interface{}) + results = append(results, cognitiveservices.UserOwnedStorage{ + ResourceID: utils.String(value["storage_account_id"].(string)), + IdentityClientID: utils.String(value["identity_client_id"].(string)), + }) + } + return &results +} + +func expandCognitiveAccountIdentity(vs []interface{}) (*cognitiveservices.Identity, error) { + if len(vs) == 0 { + return &cognitiveservices.Identity{ + Type: cognitiveservices.ResourceIdentityTypeNone, + }, nil + } + + v := vs[0].(map[string]interface{}) + managedServiceIdentity := cognitiveservices.Identity{ + Type: cognitiveservices.ResourceIdentityType(v["type"].(string)), + } + + var identityIdSet []interface{} + if identityIds, ok := v["identity_ids"]; ok { + identityIdSet = identityIds.(*pluginsdk.Set).List() + } + + // If type contains `UserAssigned`, `identity_ids` must be specified and have at least 1 element + if managedServiceIdentity.Type == cognitiveservices.ResourceIdentityTypeUserAssigned || managedServiceIdentity.Type == cognitiveservices.ResourceIdentityTypeSystemAssignedUserAssigned { + if len(identityIdSet) == 0 { + return nil, fmt.Errorf("`identity_ids` must have at least 1 element when `type` includes `UserAssigned`") + } + + userAssignedIdentities := make(map[string]*cognitiveservices.UserAssignedIdentity) + for _, id := range identityIdSet { + userAssignedIdentities[id.(string)] = &cognitiveservices.UserAssignedIdentity{} + } + + managedServiceIdentity.UserAssignedIdentities = userAssignedIdentities + } else if len(identityIdSet) > 0 { + // If type does _not_ contain `UserAssigned` (i.e. 
is set to `SystemAssigned` or defaulted to `None`), `identity_ids` is not allowed + return nil, fmt.Errorf("`identity_ids` can only be specified when `type` includes `UserAssigned`; but `type` is currently %q", managedServiceIdentity.Type) + } + + return &managedServiceIdentity, nil +} + +func expandCognitiveAccountAPIProperties(d *pluginsdk.ResourceData) (*cognitiveservices.APIProperties, error) { + props := cognitiveservices.APIProperties{} + kind := d.Get("kind") + if kind == "QnAMaker" { + if v, ok := d.GetOk("qna_runtime_endpoint"); ok && v != "" { + props.QnaRuntimeEndpoint = utils.String(v.(string)) + } else { + return nil, fmt.Errorf("the QnAMaker runtime endpoint `qna_runtime_endpoint` is required when kind is set to `QnAMaker`") + } + } + if v, ok := d.GetOk("metrics_advisor_aad_client_id"); ok { + if kind == "MetricsAdvisor" { + props.AadClientID = utils.String(v.(string)) + } else { + return nil, fmt.Errorf("metrics_advisor_aad_client_id can only used set when kind is set to `MetricsAdvisor`") + } + } + if v, ok := d.GetOk("metrics_advisor_aad_tenant_id"); ok { + if kind == "MetricsAdvisor" { + props.AadTenantID = utils.String(v.(string)) + } else { + return nil, fmt.Errorf("metrics_advisor_aad_tenant_id can only used set when kind is set to `MetricsAdvisor`") + } + } + if v, ok := d.GetOk("metrics_advisor_super_user_name"); ok { + if kind == "MetricsAdvisor" { + props.SuperUser = utils.String(v.(string)) + } else { + return nil, fmt.Errorf("metrics_advisor_super_user_name can only used set when kind is set to `MetricsAdvisor`") + } + } + if v, ok := d.GetOk("metrics_advisor_website_name"); ok { + if kind == "MetricsAdvisor" { + props.WebsiteName = utils.String(v.(string)) + } else { + return nil, fmt.Errorf("metrics_advisor_website_name can only used set when kind is set to `MetricsAdvisor`") + } + } + return &props, nil +} + func flattenCognitiveAccountNetworkAcls(input *cognitiveservices.NetworkRuleSet) []interface{} { if input == nil { return 
[]interface{}{} } - output := make(map[string]interface{}) - - output["default_action"] = string(input.DefaultAction) - ipRules := make([]interface{}, 0) if input.IPRules != nil { for _, v := range *input.IPRules { @@ -501,8 +809,8 @@ func flattenCognitiveAccountNetworkAcls(input *cognitiveservices.NetworkRuleSet) ipRules = append(ipRules, *v.Value) } } - output["ip_rules"] = pluginsdk.NewSet(pluginsdk.HashString, ipRules) + virtualNetworkSubnetIds := make([]interface{}, 0) virtualNetworkRules := make([]interface{}, 0) if input.VirtualNetworkRules != nil { for _, v := range *input.VirtualNetworkRules { @@ -516,10 +824,68 @@ func flattenCognitiveAccountNetworkAcls(input *cognitiveservices.NetworkRuleSet) id = subnetId.ID() } - virtualNetworkRules = append(virtualNetworkRules, id) + virtualNetworkSubnetIds = append(virtualNetworkSubnetIds, id) + virtualNetworkRules = append(virtualNetworkRules, map[string]interface{}{ + "subnet_id": id, + "ignore_missing_vnet_service_endpoint": *v.IgnoreMissingVnetServiceEndpoint, + }) + } + } + return []interface{}{ + map[string]interface{}{ + "default_action": string(input.DefaultAction), + "ip_rules": pluginsdk.NewSet(pluginsdk.HashString, ipRules), + "virtual_network_subnet_ids": pluginsdk.NewSet(pluginsdk.HashString, virtualNetworkSubnetIds), + "virtual_network_rules": virtualNetworkRules, + }, + } +} + +func flattenCognitiveAccountStorage(input *[]cognitiveservices.UserOwnedStorage) []interface{} { + if input == nil { + return []interface{}{} + } + results := make([]interface{}, 0) + for _, v := range *input { + value := make(map[string]interface{}) + if v.ResourceID != nil { + value["storage_account_id"] = *v.ResourceID + } + if v.IdentityClientID != nil { + value["identity_client_id"] = *v.IdentityClientID + } + results = append(results, value) + } + return results +} + +func flattenCognitiveAccountIdentity(identity *cognitiveservices.Identity) ([]interface{}, error) { + if identity == nil || identity.Type == 
cognitiveservices.ResourceIdentityTypeNone { + return make([]interface{}, 0), nil + } + + result := make(map[string]interface{}) + result["type"] = string(identity.Type) + + if identity.PrincipalID != nil { + result["principal_id"] = *identity.PrincipalID + } + + if identity.TenantID != nil { + result["tenant_id"] = *identity.TenantID + } + + identityIds := make([]interface{}, 0) + if identity.UserAssignedIdentities != nil { + for key := range identity.UserAssignedIdentities { + parsedId, err := msiparse.UserAssignedIdentityID(key) + if err != nil { + return nil, err + } + identityIds = append(identityIds, parsedId.ID()) } + result["identity_ids"] = pluginsdk.NewSet(pluginsdk.HashString, identityIds) } - output["virtual_network_subnet_ids"] = pluginsdk.NewSet(pluginsdk.HashString, virtualNetworkRules) - return []interface{}{output} + return []interface{}{result}, nil } diff --git a/azurerm/internal/services/cognitive/cognitive_account_resource_test.go b/azurerm/internal/services/cognitive/cognitive_account_resource_test.go index e58dca1194fa..1111a71f77b8 100644 --- a/azurerm/internal/services/cognitive/cognitive_account_resource_test.go +++ b/azurerm/internal/services/cognitive/cognitive_account_resource_test.go @@ -6,6 +6,7 @@ import ( "regexp" "testing" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" @@ -55,6 +56,23 @@ func TestAccCognitiveAccount_speechServices(t *testing.T) { }) } +func TestAccCognitiveAccount_speechServicesWithStorage(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_cognitive_account", "test") + r := CognitiveAccountResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.speechServicesWithStorage(data), + 
Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("primary_access_key").Exists(), + check.That(data.ResourceName).Key("secondary_access_key").Exists(), + ), + }, + data.ImportStep(), + }) +} + func TestAccCognitiveAccount_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cognitive_account", "test") r := CognitiveAccountResource{} @@ -196,6 +214,28 @@ func TestAccCognitiveAccount_withMultipleCognitiveAccounts(t *testing.T) { }) } +func TestAccCognitiveAccount_networkAclsVirtualNetworkRules(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_cognitive_account", "test") + r := CognitiveAccountResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.networkAclsVirtualNetworkRules(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.networkAclsVirtualNetworkRulesUpdated(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + func TestAccCognitiveAccount_networkAcls(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cognitive_account", "test") r := CognitiveAccountResource{} @@ -218,13 +258,68 @@ func TestAccCognitiveAccount_networkAcls(t *testing.T) { }) } +func TestAccCognitiveAccount_identity(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_cognitive_account", "test") + r := CognitiveAccountResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.identitySystemAssignedUserAssigned(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + 
check.That(data.ResourceName).Key("identity.0.principal_id").MatchesRegex(validate.UUIDRegExp), + check.That(data.ResourceName).Key("identity.0.tenant_id").MatchesRegex(validate.UUIDRegExp), + ), + }, + data.ImportStep(), + { + Config: r.identityUserAssigned(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.identitySystemAssigned(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("identity.0.principal_id").MatchesRegex(validate.UUIDRegExp), + check.That(data.ResourceName).Key("identity.0.tenant_id").MatchesRegex(validate.UUIDRegExp), + ), + }, + data.ImportStep(), + }) +} + +func TestAccCognitiveAccount_metricsAdvisor(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_cognitive_account", "test") + r := CognitiveAccountResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.metricsAdvisor(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + func (t CognitiveAccountResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := parse.AccountID(state.ID) if err != nil { return nil, err } - resp, err := clients.Cognitive.AccountsClient.GetProperties(ctx, id.ResourceGroup, id.Name) + resp, err := clients.Cognitive.AccountsClient.Get(ctx, id.ResourceGroup, id.Name) if err != nil { return nil, fmt.Errorf("retrieving Cognitive Services Account %q (Resource Group: %q) does not exist", id.Name, id.ResourceGroup) } @@ -253,6 +348,96 @@ resource "azurerm_cognitive_account" "test" { `, data.RandomInteger, data.Locations.Primary, data.RandomInteger) } +func (CognitiveAccountResource) identitySystemAssigned(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource 
"azurerm_resource_group" "test" { + name = "acctestRG-cognitive-%d" + location = "%s" +} + +resource "azurerm_cognitive_account" "test" { + name = "acctestcogacc-%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + kind = "Face" + sku_name = "S0" + identity { + type = "SystemAssigned" + } +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger) +} + +func (CognitiveAccountResource) identityUserAssigned(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-cognitive-%d" + location = "%s" +} + +resource "azurerm_user_assigned_identity" "test" { + name = "acctestUAI-%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name +} + +resource "azurerm_cognitive_account" "test" { + name = "acctestcogacc-%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + kind = "Face" + sku_name = "S0" + identity { + type = "UserAssigned" + identity_ids = [ + azurerm_user_assigned_identity.test.id, + ] + } +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger) +} + +func (CognitiveAccountResource) identitySystemAssignedUserAssigned(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-cognitive-%d" + location = "%s" +} + +resource "azurerm_user_assigned_identity" "test" { + name = "acctestUAI-%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name +} + +resource "azurerm_cognitive_account" "test" { + name = "acctestcogacc-%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + kind = "Face" + sku_name = "S0" + identity { + type = "SystemAssigned, 
UserAssigned" + identity_ids = [ + azurerm_user_assigned_identity.test.id, + ] + } +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger) +} + func (CognitiveAccountResource) speechServices(data acceptance.TestData) string { return fmt.Sprintf(` provider "azurerm" { @@ -271,7 +456,52 @@ resource "azurerm_cognitive_account" "test" { kind = "SpeechServices" sku_name = "S0" } -`, data.RandomInteger, data.Locations.Primary, data.RandomInteger) +`, data.RandomInteger, data.Locations.Secondary, data.RandomInteger) +} + +func (CognitiveAccountResource) speechServicesWithStorage(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-cognitive-%d" + location = "%s" +} + +resource "azurerm_storage_account" "test" { + name = "acctestrg%d" + resource_group_name = azurerm_resource_group.test.name + + location = azurerm_resource_group.test.location + account_tier = "Standard" + account_replication_type = "LRS" +} + +resource "azurerm_user_assigned_identity" "test" { + name = "acctest-identity-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location +} + +resource "azurerm_cognitive_account" "test" { + name = "acctestcogacc-%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + kind = "SpeechServices" + sku_name = "S0" + + identity { + type = "SystemAssigned" + } + + storage { + storage_account_id = azurerm_storage_account.test.id + identity_client_id = azurerm_user_assigned_identity.test.client_id + } +} +`, data.RandomInteger, data.Locations.Secondary, data.RandomIntOfLength(8), data.RandomInteger, data.RandomInteger) } func (CognitiveAccountResource) requiresImport(data acceptance.TestData) string { @@ -307,6 +537,11 @@ resource "azurerm_cognitive_account" "test" { kind = "Face" sku_name = "S0" + fqdns = ["foo.com", 
"bar.com"] + public_network_access_enabled = false + outbound_network_access_restrited = true + local_auth_enabled = false + tags = { Acceptance = "Test" } @@ -333,7 +568,7 @@ resource "azurerm_cognitive_account" "test" { qna_runtime_endpoint = "%s" sku_name = "S0" } -`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, url) +`, data.RandomInteger, "West US", data.RandomInteger, url) // QnAMaker only available in West US } func (CognitiveAccountResource) qnaRuntimeEndpointUnspecified(data acceptance.TestData) string { @@ -354,7 +589,7 @@ resource "azurerm_cognitive_account" "test" { kind = "QnAMaker" sku_name = "S0" } -`, data.RandomInteger, data.Locations.Primary, data.RandomInteger) +`, data.RandomInteger, "West US", data.RandomInteger) // QnAMaker only available in West US } func (CognitiveAccountResource) cognitiveServices(data acceptance.TestData) string { @@ -378,6 +613,30 @@ resource "azurerm_cognitive_account" "test" { `, data.RandomInteger, data.Locations.Primary, data.RandomInteger) } +func (CognitiveAccountResource) metricsAdvisor(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} +resource "azurerm_resource_group" "test" { + name = "acctestRG-cognitive-%d" + location = "%s" +} +resource "azurerm_cognitive_account" "test" { + name = "acctestcogacc-%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + kind = "MetricsAdvisor" + sku_name = "S0" + custom_subdomain_name = "acctestcogacc-%d" + metrics_advisor_aad_client_id = "310d7b2e-d1d1-4b87-9807-5b885b290c00" + metrics_advisor_aad_tenant_id = "72f988bf-86f1-41af-91ab-2d7cd011db47" + metrics_advisor_super_user_name = "mock_user1" + metrics_advisor_website_name = "mock_name2" +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger) +} + func (CognitiveAccountResource) withMultipleCognitiveAccounts(data acceptance.TestData) string { return fmt.Sprintf(` 
provider "azurerm" { @@ -447,6 +706,56 @@ resource "azurerm_cognitive_account" "test" { `, r.networkAclsTemplate(data), data.RandomInteger, data.RandomInteger) } +func (r CognitiveAccountResource) networkAclsVirtualNetworkRules(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_cognitive_account" "test" { + name = "acctestcogacc-%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + kind = "Face" + sku_name = "S0" + custom_subdomain_name = "acctestcogacc-%d" + + network_acls { + default_action = "Deny" + virtual_network_rules { + subnet_id = azurerm_subnet.test_a.id + } + virtual_network_rules { + subnet_id = azurerm_subnet.test_b.id + ignore_missing_vnet_service_endpoint = true + } + + } +} +`, r.networkAclsTemplate(data), data.RandomInteger, data.RandomInteger) +} + +func (r CognitiveAccountResource) networkAclsVirtualNetworkRulesUpdated(data acceptance.TestData) string { + return fmt.Sprintf(` +%s +resource "azurerm_cognitive_account" "test" { + name = "acctestcogacc-%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + kind = "Face" + sku_name = "S0" + custom_subdomain_name = "acctestcogacc-%d" + + network_acls { + default_action = "Allow" + ip_rules = ["123.0.0.101"] + virtual_network_rules { + subnet_id = azurerm_subnet.test_a.id + ignore_missing_vnet_service_endpoint = true + } + } +} +`, r.networkAclsTemplate(data), data.RandomInteger, data.RandomInteger) +} + func (CognitiveAccountResource) networkAclsTemplate(data acceptance.TestData) string { return fmt.Sprintf(` provider "azurerm" { diff --git a/azurerm/internal/services/compute/availability_set_data_source.go b/azurerm/internal/services/compute/availability_set_data_source.go index ad9c749445aa..b7fdc66a8fe9 100644 --- a/azurerm/internal/services/compute/availability_set_data_source.go +++ b/azurerm/internal/services/compute/availability_set_data_source.go 
@@ -6,49 +6,49 @@ import ( "strings" "time" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func dataSourceAvailabilitySet() *schema.Resource { - return &schema.Resource{ +func dataSourceAvailabilitySet() *pluginsdk.Resource { + return &pluginsdk.Resource{ Read: dataSourceAvailabilitySetRead, - Timeouts: &schema.ResourceTimeout{ - Read: schema.DefaultTimeout(5 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Read: pluginsdk.DefaultTimeout(5 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "resource_group_name": azure.SchemaResourceGroupNameForDataSource(), "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringIsNotEmpty, }, "location": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "platform_update_domain_count": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "platform_fault_domain_count": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "managed": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Computed: true, }, @@ -57,7 +57,7 @@ func dataSourceAvailabilitySet() *schema.Resource { } } -func dataSourceAvailabilitySetRead(d *schema.ResourceData, meta interface{}) error { +func 
dataSourceAvailabilitySetRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.AvailabilitySetsClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() diff --git a/azurerm/internal/services/compute/availability_set_data_source_test.go b/azurerm/internal/services/compute/availability_set_data_source_test.go index 7f211b8d3b60..69c533736051 100644 --- a/azurerm/internal/services/compute/availability_set_data_source_test.go +++ b/azurerm/internal/services/compute/availability_set_data_source_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -16,10 +15,10 @@ func TestAccDataSourceAvailabilitySet_basic(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_availability_set", "test") r := AvailabilitySetDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("location").Exists(), check.That(data.ResourceName).Key("name").Exists(), check.That(data.ResourceName).Key("resource_group_name").Exists(), diff --git a/azurerm/internal/services/compute/availability_set_resource.go b/azurerm/internal/services/compute/availability_set_resource.go index 6c75e72e0c3e..1db2297593a1 100644 --- a/azurerm/internal/services/compute/availability_set_resource.go +++ b/azurerm/internal/services/compute/availability_set_resource.go @@ -8,8 +8,6 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" @@ -17,12 +15,13 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/suppress" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func resourceAvailabilitySet() *schema.Resource { - return &schema.Resource{ +func resourceAvailabilitySet() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceAvailabilitySetCreateUpdate, Read: resourceAvailabilitySetRead, Update: resourceAvailabilitySetCreateUpdate, @@ -30,16 +29,16 @@ func resourceAvailabilitySet() *schema.Resource { // TODO: replace this with an importer which validates the ID during import Importer: pluginsdk.DefaultImporter(), - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(30 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validation.StringMatch( @@ -53,7 +52,7 @@ func 
resourceAvailabilitySet() *schema.Resource { "location": azure.SchemaLocation(), "platform_update_domain_count": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Default: 5, ForceNew: true, @@ -61,7 +60,7 @@ func resourceAvailabilitySet() *schema.Resource { }, "platform_fault_domain_count": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Default: 3, ForceNew: true, @@ -69,14 +68,14 @@ func resourceAvailabilitySet() *schema.Resource { }, "managed": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: true, ForceNew: true, }, "proximity_placement_group_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, @@ -92,7 +91,7 @@ func resourceAvailabilitySet() *schema.Resource { } } -func resourceAvailabilitySetCreateUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceAvailabilitySetCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.AvailabilitySetsClient ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -154,7 +153,7 @@ func resourceAvailabilitySetCreateUpdate(d *schema.ResourceData, meta interface{ return resourceAvailabilitySetRead(d, meta) } -func resourceAvailabilitySetRead(d *schema.ResourceData, meta interface{}) error { +func resourceAvailabilitySetRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.AvailabilitySetsClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() @@ -194,7 +193,7 @@ func resourceAvailabilitySetRead(d *schema.ResourceData, meta interface{}) error return tags.FlattenAndSet(d, resp.Tags) } -func resourceAvailabilitySetDelete(d *schema.ResourceData, meta interface{}) error { +func resourceAvailabilitySetDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.AvailabilitySetsClient ctx, cancel 
:= timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() diff --git a/azurerm/internal/services/compute/availability_set_resource_test.go b/azurerm/internal/services/compute/availability_set_resource_test.go index 0e8c2ebba60e..65edd570089b 100644 --- a/azurerm/internal/services/compute/availability_set_resource_test.go +++ b/azurerm/internal/services/compute/availability_set_resource_test.go @@ -6,12 +6,11 @@ import ( "testing" "github.com/hashicorp/go-azure-helpers/response" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -22,10 +21,10 @@ func TestAccAvailabilitySet_basic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_availability_set", "test") r := AvailabilitySetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("platform_update_domain_count").HasValue("5"), check.That(data.ResourceName).Key("platform_fault_domain_count").HasValue("3"), @@ -39,10 +38,10 @@ func TestAccAvailabilitySet_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_availability_set", "test") r := AvailabilitySetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, 
[]acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("platform_update_domain_count").HasValue("5"), check.That(data.ResourceName).Key("platform_fault_domain_count").HasValue("3"), @@ -59,7 +58,7 @@ func TestAccAvailabilitySet_disappears(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_availability_set", "test") r := AvailabilitySetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ data.DisappearsStep(acceptance.DisappearsStepData{ Config: r.basic, TestResource: r, @@ -71,10 +70,10 @@ func TestAccAvailabilitySet_withTags(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_availability_set", "test") r := AvailabilitySetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.withTags(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("tags.%").HasValue("2"), check.That(data.ResourceName).Key("tags.environment").HasValue("Production"), @@ -83,7 +82,7 @@ func TestAccAvailabilitySet_withTags(t *testing.T) { }, { Config: r.withUpdatedTags(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("tags.%").HasValue("1"), check.That(data.ResourceName).Key("tags.environment").HasValue("staging"), @@ -97,10 +96,10 @@ func TestAccAvailabilitySet_withPPG(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_availability_set", "test") r := AvailabilitySetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.withPPG(data), - Check: resource.ComposeTestCheckFunc( + Check: 
acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("proximity_placement_group_id").Exists(), ), @@ -113,10 +112,10 @@ func TestAccAvailabilitySet_withDomainCounts(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_availability_set", "test") r := AvailabilitySetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.withDomainCounts(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("platform_update_domain_count").HasValue("3"), check.That(data.ResourceName).Key("platform_fault_domain_count").HasValue("3"), @@ -130,10 +129,10 @@ func TestAccAvailabilitySet_unmanaged(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_availability_set", "test") r := AvailabilitySetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.unmanaged(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("managed").HasValue("false"), ), @@ -142,7 +141,7 @@ func TestAccAvailabilitySet_unmanaged(t *testing.T) { }) } -func (AvailabilitySetResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (AvailabilitySetResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := parse.AvailabilitySetID(state.ID) if err != nil { return nil, err @@ -156,7 +155,7 @@ func (AvailabilitySetResource) Exists(ctx context.Context, clients *clients.Clie return utils.Bool(resp.ID != nil), nil } -func (AvailabilitySetResource) Destroy(ctx context.Context, client *clients.Client, state *terraform.InstanceState) (*bool, error) { +func 
(AvailabilitySetResource) Destroy(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := parse.AvailabilitySetID(state.ID) if err != nil { return nil, err diff --git a/azurerm/internal/services/compute/dedicated_host_data_source.go b/azurerm/internal/services/compute/dedicated_host_data_source.go index b893c7956da5..e5c773984687 100644 --- a/azurerm/internal/services/compute/dedicated_host_data_source.go +++ b/azurerm/internal/services/compute/dedicated_host_data_source.go @@ -4,34 +4,32 @@ import ( "fmt" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/validate" - - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" - - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func dataSourceDedicatedHost() *schema.Resource { - return &schema.Resource{ +func dataSourceDedicatedHost() *pluginsdk.Resource { + return &pluginsdk.Resource{ Read: dataSourceDedicatedHostRead, - Timeouts: &schema.ResourceTimeout{ - Read: schema.DefaultTimeout(5 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Read: pluginsdk.DefaultTimeout(5 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: 
validate.DedicatedHostName(), }, "dedicated_host_group_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validate.DedicatedHostGroupName(), }, @@ -45,7 +43,7 @@ func dataSourceDedicatedHost() *schema.Resource { } } -func dataSourceDedicatedHostRead(d *schema.ResourceData, meta interface{}) error { +func dataSourceDedicatedHostRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.DedicatedHostsClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() diff --git a/azurerm/internal/services/compute/dedicated_host_data_source_test.go b/azurerm/internal/services/compute/dedicated_host_data_source_test.go index 4ae681f9b9cc..454de353fbaa 100644 --- a/azurerm/internal/services/compute/dedicated_host_data_source_test.go +++ b/azurerm/internal/services/compute/dedicated_host_data_source_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -16,10 +15,10 @@ func TestAccDataSourceAzureRMDedicatedHost_basic(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_dedicated_host", "test") r := DedicatedHostDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("location").Exists(), check.That(data.ResourceName).Key("tags.%").Exists(), ), diff --git a/azurerm/internal/services/compute/dedicated_host_group_data_source.go b/azurerm/internal/services/compute/dedicated_host_group_data_source.go index 92c47c3db521..c40dd5f715b9 100644 --- a/azurerm/internal/services/compute/dedicated_host_group_data_source.go +++ 
b/azurerm/internal/services/compute/dedicated_host_group_data_source.go @@ -5,28 +5,26 @@ import ( "regexp" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" - - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" - - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func dataSourceDedicatedHostGroup() *schema.Resource { - return &schema.Resource{ +func dataSourceDedicatedHostGroup() *pluginsdk.Resource { + return &pluginsdk.Resource{ Read: dataSourceDedicatedHostGroupRead, - Timeouts: &schema.ResourceTimeout{ - Read: schema.DefaultTimeout(5 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Read: pluginsdk.DefaultTimeout(5 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringMatch(regexp.MustCompile(`^[^_\W][\w-.]{0,78}[\w]$`), ""), }, @@ -36,20 +34,20 @@ func dataSourceDedicatedHostGroup() *schema.Resource { "resource_group_name": azure.SchemaResourceGroupNameForDataSource(), "platform_fault_domain_count": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Computed: true, }, "automatic_placement_enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, 
Computed: true, }, "zones": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, @@ -58,7 +56,7 @@ func dataSourceDedicatedHostGroup() *schema.Resource { } } -func dataSourceDedicatedHostGroupRead(d *schema.ResourceData, meta interface{}) error { +func dataSourceDedicatedHostGroupRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.DedicatedHostGroupsClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() diff --git a/azurerm/internal/services/compute/dedicated_host_group_data_source_test.go b/azurerm/internal/services/compute/dedicated_host_group_data_source_test.go index d9bb3d030b23..1a026b826b5e 100644 --- a/azurerm/internal/services/compute/dedicated_host_group_data_source_test.go +++ b/azurerm/internal/services/compute/dedicated_host_group_data_source_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -16,10 +15,10 @@ func TestAccDataSourceDedicatedHostGroup_basic(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_dedicated_host_group", "test") r := DedicatedHostGroupDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("zones.#").HasValue("1"), check.That(data.ResourceName).Key("zones.0").HasValue("1"), check.That(data.ResourceName).Key("platform_fault_domain_count").HasValue("2"), diff --git a/azurerm/internal/services/compute/dedicated_host_group_resource.go 
b/azurerm/internal/services/compute/dedicated_host_group_resource.go index 13efb4f97486..0cafc389a556 100644 --- a/azurerm/internal/services/compute/dedicated_host_group_resource.go +++ b/azurerm/internal/services/compute/dedicated_host_group_resource.go @@ -5,24 +5,20 @@ import ( "log" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/validate" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" - - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" - - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func resourceDedicatedHostGroup() *schema.Resource { - return &schema.Resource{ +func resourceDedicatedHostGroup() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceDedicatedHostGroupCreate, Read: resourceDedicatedHostGroupRead, Update: resourceDedicatedHostGroupUpdate, @@ -31,16 +27,16 @@ func 
resourceDedicatedHostGroup() *schema.Resource { // TODO: replace this with an importer which validates the ID during import Importer: pluginsdk.DefaultImporter(), - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(30 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.DedicatedHostGroupName(), @@ -53,14 +49,14 @@ func resourceDedicatedHostGroup() *schema.Resource { "resource_group_name": azure.SchemaResourceGroupNameDiffSuppress(), "platform_fault_domain_count": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Required: true, ForceNew: true, ValidateFunc: validation.IntBetween(1, 3), }, "automatic_placement_enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, ForceNew: true, Default: false, @@ -75,7 +71,7 @@ func resourceDedicatedHostGroup() *schema.Resource { } } -func resourceDedicatedHostGroupCreate(d *schema.ResourceData, meta interface{}) error { +func resourceDedicatedHostGroupCreate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.DedicatedHostGroupsClient ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -130,7 +126,7 @@ func resourceDedicatedHostGroupCreate(d *schema.ResourceData, meta interface{}) return resourceDedicatedHostGroupRead(d, meta) } -func resourceDedicatedHostGroupRead(d *schema.ResourceData, meta interface{}) error { +func 
resourceDedicatedHostGroupRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.DedicatedHostGroupsClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() @@ -171,7 +167,7 @@ func resourceDedicatedHostGroupRead(d *schema.ResourceData, meta interface{}) er return tags.FlattenAndSet(d, resp.Tags) } -func resourceDedicatedHostGroupUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceDedicatedHostGroupUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.DedicatedHostGroupsClient ctx, cancel := timeouts.ForUpdate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -191,7 +187,7 @@ func resourceDedicatedHostGroupUpdate(d *schema.ResourceData, meta interface{}) return resourceDedicatedHostGroupRead(d, meta) } -func resourceDedicatedHostGroupDelete(d *schema.ResourceData, meta interface{}) error { +func resourceDedicatedHostGroupDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.DedicatedHostGroupsClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() diff --git a/azurerm/internal/services/compute/dedicated_host_group_resource_test.go b/azurerm/internal/services/compute/dedicated_host_group_resource_test.go index c324ae079634..f17250b47c22 100644 --- a/azurerm/internal/services/compute/dedicated_host_group_resource_test.go +++ b/azurerm/internal/services/compute/dedicated_host_group_resource_test.go @@ -5,12 +5,11 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -21,10 +20,10 @@ func TestAccDedicatedHostGroup_basic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_dedicated_host_group", "test") r := DedicatedHostGroupResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -36,10 +35,10 @@ func TestAccDedicatedHostGroup_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_dedicated_host_group", "test") r := DedicatedHostGroupResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -51,10 +50,10 @@ func TestAccDedicatedHostGroup_automaticPlacementEnabled(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_dedicated_host_group", "test") r := DedicatedHostGroupResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.automaticPlacementEnabled(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -66,10 +65,10 @@ func TestAccDedicatedHostGroup_complete(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_dedicated_host_group", "test") r := DedicatedHostGroupResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.complete(data), - Check: resource.ComposeTestCheckFunc( + Check: 
acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("zones.#").HasValue("1"), check.That(data.ResourceName).Key("zones.0").HasValue("1"), @@ -81,7 +80,7 @@ func TestAccDedicatedHostGroup_complete(t *testing.T) { }) } -func (r DedicatedHostGroupResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (r DedicatedHostGroupResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := azure.ParseAzureResourceID(state.ID) if err != nil { return nil, err diff --git a/azurerm/internal/services/compute/dedicated_host_resource.go b/azurerm/internal/services/compute/dedicated_host_resource.go index b3a395501e58..9afa805ea052 100644 --- a/azurerm/internal/services/compute/dedicated_host_resource.go +++ b/azurerm/internal/services/compute/dedicated_host_resource.go @@ -6,27 +6,22 @@ import ( "log" "time" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/validate" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" - - "github.com/hashicorp/go-azure-helpers/response" - - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/parse" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/hashicorp/go-azure-helpers/response" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func resourceDedicatedHost() *schema.Resource { - return &schema.Resource{ +func resourceDedicatedHost() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceDedicatedHostCreate, Read: resourceDedicatedHostRead, Update: resourceDedicatedHostUpdate, @@ -37,16 +32,16 @@ func resourceDedicatedHost() *schema.Resource { return err }), - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(30 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.DedicatedHostName(), @@ -55,14 +50,14 @@ func resourceDedicatedHost() *schema.Resource { "location": azure.SchemaLocation(), "dedicated_host_group_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.DedicatedHostGroupID, }, "sku_name": { - Type: 
schema.TypeString, + Type: pluginsdk.TypeString, ForceNew: true, Required: true, ValidateFunc: validation.StringInSlice([]string{ @@ -98,19 +93,19 @@ func resourceDedicatedHost() *schema.Resource { }, "platform_fault_domain": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, ForceNew: true, Required: true, }, "auto_replace_on_failure": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: true, }, "license_type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ValidateFunc: validation.StringInSlice([]string{ string(compute.DedicatedHostLicenseTypesNone), @@ -125,7 +120,7 @@ func resourceDedicatedHost() *schema.Resource { } } -func resourceDedicatedHostCreate(d *schema.ResourceData, meta interface{}) error { +func resourceDedicatedHostCreate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.DedicatedHostsClient ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -184,7 +179,7 @@ func resourceDedicatedHostCreate(d *schema.ResourceData, meta interface{}) error return resourceDedicatedHostRead(d, meta) } -func resourceDedicatedHostRead(d *schema.ResourceData, meta interface{}) error { +func resourceDedicatedHostRead(d *pluginsdk.ResourceData, meta interface{}) error { groupsClient := meta.(*clients.Client).Compute.DedicatedHostGroupsClient hostsClient := meta.(*clients.Client).Compute.DedicatedHostsClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) @@ -238,7 +233,7 @@ func resourceDedicatedHostRead(d *schema.ResourceData, meta interface{}) error { return tags.FlattenAndSet(d, resp.Tags) } -func resourceDedicatedHostUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceDedicatedHostUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.DedicatedHostsClient ctx, cancel := timeouts.ForUpdate(meta.(*clients.Client).StopContext, d) defer cancel() @@ 
-267,7 +262,7 @@ func resourceDedicatedHostUpdate(d *schema.ResourceData, meta interface{}) error return resourceDedicatedHostRead(d, meta) } -func resourceDedicatedHostDelete(d *schema.ResourceData, meta interface{}) error { +func resourceDedicatedHostDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.DedicatedHostsClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() @@ -290,23 +285,23 @@ func resourceDedicatedHostDelete(d *schema.ResourceData, meta interface{}) error // API has bug, which appears to be eventually consistent. Tracked by this issue: https://github.com/Azure/azure-rest-api-specs/issues/8137 log.Printf("[DEBUG] Waiting for Dedicated Host %q (Host Group Name %q / Resource Group %q) to disappear", id.HostName, id.HostGroupName, id.ResourceGroup) - stateConf := &resource.StateChangeConf{ + stateConf := &pluginsdk.StateChangeConf{ Pending: []string{"Exists"}, Target: []string{"NotFound"}, Refresh: dedicatedHostDeletedRefreshFunc(ctx, client, id), MinTimeout: 10 * time.Second, ContinuousTargetOccurence: 20, - Timeout: d.Timeout(schema.TimeoutDelete), + Timeout: d.Timeout(pluginsdk.TimeoutDelete), } - if _, err = stateConf.WaitForState(); err != nil { + if _, err = stateConf.WaitForStateContext(ctx); err != nil { return fmt.Errorf("Error waiting for Dedicated Host %q (Host Group Name %q / Resource Group %q) to become available: %+v", id.HostName, id.HostGroupName, id.ResourceGroup, err) } return nil } -func dedicatedHostDeletedRefreshFunc(ctx context.Context, client *compute.DedicatedHostsClient, id *parse.DedicatedHostId) resource.StateRefreshFunc { +func dedicatedHostDeletedRefreshFunc(ctx context.Context, client *compute.DedicatedHostsClient, id *parse.DedicatedHostId) pluginsdk.StateRefreshFunc { return func() (interface{}, string, error) { res, err := client.Get(ctx, id.ResourceGroup, id.HostGroupName, id.HostName, "") if err != nil { diff --git 
a/azurerm/internal/services/compute/dedicated_host_resource_test.go b/azurerm/internal/services/compute/dedicated_host_resource_test.go index 3d5d42302867..c5aca73d7d4a 100644 --- a/azurerm/internal/services/compute/dedicated_host_resource_test.go +++ b/azurerm/internal/services/compute/dedicated_host_resource_test.go @@ -5,12 +5,11 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -21,10 +20,10 @@ func TestAccDedicatedHost_basic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_dedicated_host", "test") r := DedicatedHostResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -36,10 +35,10 @@ func TestAccDedicatedHost_basicNewSku(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_dedicated_host", "test") r := DedicatedHostResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicNewSku(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -51,11 +50,11 @@ func TestAccDedicatedHost_autoReplaceOnFailure(t *testing.T) { data := acceptance.BuildTestData(t, 
"azurerm_dedicated_host", "test") r := DedicatedHostResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // Enabled Config: r.autoReplaceOnFailure(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -63,7 +62,7 @@ func TestAccDedicatedHost_autoReplaceOnFailure(t *testing.T) { { // Disabled Config: r.autoReplaceOnFailure(data, false), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -71,7 +70,7 @@ func TestAccDedicatedHost_autoReplaceOnFailure(t *testing.T) { { // Enabled Config: r.autoReplaceOnFailure(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -83,31 +82,31 @@ func TestAccDedicatedHost_licenseType(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_dedicated_host", "test") r := DedicatedHostResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.licenceType(data, "None"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.licenceType(data, "Windows_Server_Hybrid"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.licenceType(data, "Windows_Server_Perpetual"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.licenceType(data, "None"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -119,10 +118,10 @@ func 
TestAccDedicatedHost_complete(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_dedicated_host", "test") r := DedicatedHostResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.complete(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -134,17 +133,17 @@ func TestAccDedicatedHost_update(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_dedicated_host", "test") r := DedicatedHostResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.complete(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -156,10 +155,10 @@ func TestAccDedicatedHost_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_dedicated_host", "test") r := DedicatedHostResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -167,7 +166,7 @@ func TestAccDedicatedHost_requiresImport(t *testing.T) { }) } -func (t DedicatedHostResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (t DedicatedHostResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := parse.DedicatedHostID(state.ID) if err != nil { return nil, err diff --git a/azurerm/internal/services/compute/diff_suppress.go b/azurerm/internal/services/compute/diff_suppress.go index 
03d6924e9f27..ef7f46dd10c9 100644 --- a/azurerm/internal/services/compute/diff_suppress.go +++ b/azurerm/internal/services/compute/diff_suppress.go @@ -1,9 +1,9 @@ package compute -import "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +import "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" // nolint: deadcode unused -func adminPasswordDiffSuppressFunc(_, old, new string, _ *schema.ResourceData) bool { +func adminPasswordDiffSuppressFunc(_, old, new string, _ *pluginsdk.ResourceData) bool { // this is not the greatest hack in the world, this is just a tribute. if old == "ignored-as-imported" || new == "ignored-as-imported" { return true diff --git a/azurerm/internal/services/compute/disk_access_data_source.go b/azurerm/internal/services/compute/disk_access_data_source.go index c03550504be0..0a28e0aaaa07 100644 --- a/azurerm/internal/services/compute/disk_access_data_source.go +++ b/azurerm/internal/services/compute/disk_access_data_source.go @@ -4,25 +4,25 @@ import ( "fmt" "time" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func dataSourceDiskAccess() *schema.Resource { - return &schema.Resource{ +func dataSourceDiskAccess() *pluginsdk.Resource { + return &pluginsdk.Resource{ Read: dataSourceDiskAccessRead, - Timeouts: &schema.ResourceTimeout{ - Read: schema.DefaultTimeout(5 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Read: pluginsdk.DefaultTimeout(5 * time.Minute), }, - Schema: 
map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, @@ -33,7 +33,7 @@ func dataSourceDiskAccess() *schema.Resource { } } -func dataSourceDiskAccessRead(d *schema.ResourceData, meta interface{}) error { +func dataSourceDiskAccessRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.DiskAccessClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() diff --git a/azurerm/internal/services/compute/disk_access_data_source_test.go b/azurerm/internal/services/compute/disk_access_data_source_test.go index 61837cb4fe28..39e2233cdd14 100644 --- a/azurerm/internal/services/compute/disk_access_data_source_test.go +++ b/azurerm/internal/services/compute/disk_access_data_source_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -19,10 +18,10 @@ func TestAccDataSourceDiskAccess_basic(t *testing.T) { name := fmt.Sprintf("acctestdiskaccess-%d", data.RandomInteger) resourceGroupName := fmt.Sprintf("acctestRG-%d", data.RandomInteger) - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.basic(data, name, resourceGroupName), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("name").HasValue(name), check.That(data.ResourceName).Key("resource_group_name").HasValue(resourceGroupName), check.That(data.ResourceName).Key("tags.%").HasValue("1"), diff --git a/azurerm/internal/services/compute/disk_access_resource.go b/azurerm/internal/services/compute/disk_access_resource.go index ff7a144467aa..72a9c75e2124 100644 --- 
a/azurerm/internal/services/compute/disk_access_resource.go +++ b/azurerm/internal/services/compute/disk_access_resource.go @@ -6,7 +6,6 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" @@ -17,8 +16,8 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func resourceDiskAccess() *schema.Resource { - return &schema.Resource{ +func resourceDiskAccess() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceDiskAccessCreateUpdate, Read: resourceDiskAccessRead, Update: resourceDiskAccessCreateUpdate, @@ -29,16 +28,16 @@ func resourceDiskAccess() *schema.Resource { return err }), - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(30 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, }, @@ -52,7 +51,7 @@ func resourceDiskAccess() *schema.Resource { } } -func resourceDiskAccessCreateUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceDiskAccessCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.DiskAccessClient ctx, cancel := 
timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -105,7 +104,7 @@ func resourceDiskAccessCreateUpdate(d *schema.ResourceData, meta interface{}) er return resourceDiskAccessRead(d, meta) } -func resourceDiskAccessRead(d *schema.ResourceData, meta interface{}) error { +func resourceDiskAccessRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.DiskAccessClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() @@ -135,7 +134,7 @@ func resourceDiskAccessRead(d *schema.ResourceData, meta interface{}) error { return tags.FlattenAndSet(d, resp.Tags) } -func resourceDiskAccessDelete(d *schema.ResourceData, meta interface{}) error { +func resourceDiskAccessDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.DiskAccessClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() diff --git a/azurerm/internal/services/compute/disk_access_resource_test.go b/azurerm/internal/services/compute/disk_access_resource_test.go index 138bfb872f8c..eeec64eb35f8 100644 --- a/azurerm/internal/services/compute/disk_access_resource_test.go +++ b/azurerm/internal/services/compute/disk_access_resource_test.go @@ -5,12 +5,11 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ 
-21,10 +20,10 @@ func TestAccDiskAccess_empty(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_disk_access", "test") r := DiskAccessResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.empty(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -36,10 +35,10 @@ func TestAccDiskAccess_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_disk_access", "test") r := DiskAccessResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.empty(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -54,17 +53,17 @@ func TestAccDiskAccess_import(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_disk_access", "test") r := DiskAccessResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.importConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, }) } -func (t DiskAccessResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (t DiskAccessResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := parse.DiskAccessID(state.ID) if err != nil { return nil, err diff --git a/azurerm/internal/services/compute/disk_encryption_set_data_source.go b/azurerm/internal/services/compute/disk_encryption_set_data_source.go index f4e0b83a20b5..edf057cf9acd 100644 --- a/azurerm/internal/services/compute/disk_encryption_set_data_source.go +++ b/azurerm/internal/services/compute/disk_encryption_set_data_source.go @@ -4,26 +4,26 @@ import ( "fmt" "time" - 
"github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func dataSourceDiskEncryptionSet() *schema.Resource { - return &schema.Resource{ +func dataSourceDiskEncryptionSet() *pluginsdk.Resource { + return &pluginsdk.Resource{ Read: dataSourceDiskEncryptionSetRead, - Timeouts: &schema.ResourceTimeout{ - Read: schema.DefaultTimeout(5 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Read: pluginsdk.DefaultTimeout(5 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringIsNotEmpty, }, @@ -37,7 +37,7 @@ func dataSourceDiskEncryptionSet() *schema.Resource { } } -func dataSourceDiskEncryptionSetRead(d *schema.ResourceData, meta interface{}) error { +func dataSourceDiskEncryptionSetRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.DiskEncryptionSetsClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() diff --git a/azurerm/internal/services/compute/disk_encryption_set_data_source_test.go b/azurerm/internal/services/compute/disk_encryption_set_data_source_test.go index 81a200cacf44..bb3e784d2f93 100644 --- 
a/azurerm/internal/services/compute/disk_encryption_set_data_source_test.go +++ b/azurerm/internal/services/compute/disk_encryption_set_data_source_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -15,13 +14,13 @@ type DiskEncryptionSetDataSource struct { func TestAccDataSourceDiskEncryptionSet_basic(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_disk_encryption_set", "test") r := DiskEncryptionSetDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.basic(data), }, { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("location").Exists(), ), }, diff --git a/azurerm/internal/services/compute/disk_encryption_set_resource.go b/azurerm/internal/services/compute/disk_encryption_set_resource.go index 093e240239fe..15dc717ed76c 100644 --- a/azurerm/internal/services/compute/disk_encryption_set_resource.go +++ b/azurerm/internal/services/compute/disk_encryption_set_resource.go @@ -6,37 +6,35 @@ import ( "log" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/validate" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/keyvault/client" keyVaultParse "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/keyvault/parse" keyVaultValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/keyvault/validate" resourcesClient "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/client" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func resourceDiskEncryptionSet() *schema.Resource { - return &schema.Resource{ +func resourceDiskEncryptionSet() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceDiskEncryptionSetCreate, Read: resourceDiskEncryptionSetRead, Update: resourceDiskEncryptionSetUpdate, Delete: resourceDiskEncryptionSetDelete, - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(60 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(60 * time.Minute), - Delete: schema.DefaultTimeout(60 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(60 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(60 * time.Minute), + Delete: pluginsdk.DefaultTimeout(60 * time.Minute), }, Importer: pluginsdk.ImporterValidatingResourceId(func(id string) error { @@ -44,9 +42,9 
@@ func resourceDiskEncryptionSet() *schema.Resource { return err }), - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.DiskEncryptionSetName, @@ -57,33 +55,33 @@ func resourceDiskEncryptionSet() *schema.Resource { "resource_group_name": azure.SchemaResourceGroupName(), "key_vault_key_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: keyVaultValidate.NestedItemId, }, "identity": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, // whilst the API Documentation shows optional - attempting to send nothing returns: // `Required parameter 'ResourceIdentity' is missing (null)` // hence this is required Required: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ string(compute.DiskEncryptionSetIdentityTypeSystemAssigned), }, false), }, "principal_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "tenant_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, }, @@ -95,7 +93,7 @@ func resourceDiskEncryptionSet() *schema.Resource { } } -func resourceDiskEncryptionSetCreate(d *schema.ResourceData, meta interface{}) error { +func resourceDiskEncryptionSetCreate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.DiskEncryptionSetsClient keyVaultsClient := meta.(*clients.Client).KeyVault resourcesClient := meta.(*clients.Client).Resource @@ -165,7 +163,7 @@ func resourceDiskEncryptionSetCreate(d *schema.ResourceData, meta interface{}) e return resourceDiskEncryptionSetRead(d, meta) } -func resourceDiskEncryptionSetRead(d *schema.ResourceData, meta 
interface{}) error { +func resourceDiskEncryptionSetRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.DiskEncryptionSetsClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() @@ -206,7 +204,7 @@ func resourceDiskEncryptionSetRead(d *schema.ResourceData, meta interface{}) err return tags.FlattenAndSet(d, resp.Tags) } -func resourceDiskEncryptionSetUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceDiskEncryptionSetUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.DiskEncryptionSetsClient keyVaultsClient := meta.(*clients.Client).KeyVault resourcesClient := meta.(*clients.Client).Resource @@ -256,7 +254,7 @@ func resourceDiskEncryptionSetUpdate(d *schema.ResourceData, meta interface{}) e return resourceDiskEncryptionSetRead(d, meta) } -func resourceDiskEncryptionSetDelete(d *schema.ResourceData, meta interface{}) error { +func resourceDiskEncryptionSetDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.DiskEncryptionSetsClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() diff --git a/azurerm/internal/services/compute/disk_encryption_set_resource_test.go b/azurerm/internal/services/compute/disk_encryption_set_resource_test.go index 54fea33e9cf3..2edad37c1657 100644 --- a/azurerm/internal/services/compute/disk_encryption_set_resource_test.go +++ b/azurerm/internal/services/compute/disk_encryption_set_resource_test.go @@ -5,12 +5,11 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -21,10 +20,10 @@ func TestAccDiskEncryptionSet_basic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_disk_encryption_set", "test") r := DiskEncryptionSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -36,10 +35,10 @@ func TestAccDiskEncryptionSet_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_disk_encryption_set", "test") r := DiskEncryptionSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -51,10 +50,10 @@ func TestAccDiskEncryptionSet_complete(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_disk_encryption_set", "test") r := DiskEncryptionSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.complete(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -66,17 +65,17 @@ func TestAccDiskEncryptionSet_update(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_disk_encryption_set", "test") r := DiskEncryptionSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: 
resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.complete(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -88,10 +87,10 @@ func TestAccDiskEncryptionSet_keyRotate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_disk_encryption_set", "test") r := DiskEncryptionSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -99,7 +98,7 @@ func TestAccDiskEncryptionSet_keyRotate(t *testing.T) { // we have to first grant the permission for DiskEncryptionSet to access the KeyVault { Config: r.grantAccessToKeyVault(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -107,7 +106,7 @@ func TestAccDiskEncryptionSet_keyRotate(t *testing.T) { // after the access is granted, we can rotate the key in DiskEncryptionSet { Config: r.keyRotate(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -115,7 +114,7 @@ func TestAccDiskEncryptionSet_keyRotate(t *testing.T) { }) } -func (DiskEncryptionSetResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (DiskEncryptionSetResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := parse.DiskEncryptionSetID(state.ID) if err != nil { return nil, err diff --git a/azurerm/internal/services/compute/encryption_settings.go b/azurerm/internal/services/compute/encryption_settings.go index 97dcaf5f379d..60be006b43a6 
100644 --- a/azurerm/internal/services/compute/encryption_settings.go +++ b/azurerm/internal/services/compute/encryption_settings.go @@ -2,19 +2,19 @@ package compute import ( "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func encryptionSettingsSchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, +func encryptionSettingsSchema() *pluginsdk.Schema { + return &pluginsdk.Schema{ + Type: pluginsdk.TypeList, Optional: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Required: true, // Azure can change enabled from false to true, but not the other way around, so @@ -23,36 +23,36 @@ func encryptionSettingsSchema() *schema.Schema { }, "disk_encryption_key": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "secret_url": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "source_vault_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, }, }, }, "key_encryption_key": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "key_url": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "source_vault_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, }, diff --git 
a/azurerm/internal/services/compute/image_data_source.go b/azurerm/internal/services/compute/image_data_source.go index 634fe6de7795..78fb80a90991 100644 --- a/azurerm/internal/services/compute/image_data_source.go +++ b/azurerm/internal/services/compute/image_data_source.go @@ -8,39 +8,39 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func dataSourceImage() *schema.Resource { - return &schema.Resource{ +func dataSourceImage() *pluginsdk.Resource { + return &pluginsdk.Resource{ Read: dataSourceImageRead, - Timeouts: &schema.ResourceTimeout{ - Read: schema.DefaultTimeout(5 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Read: pluginsdk.DefaultTimeout(5 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name_regex": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ValidateFunc: validation.StringIsValidRegExp, ConflictsWith: []string{"name"}, }, "sort_descending": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: false, }, "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ConflictsWith: []string{"name_regex"}, }, @@ -50,37 +50,37 @@ func dataSourceImage() 
*schema.Resource { "location": azure.SchemaLocationForDataSource(), "zone_resilient": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Computed: true, }, "os_disk": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "blob_uri": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "caching": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "managed_disk_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "os_state": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "os_type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "size_gb": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Computed: true, }, }, @@ -88,28 +88,28 @@ func dataSourceImage() *schema.Resource { }, "data_disk": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "blob_uri": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "caching": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "lun": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Computed: true, }, "managed_disk_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "size_gb": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Computed: true, }, }, @@ -121,7 +121,7 @@ func dataSourceImage() *schema.Resource { } } -func dataSourceImageRead(d *schema.ResourceData, meta interface{}) error { +func dataSourceImageRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.ImagesClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() 
diff --git a/azurerm/internal/services/compute/image_data_source_test.go b/azurerm/internal/services/compute/image_data_source_test.go index 4cc06f7b3174..daa1e1c6acea 100644 --- a/azurerm/internal/services/compute/image_data_source_test.go +++ b/azurerm/internal/services/compute/image_data_source_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -16,10 +15,10 @@ func TestAccDataSourceImage_basic(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_image", "test") r := ImageDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("name").Exists(), check.That(data.ResourceName).Key("resource_group_name").Exists(), check.That(data.ResourceName).Key("os_disk.#").HasValue("1"), @@ -43,20 +42,20 @@ func TestAccDataSourceImage_localFilter(t *testing.T) { descDataSourceName := "data.azurerm_image.test2" - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { // We have to create the images first explicitly, then retrieve the data source, because in this case we do not have explicit dependency on the image resources Config: r.localFilter_setup(data), }, { Config: r.localFilter(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("name").Exists(), check.That(data.ResourceName).Key("resource_group_name").Exists(), check.That(data.ResourceName).Key("name").HasValue(fmt.Sprintf("def-acctest-%d", data.RandomInteger)), - resource.TestCheckResourceAttrSet(descDataSourceName, "name"), - 
resource.TestCheckResourceAttrSet(descDataSourceName, "resource_group_name"), - resource.TestCheckResourceAttr(descDataSourceName, "name", fmt.Sprintf("def-acctest-%d", data.RandomInteger)), + acceptance.TestCheckResourceAttrSet(descDataSourceName, "name"), + acceptance.TestCheckResourceAttrSet(descDataSourceName, "resource_group_name"), + acceptance.TestCheckResourceAttr(descDataSourceName, "name", fmt.Sprintf("def-acctest-%d", data.RandomInteger)), ), }, }) diff --git a/azurerm/internal/services/compute/image_resource.go b/azurerm/internal/services/compute/image_resource.go index 81446d2da555..723e40aae252 100644 --- a/azurerm/internal/services/compute/image_resource.go +++ b/azurerm/internal/services/compute/image_resource.go @@ -6,20 +6,19 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/suppress" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func resourceImage() *schema.Resource { - return &schema.Resource{ +func resourceImage() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceImageCreateUpdate, Read: resourceImageRead, Update: resourceImageCreateUpdate, @@ -27,16 +26,16 @@ func 
resourceImage() *schema.Resource { // TODO: replace this with an importer which validates the ID during import Importer: pluginsdk.DefaultImporter(), - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(90 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(90 * time.Minute), - Delete: schema.DefaultTimeout(90 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(90 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(90 * time.Minute), + Delete: pluginsdk.DefaultTimeout(90 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, }, @@ -46,14 +45,14 @@ func resourceImage() *schema.Resource { "resource_group_name": azure.SchemaResourceGroupName(), "zone_resilient": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: false, ForceNew: true, }, "hyper_v_generation": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Default: string(compute.HyperVGenerationTypesV1), ForceNew: true, @@ -64,20 +63,20 @@ func resourceImage() *schema.Resource { }, "source_virtual_machine_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ValidateFunc: azure.ValidateResourceID, }, "os_disk": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, MaxItems: 1, ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "os_type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, DiffSuppressFunc: suppress.CaseDifference, ValidateFunc: validation.StringInSlice([]string{ @@ -87,7 +86,7 @@ func resourceImage() *schema.Resource { }, "os_state": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: 
true, DiffSuppressFunc: suppress.CaseDifference, ValidateFunc: validation.StringInSlice([]string{ @@ -97,7 +96,7 @@ func resourceImage() *schema.Resource { }, "managed_disk_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Optional: true, DiffSuppressFunc: suppress.CaseDifference, @@ -105,7 +104,7 @@ func resourceImage() *schema.Resource { }, "blob_uri": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Computed: true, ForceNew: true, @@ -113,7 +112,7 @@ func resourceImage() *schema.Resource { }, "caching": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Default: string(compute.None), DiffSuppressFunc: suppress.CaseDifference, @@ -125,7 +124,7 @@ func resourceImage() *schema.Resource { }, "size_gb": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Computed: true, Optional: true, ValidateFunc: validation.NoZeroValues, @@ -135,32 +134,32 @@ func resourceImage() *schema.Resource { }, "data_disk": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "lun": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, }, "managed_disk_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, ValidateFunc: azure.ValidateResourceID, }, "blob_uri": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Computed: true, ValidateFunc: validation.IsURLWithScheme([]string{"http", "https"}), }, "caching": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Default: string(compute.None), ValidateFunc: validation.StringInSlice([]string{ @@ -172,7 +171,7 @@ func resourceImage() *schema.Resource { }, "size_gb": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Computed: true, ValidateFunc: validation.NoZeroValues, @@ -186,7 +185,7 @@ func 
resourceImage() *schema.Resource { } } -func resourceImageCreateUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceImageCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.ImagesClient ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -274,7 +273,7 @@ func resourceImageCreateUpdate(d *schema.ResourceData, meta interface{}) error { return resourceImageRead(d, meta) } -func resourceImageRead(d *schema.ResourceData, meta interface{}) error { +func resourceImageRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.ImagesClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() @@ -323,7 +322,7 @@ func resourceImageRead(d *schema.ResourceData, meta interface{}) error { return tags.FlattenAndSet(d, resp.Tags) } -func resourceImageDelete(d *schema.ResourceData, meta interface{}) error { +func resourceImageDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.ImagesClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() @@ -391,7 +390,7 @@ func flattenAzureRmImageDataDisks(diskImages *[]compute.ImageDataDisk) []interfa return result } -func expandAzureRmImageOsDisk(d *schema.ResourceData) *compute.ImageOSDisk { +func expandAzureRmImageOsDisk(d *pluginsdk.ResourceData) *compute.ImageOSDisk { osDisk := &compute.ImageOSDisk{} disks := d.Get("os_disk").([]interface{}) @@ -432,7 +431,7 @@ func expandAzureRmImageOsDisk(d *schema.ResourceData) *compute.ImageOSDisk { return osDisk } -func expandAzureRmImageDataDisks(d *schema.ResourceData) *[]compute.ImageDataDisk { +func expandAzureRmImageDataDisks(d *pluginsdk.ResourceData) *[]compute.ImageDataDisk { disks := d.Get("data_disk").([]interface{}) dataDisks := make([]compute.ImageDataDisk, 0, len(disks)) diff --git 
a/azurerm/internal/services/compute/image_resource_test.go b/azurerm/internal/services/compute/image_resource_test.go index dac7f6c656e3..2f7a8ea2f1a9 100644 --- a/azurerm/internal/services/compute/image_resource_test.go +++ b/azurerm/internal/services/compute/image_resource_test.go @@ -6,8 +6,6 @@ import ( "log" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" @@ -15,6 +13,7 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/parse" networkParse "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -25,18 +24,18 @@ func TestAccImage_standaloneImage(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_image", "test") r := ImageResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // need to create a vm and then reference it in the image creation Config: r.setupUnmanagedDisks(data, "LRS"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( data.CheckWithClientForResource(r.virtualMachineExists, "azurerm_virtual_machine.testsource"), data.CheckWithClientForResource(r.generalizeVirtualMachine(data), "azurerm_virtual_machine.testsource"), ), }, { Config: r.standaloneImageProvision(data, "LRS", ""), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( 
check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -48,18 +47,18 @@ func TestAccImage_standaloneImage_hyperVGeneration_V2(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_image", "test") r := ImageResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // need to create a vm and then reference it in the image creation Config: r.setupUnmanagedDisks(data, "LRS"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( data.CheckWithClientForResource(r.virtualMachineExists, "azurerm_virtual_machine.testsource"), data.CheckWithClientForResource(r.generalizeVirtualMachine(data), "azurerm_virtual_machine.testsource"), ), }, { Config: r.standaloneImageProvision(data, "LRS", "V2"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -71,19 +70,19 @@ func TestAccImage_standaloneImageZoneRedundant(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_image", "test") r := ImageResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // need to create a vm and then reference it in the image creation Config: r.setupUnmanagedDisks(data, "ZRS"), Destroy: false, - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( data.CheckWithClientForResource(r.virtualMachineExists, "azurerm_virtual_machine.testsource"), data.CheckWithClientForResource(r.generalizeVirtualMachine(data), "azurerm_virtual_machine.testsource"), ), }, { Config: r.standaloneImageProvision(data, "ZRS", ""), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That("azurerm_image.test").ExistsInAzure(r), ), }, @@ -95,18 +94,18 @@ func TestAccImage_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_image", "test") r := ImageResource{} - data.ResourceTest(t, r, []resource.TestStep{ + 
data.ResourceTest(t, r, []acceptance.TestStep{ { // need to create a vm and then reference it in the image creation Config: r.setupUnmanagedDisks(data, "LRS"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( data.CheckWithClientForResource(r.virtualMachineExists, "azurerm_virtual_machine.testsource"), data.CheckWithClientForResource(r.generalizeVirtualMachine(data), "azurerm_virtual_machine.testsource"), ), }, { Config: r.standaloneImageProvision(data, "LRS", ""), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -118,18 +117,18 @@ func TestAccImage_customImageFromVMWithUnmanagedDisks(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_image", "test") r := ImageResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // need to create a vm and then reference it in the image creation Config: r.setupUnmanagedDisks(data, "LRS"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( data.CheckWithClientForResource(r.virtualMachineExists, "azurerm_virtual_machine.testsource"), data.CheckWithClientForResource(r.generalizeVirtualMachine(data), "azurerm_virtual_machine.testsource"), ), }, { Config: r.customImageFromVMWithUnmanagedDisksProvision(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( data.CheckWithClientForResource(r.virtualMachineExists, "azurerm_virtual_machine.testdestination"), ), }, @@ -140,19 +139,19 @@ func TestAccImage_customImageFromVMWithManagedDisks(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_image", "test") r := ImageResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // need to create a vm and then reference it in the image creation Config: r.setupManagedDisks(data), Destroy: false, - Check: resource.ComposeTestCheckFunc( + Check: 
acceptance.ComposeTestCheckFunc( data.CheckWithClientForResource(r.virtualMachineExists, "azurerm_virtual_machine.testsource"), data.CheckWithClientForResource(r.generalizeVirtualMachine(data), "azurerm_virtual_machine.testsource"), ), }, { Config: r.customImageFromManagedDiskVMProvision(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( data.CheckWithClientForResource(r.virtualMachineExists, "azurerm_virtual_machine.testdestination"), ), }, @@ -163,26 +162,26 @@ func TestAccImage_customImageFromVMSSWithUnmanagedDisks(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_image", "test") r := ImageResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // need to create a vm and then reference it in the image creation Config: r.setupUnmanagedDisks(data, "LRS"), Destroy: false, - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( data.CheckWithClientForResource(r.virtualMachineExists, "azurerm_virtual_machine.testsource"), data.CheckWithClientForResource(r.generalizeVirtualMachine(data), "azurerm_virtual_machine.testsource"), ), }, { Config: r.customImageFromVMSSWithUnmanagedDisksProvision(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( data.CheckWithClientForResource(r.virtualMachineScaleSetExists, "azurerm_virtual_machine_scale_set.testdestination"), ), }, }) } -func (ImageResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (ImageResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := azure.ParseAzureResourceID(state.ID) if err != nil { return nil, err @@ -198,8 +197,8 @@ func (ImageResource) Exists(ctx context.Context, clients *clients.Client, state return utils.Bool(resp.ID != nil), nil } -func (ImageResource) generalizeVirtualMachine(data acceptance.TestData) 
func(context.Context, *clients.Client, *terraform.InstanceState) error { - return func(ctx context.Context, client *clients.Client, state *terraform.InstanceState) error { +func (ImageResource) generalizeVirtualMachine(data acceptance.TestData) func(context.Context, *clients.Client, *pluginsdk.InstanceState) error { + return func(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) error { id, err := parse.VirtualMachineID(state.ID) if err != nil { return err @@ -279,7 +278,7 @@ func (ImageResource) generalizeVirtualMachine(data acceptance.TestData) func(con ssh.LinuxAgentDeprovisionCommand, }, } - if err := sshGeneralizationCommand.Run(); err != nil { + if err := sshGeneralizationCommand.Run(ctx); err != nil { return fmt.Errorf("Bad: running generalization command: %+v", err) } @@ -302,7 +301,7 @@ func (ImageResource) generalizeVirtualMachine(data acceptance.TestData) func(con } } -func (ImageResource) virtualMachineExists(ctx context.Context, client *clients.Client, state *terraform.InstanceState) error { +func (ImageResource) virtualMachineExists(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) error { id, err := parse.VirtualMachineID(state.ID) if err != nil { return err @@ -320,7 +319,7 @@ func (ImageResource) virtualMachineExists(ctx context.Context, client *clients.C return nil } -func (ImageResource) virtualMachineScaleSetExists(ctx context.Context, client *clients.Client, state *terraform.InstanceState) error { +func (ImageResource) virtualMachineScaleSetExists(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) error { id, err := parse.VirtualMachineScaleSetID(state.ID) if err != nil { return err diff --git a/azurerm/internal/services/compute/images_data_source.go b/azurerm/internal/services/compute/images_data_source.go index 1ead82f1c9c8..3fc4a77a9222 100644 --- a/azurerm/internal/services/compute/images_data_source.go +++ 
b/azurerm/internal/services/compute/images_data_source.go @@ -6,72 +6,72 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/location" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func dataSourceImages() *schema.Resource { - return &schema.Resource{ +func dataSourceImages() *pluginsdk.Resource { + return &pluginsdk.Resource{ Read: dataSourceImagesRead, - Timeouts: &schema.ResourceTimeout{ - Read: schema.DefaultTimeout(5 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Read: pluginsdk.DefaultTimeout(5 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "resource_group_name": azure.SchemaResourceGroupNameForDataSource(), "tags_filter": tags.Schema(), "images": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "location": location.SchemaComputed(), "zone_resilient": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Computed: true, }, "os_disk": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ 
"blob_uri": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "caching": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "managed_disk_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "os_state": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "os_type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "size_gb": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Computed: true, }, }, @@ -79,28 +79,28 @@ func dataSourceImages() *schema.Resource { }, "data_disk": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "blob_uri": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "caching": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "lun": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Computed: true, }, "managed_disk_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "size_gb": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Computed: true, }, }, @@ -115,7 +115,7 @@ func dataSourceImages() *schema.Resource { } } -func dataSourceImagesRead(d *schema.ResourceData, meta interface{}) error { +func dataSourceImagesRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.ImagesClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() diff --git a/azurerm/internal/services/compute/images_data_source_test.go b/azurerm/internal/services/compute/images_data_source_test.go index 09c94c49823e..3234a068f6c2 100644 --- a/azurerm/internal/services/compute/images_data_source_test.go +++ b/azurerm/internal/services/compute/images_data_source_test.go @@ -5,7 +5,6 @@ import ( "regexp" "testing" - 
"github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -17,22 +16,22 @@ func TestAccDataSourceAzureRMImages_basic(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_images", "test") r := ImagesDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { // need to create a vm and then reference it in the image creation Config: ImageResource{}.setupUnmanagedDisks(data, "LRS"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( data.CheckWithClientForResource(ImageResource{}.virtualMachineExists, "azurerm_virtual_machine.testsource"), data.CheckWithClientForResource(ImageResource{}.generalizeVirtualMachine(data), "azurerm_virtual_machine.testsource"), ), }, { Config: ImageResource{}.standaloneImageProvision(data, "LRS", ""), - Check: resource.ComposeTestCheckFunc(), + Check: acceptance.ComposeTestCheckFunc(), }, { Config: r.basic(data, "LRS"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("images.#").HasValue("1"), check.That(data.ResourceName).Key("images.0.os_disk.0.os_type").HasValue("Linux"), ), @@ -44,18 +43,18 @@ func TestAccDataSourceAzureRMImages_tagsFilterError(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_images", "test") r := ImagesDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { // need to create a vm and then reference it in the image creation Config: ImageResource{}.setupUnmanagedDisks(data, "LRS"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( data.CheckWithClientForResource(ImageResource{}.virtualMachineExists, "azurerm_virtual_machine.testsource"), 
data.CheckWithClientForResource(ImageResource{}.generalizeVirtualMachine(data), "azurerm_virtual_machine.testsource"), ), }, { Config: ImageResource{}.standaloneImageProvision(data, "LRS", ""), - Check: resource.ComposeTestCheckFunc(), + Check: acceptance.ComposeTestCheckFunc(), }, { Config: r.tagsFilterError(data, "LRS"), @@ -68,22 +67,22 @@ func TestAccDataSourceAzureRMImages_tagsFilter(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_images", "test") r := ImagesDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { // need to create a vm and then reference it in the image creation Config: ImageResource{}.setupUnmanagedDisks(data, "LRS"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( data.CheckWithClientForResource(ImageResource{}.virtualMachineExists, "azurerm_virtual_machine.testsource"), data.CheckWithClientForResource(ImageResource{}.generalizeVirtualMachine(data), "azurerm_virtual_machine.testsource"), ), }, { Config: ImageResource{}.standaloneImageProvision(data, "LRS", ""), - Check: resource.ComposeTestCheckFunc(), + Check: acceptance.ComposeTestCheckFunc(), }, { Config: r.tagsFilter(data, "LRS"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("images.#").HasValue("1"), ), }, diff --git a/azurerm/internal/services/compute/linux_virtual_machine_resource.go b/azurerm/internal/services/compute/linux_virtual_machine_resource.go index 6fad8ee3ce0b..51795cf57b2d 100644 --- a/azurerm/internal/services/compute/linux_virtual_machine_resource.go +++ b/azurerm/internal/services/compute/linux_virtual_machine_resource.go @@ -9,9 +9,6 @@ import ( "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" "github.com/hashicorp/go-azure-helpers/response" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - 
"github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" azValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" @@ -24,14 +21,15 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/base64" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/suppress" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) // TODO: confirm locking as appropriate -func resourceLinuxVirtualMachine() *schema.Resource { - return &schema.Resource{ +func resourceLinuxVirtualMachine() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceLinuxVirtualMachineCreate, Read: resourceLinuxVirtualMachineRead, Update: resourceLinuxVirtualMachineUpdate, @@ -41,16 +39,16 @@ func resourceLinuxVirtualMachine() *schema.Resource { return err }, importVirtualMachine(compute.Linux, "azurerm_linux_virtual_machine")), - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(45 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(45 * time.Minute), - Delete: schema.DefaultTimeout(45 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(45 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(45 * time.Minute), + Delete: pluginsdk.DefaultTimeout(45 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: 
pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: computeValidate.VirtualMachineName, @@ -62,18 +60,18 @@ func resourceLinuxVirtualMachine() *schema.Resource { // Required "admin_username": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validation.StringIsNotEmpty, }, "network_interface_ids": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Required: true, MinItems: 1, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: networkValidate.NetworkInterfaceID, }, }, @@ -81,7 +79,7 @@ func resourceLinuxVirtualMachine() *schema.Resource { "os_disk": virtualMachineOSDiskSchema(), "size": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringIsNotEmpty, }, @@ -90,7 +88,7 @@ func resourceLinuxVirtualMachine() *schema.Resource { "additional_capabilities": virtualMachineAdditionalCapabilitiesSchema(), "admin_password": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, Sensitive: true, @@ -100,13 +98,13 @@ func resourceLinuxVirtualMachine() *schema.Resource { "admin_ssh_key": SSHKeysSchema(true), "allow_extension_operations": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: true, }, "availability_set_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, ValidateFunc: computeValidate.AvailabilitySetID, @@ -123,7 +121,7 @@ func resourceLinuxVirtualMachine() *schema.Resource { "boot_diagnostics": bootDiagnosticsSchema(), "computer_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, // Computed since we reuse the VM name if one's not specified @@ -136,7 +134,7 @@ func resourceLinuxVirtualMachine() *schema.Resource { "custom_data": base64.OptionalSchema(true), "dedicated_host_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, 
Optional: true, ValidateFunc: computeValidate.DedicatedHostID, // the Compute/VM API is broken and returns the Resource Group name in UPPERCASE :shrug: @@ -146,20 +144,20 @@ func resourceLinuxVirtualMachine() *schema.Resource { }, "disable_password_authentication": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, ForceNew: true, Default: true, }, "encryption_at_host_enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, }, "eviction_policy": { // only applicable when `priority` is set to `Spot` - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, ValidateFunc: validation.StringInSlice([]string{ @@ -169,16 +167,16 @@ func resourceLinuxVirtualMachine() *schema.Resource { }, "extensions_time_budget": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Default: "PT1H30M", ValidateFunc: azValidate.ISO8601DurationBetween("PT15M", "PT2H"), }, - "identity": virtualMachineIdentitySchema(), + "identity": virtualMachineIdentity{}.Schema(), "license_type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ValidateFunc: validation.StringInSlice([]string{ "RHEL_BYOS", @@ -187,7 +185,7 @@ func resourceLinuxVirtualMachine() *schema.Resource { }, "max_bid_price": { - Type: schema.TypeFloat, + Type: pluginsdk.TypeFloat, Optional: true, Default: -1, ValidateFunc: validation.FloatAtLeast(-1.0), @@ -196,7 +194,7 @@ func resourceLinuxVirtualMachine() *schema.Resource { "plan": planSchema(), "priority": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, Default: string(compute.Regular), @@ -207,16 +205,15 @@ func resourceLinuxVirtualMachine() *schema.Resource { }, "provision_vm_agent": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: true, ForceNew: true, }, "proximity_placement_group_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, - ForceNew: true, 
ValidateFunc: computeValidate.ProximityPlacementGroupID, // the Compute/VM API is broken and returns the Resource Group name in UPPERCASE :shrug: DiffSuppressFunc: suppress.CaseDifference, @@ -225,7 +222,7 @@ func resourceLinuxVirtualMachine() *schema.Resource { "secret": linuxSecretSchema(), "source_image_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, ValidateFunc: validation.Any( @@ -238,7 +235,7 @@ func resourceLinuxVirtualMachine() *schema.Resource { "source_image_reference": sourceImageReferenceSchema(true), "virtual_machine_scale_set_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, ConflictsWith: []string{ @@ -248,7 +245,7 @@ func resourceLinuxVirtualMachine() *schema.Resource { }, "platform_fault_domain": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Default: -1, ForceNew: true, @@ -259,7 +256,7 @@ func resourceLinuxVirtualMachine() *schema.Resource { "tags": tags.Schema(), "zone": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, // this has to be computed because when you are trying to assign this VM to a VMSS in VMO mode with zones, @@ -273,36 +270,36 @@ func resourceLinuxVirtualMachine() *schema.Resource { // Computed "private_ip_address": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "private_ip_addresses": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, "public_ip_address": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "public_ip_addresses": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, "virtual_machine_id": { - Type: schema.TypeString, + Type: 
pluginsdk.TypeString, Computed: true, }, }, } } -func resourceLinuxVirtualMachineCreate(d *schema.ResourceData, meta interface{}) error { +func resourceLinuxVirtualMachineCreate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.VMClient ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -373,7 +370,7 @@ func resourceLinuxVirtualMachineCreate(d *schema.ResourceData, meta interface{}) return err } - sshKeysRaw := d.Get("admin_ssh_key").(*schema.Set).List() + sshKeysRaw := d.Get("admin_ssh_key").(*pluginsdk.Set).List() sshKeys := ExpandSSHKeys(sshKeysRaw) params := compute.VirtualMachine{ @@ -526,7 +523,7 @@ func resourceLinuxVirtualMachineCreate(d *schema.ResourceData, meta interface{}) return resourceLinuxVirtualMachineRead(d, meta) } -func resourceLinuxVirtualMachineRead(d *schema.ResourceData, meta interface{}) error { +func resourceLinuxVirtualMachineRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.VMClient disksClient := meta.(*clients.Client).Compute.DisksClient networkInterfacesClient := meta.(*clients.Client).Network.InterfacesClient @@ -647,7 +644,7 @@ func resourceLinuxVirtualMachineRead(d *schema.ResourceData, meta interface{}) e if err != nil { return fmt.Errorf("flattening `admin_ssh_key`: %+v", err) } - if err := d.Set("admin_ssh_key", schema.NewSet(SSHKeySchemaHash, *flattenedSSHKeys)); err != nil { + if err := d.Set("admin_ssh_key", pluginsdk.NewSet(SSHKeySchemaHash, *flattenedSSHKeys)); err != nil { return fmt.Errorf("setting `admin_ssh_key`: %+v", err) } } @@ -717,7 +714,7 @@ func resourceLinuxVirtualMachineRead(d *schema.ResourceData, meta interface{}) e return tags.FlattenAndSet(d, resp.Tags) } -func resourceLinuxVirtualMachineUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceLinuxVirtualMachineUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := 
meta.(*clients.Client).Compute.VMClient ctx, cancel := timeouts.ForUpdate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -874,6 +871,22 @@ func resourceLinuxVirtualMachineUpdate(d *schema.ResourceData, meta interface{}) } } + if d.HasChange("proximity_placement_group_id") { + shouldUpdate = true + + // Code="OperationNotAllowed" Message="Updating proximity placement group of VM is not allowed while the VM is running. Please stop/deallocate the VM and retry the operation." + shouldShutDown = true + shouldDeallocate = true + + if ppgIDRaw, ok := d.GetOk("proximity_placement_group_id"); ok { + update.VirtualMachineProperties.ProximityPlacementGroup = &compute.SubResource{ + ID: utils.String(ppgIDRaw.(string)), + } + } else { + update.VirtualMachineProperties.ProximityPlacementGroup = &compute.SubResource{} + } + } + if d.HasChange("size") { shouldUpdate = true @@ -1112,7 +1125,7 @@ func resourceLinuxVirtualMachineUpdate(d *schema.ResourceData, meta interface{}) return resourceLinuxVirtualMachineRead(d, meta) } -func resourceLinuxVirtualMachineDelete(d *schema.ResourceData, meta interface{}) error { +func resourceLinuxVirtualMachineDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.VMClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() @@ -1135,31 +1148,37 @@ func resourceLinuxVirtualMachineDelete(d *schema.ResourceData, meta interface{}) return fmt.Errorf("retrieving Linux Virtual Machine %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err) } - // If the VM was in a Failed state we can skip powering off, since that'll fail - if strings.EqualFold(*existing.ProvisioningState, "failed") { - log.Printf("[DEBUG] Powering Off Linux Virtual Machine was skipped because the VM was in %q state %q (Resource Group %q).", *existing.ProvisioningState, id.Name, id.ResourceGroup) - } else { - //ISSUE: 4920 - // shutting down the Virtual Machine prior to removing it means 
users are no longer charged for some Azure resources - // thus this can be a large cost-saving when deleting larger instances - // https://docs.microsoft.com/en-us/azure/virtual-machines/states-lifecycle - log.Printf("[DEBUG] Powering Off Linux Virtual Machine %q (Resource Group %q)..", id.Name, id.ResourceGroup) - skipShutdown := !meta.(*clients.Client).Features.VirtualMachine.GracefulShutdown - powerOffFuture, err := client.PowerOff(ctx, id.ResourceGroup, id.Name, utils.Bool(skipShutdown)) - if err != nil { - return fmt.Errorf("powering off Linux Virtual Machine %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err) - } - if err := powerOffFuture.WaitForCompletionRef(ctx, client.Client); err != nil { - return fmt.Errorf("waiting for power off of Linux Virtual Machine %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err) + if !meta.(*clients.Client).Features.VirtualMachine.SkipShutdownAndForceDelete { + // If the VM was in a Failed state we can skip powering off, since that'll fail + if strings.EqualFold(*existing.ProvisioningState, "failed") { + log.Printf("[DEBUG] Powering Off Linux Virtual Machine was skipped because the VM was in %q state %q (Resource Group %q).", *existing.ProvisioningState, id.Name, id.ResourceGroup) + } else { + //ISSUE: 4920 + // shutting down the Virtual Machine prior to removing it means users are no longer charged for some Azure resources + // thus this can be a large cost-saving when deleting larger instances + // https://docs.microsoft.com/en-us/azure/virtual-machines/states-lifecycle + log.Printf("[DEBUG] Powering Off Linux Virtual Machine %q (Resource Group %q)..", id.Name, id.ResourceGroup) + skipShutdown := !meta.(*clients.Client).Features.VirtualMachine.GracefulShutdown + powerOffFuture, err := client.PowerOff(ctx, id.ResourceGroup, id.Name, utils.Bool(skipShutdown)) + if err != nil { + return fmt.Errorf("powering off Linux Virtual Machine %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err) + } + if err 
:= powerOffFuture.WaitForCompletionRef(ctx, client.Client); err != nil { + return fmt.Errorf("waiting for power off of Linux Virtual Machine %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err) + } + log.Printf("[DEBUG] Powered Off Linux Virtual Machine %q (Resource Group %q).", id.Name, id.ResourceGroup) } - log.Printf("[DEBUG] Powered Off Linux Virtual Machine %q (Resource Group %q).", id.Name, id.ResourceGroup) } log.Printf("[DEBUG] Deleting Linux Virtual Machine %q (Resource Group %q)..", id.Name, id.ResourceGroup) - // @tombuildsstuff: sending `nil` here omits this value from being sent - which matches - // the previous behaviour - we're only splitting this out so it's clear why - // TODO: support force deletion once it's out of Preview, if applicable + + // Force Delete is in an opt-in Preview and can only be specified (true/false) if the feature is enabled + // as such we default this to `nil` which matches the previous behaviour (where this isn't sent) and + // conditionally set this if required var forceDeletion *bool = nil + if meta.(*clients.Client).Features.VirtualMachine.SkipShutdownAndForceDelete { + forceDeletion = utils.Bool(true) + } deleteFuture, err := client.Delete(ctx, id.ResourceGroup, id.Name, forceDeletion) if err != nil { return fmt.Errorf("deleting Linux Virtual Machine %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err) @@ -1219,11 +1238,11 @@ func resourceLinuxVirtualMachineDelete(d *schema.ResourceData, meta interface{}) if !utils.ResponseWasNotFound(virtualMachine.Response) { log.Printf("[INFO] Linux Virtual Machine still exists, waiting on Linux Virtual Machine %q to be deleted", id.Name) - deleteWait := &resource.StateChangeConf{ + deleteWait := &pluginsdk.StateChangeConf{ Pending: []string{"200"}, Target: []string{"404"}, MinTimeout: 30 * time.Second, - Timeout: d.Timeout(schema.TimeoutDelete), + Timeout: d.Timeout(pluginsdk.TimeoutDelete), Refresh: func() (interface{}, string, error) { log.Printf("[INFO] checking 
on state of Linux Virtual Machine %q", id.Name) resp, err := client.Get(ctx, id.ResourceGroup, id.Name, "") @@ -1237,7 +1256,7 @@ func resourceLinuxVirtualMachineDelete(d *schema.ResourceData, meta interface{}) }, } - if _, err := deleteWait.WaitForState(); err != nil { + if _, err := deleteWait.WaitForStateContext(ctx); err != nil { return fmt.Errorf("waiting for the deletion of Linux Virtual Machine %q (Resource Group %q): %v", id.Name, id.ResourceGroup, err) } } diff --git a/azurerm/internal/services/compute/linux_virtual_machine_resource_auth_test.go b/azurerm/internal/services/compute/linux_virtual_machine_resource_auth_test.go index a465b881c569..a12c39ab7bd1 100644 --- a/azurerm/internal/services/compute/linux_virtual_machine_resource_auth_test.go +++ b/azurerm/internal/services/compute/linux_virtual_machine_resource_auth_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -13,10 +12,10 @@ func TestAccLinuxVirtualMachine_authPassword(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.authPassword(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -30,10 +29,10 @@ func TestAccLinuxVirtualMachine_authPasswordAndSSH(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.authPasswordAndSSH(data), - Check: resource.ComposeTestCheckFunc( + Check: 
acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -47,10 +46,10 @@ func TestAccLinuxVirtualMachine_authSSH(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.authSSH(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -62,10 +61,10 @@ func TestAccLinuxVirtualMachine_authSSHMultipleKeys(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.authSSHMultiple(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, diff --git a/azurerm/internal/services/compute/linux_virtual_machine_resource_disk_os_test.go b/azurerm/internal/services/compute/linux_virtual_machine_resource_disk_os_test.go index fa18eee66273..3912950fb1dc 100644 --- a/azurerm/internal/services/compute/linux_virtual_machine_resource_disk_os_test.go +++ b/azurerm/internal/services/compute/linux_virtual_machine_resource_disk_os_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -13,10 +12,10 @@ func TestAccLinuxVirtualMachine_diskOSBasic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.diskOSBasic(data), - 
Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -28,24 +27,24 @@ func TestAccLinuxVirtualMachine_diskOSCachingType(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.diskOSCachingType(data, "None"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.diskOSCachingType(data, "ReadOnly"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.diskOSCachingType(data, "ReadWrite"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -57,10 +56,10 @@ func TestAccLinuxVirtualMachine_diskOSCustomName(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.diskOSCustomName(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -72,10 +71,10 @@ func TestAccLinuxVirtualMachine_diskOSCustomSize(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.diskOSCustomSize(data, 30), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -87,17 +86,17 @@ func 
TestAccLinuxVirtualMachine_diskOSCustomSizeExpanded(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.diskOSCustomSize(data, 30), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.diskOSCustomSize(data, 60), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -109,10 +108,10 @@ func TestAccLinuxVirtualMachine_diskOSDiskEncryptionSet(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.diskOSDiskDiskEncryptionSetEncrypted(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -124,17 +123,17 @@ func TestAccLinuxVirtualMachine_diskOSDiskEncryptionSetUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.diskOSDiskDiskEncryptionSetUnencrypted(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.diskOSDiskDiskEncryptionSetEncrypted(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -146,10 +145,10 @@ func TestAccLinuxVirtualMachine_diskOSEphemeral(t *testing.T) { data := acceptance.BuildTestData(t, 
"azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.diskOSEphemeral(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -161,10 +160,10 @@ func TestAccLinuxVirtualMachine_diskOSStorageTypeStandardLRS(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.diskOSStorageAccountType(data, "Standard_LRS"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -176,10 +175,10 @@ func TestAccLinuxVirtualMachine_diskOSStorageTypeStandardSSDLRS(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.diskOSStorageAccountType(data, "StandardSSD_LRS"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -191,10 +190,10 @@ func TestAccLinuxVirtualMachine_diskOSStorageTypePremiumLRS(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.diskOSStorageAccountType(data, "Premium_LRS"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -206,31 +205,31 @@ func TestAccLinuxVirtualMachine_diskOSStorageTypeUpdate(t *testing.T) { data := acceptance.BuildTestData(t, 
"azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.diskOSStorageAccountType(data, "Standard_LRS"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.diskOSStorageAccountType(data, "Premium_LRS"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.diskOSStorageAccountType(data, "StandardSSD_LRS"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.diskOSStorageAccountType(data, "Standard_LRS"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -242,11 +241,11 @@ func TestAccLinuxVirtualMachine_diskOSWriteAcceleratorEnabled(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // Enabled Config: r.diskOSWriteAcceleratorEnabled(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -254,7 +253,7 @@ func TestAccLinuxVirtualMachine_diskOSWriteAcceleratorEnabled(t *testing.T) { { // Disabled Config: r.diskOSWriteAcceleratorEnabled(data, false), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -262,7 +261,7 @@ func TestAccLinuxVirtualMachine_diskOSWriteAcceleratorEnabled(t *testing.T) { { // Enabled Config: r.diskOSWriteAcceleratorEnabled(data, true), - Check: 
resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, diff --git a/azurerm/internal/services/compute/linux_virtual_machine_resource_identity_test.go b/azurerm/internal/services/compute/linux_virtual_machine_resource_identity_test.go index 45115358abd1..83291ec0fc22 100644 --- a/azurerm/internal/services/compute/linux_virtual_machine_resource_identity_test.go +++ b/azurerm/internal/services/compute/linux_virtual_machine_resource_identity_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -13,10 +12,10 @@ func TestAccLinuxVirtualMachine_identityNone(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.identityNone(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("identity.%").HasValue("0"), ), @@ -29,10 +28,10 @@ func TestAccLinuxVirtualMachine_identitySystemAssigned(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.identitySystemAssigned(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -44,10 +43,10 @@ func TestAccLinuxVirtualMachine_identitySystemAssignedUserAssigned(t *testing.T) data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := 
LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.identitySystemAssignedUserAssigned(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -59,17 +58,17 @@ func TestAccLinuxVirtualMachine_identityUserAssigned(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.identityUserAssigned(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.identityUserAssignedUpdated(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -81,10 +80,10 @@ func TestAccLinuxVirtualMachine_identityUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.identityNone(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("identity.%").HasValue("0"), ), @@ -92,28 +91,28 @@ func TestAccLinuxVirtualMachine_identityUpdate(t *testing.T) { data.ImportStep(), { Config: r.identitySystemAssigned(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.identityUserAssigned(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, 
data.ImportStep(), { Config: r.identitySystemAssignedUserAssigned(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.identityNone(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("identity.%").HasValue("0"), ), diff --git a/azurerm/internal/services/compute/linux_virtual_machine_resource_images_test.go b/azurerm/internal/services/compute/linux_virtual_machine_resource_images_test.go index 237fa2117fa0..9c00e8ebbb30 100644 --- a/azurerm/internal/services/compute/linux_virtual_machine_resource_images_test.go +++ b/azurerm/internal/services/compute/linux_virtual_machine_resource_images_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -13,11 +12,11 @@ func TestAccLinuxVirtualMachine_imageFromImage(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // create the original VM Config: r.imageFromExistingMachinePrep(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( data.CheckWithClientForResource(ImageResource{}.virtualMachineExists, "azurerm_linux_virtual_machine.source"), data.CheckWithClientForResource(ImageResource{}.generalizeVirtualMachine(data), "azurerm_linux_virtual_machine.source"), ), @@ -25,7 +24,7 @@ func TestAccLinuxVirtualMachine_imageFromImage(t *testing.T) { { // then create an image from that VM, and then create a VM from that image Config: 
r.imageFromImage(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -37,10 +36,10 @@ func TestAccLinuxVirtualMachine_imageFromPlan(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.imageFromPlan(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -52,11 +51,11 @@ func TestAccLinuxVirtualMachine_imageFromSharedImageGallery(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // create the original VM Config: r.imageFromExistingMachinePrep(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( data.CheckWithClientForResource(ImageResource{}.virtualMachineExists, "azurerm_linux_virtual_machine.source"), data.CheckWithClientForResource(ImageResource{}.generalizeVirtualMachine(data), "azurerm_linux_virtual_machine.source"), ), @@ -64,7 +63,7 @@ func TestAccLinuxVirtualMachine_imageFromSharedImageGallery(t *testing.T) { { // then create an image from that VM, and then create a VM from that image Config: r.imageFromSharedImageGallery(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -76,10 +75,10 @@ func TestAccLinuxVirtualMachine_imageFromSourceImageReference(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: 
r.imageFromSourceImageReference(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, diff --git a/azurerm/internal/services/compute/linux_virtual_machine_resource_network_test.go b/azurerm/internal/services/compute/linux_virtual_machine_resource_network_test.go index 2d51e3613ef5..252dba761bc6 100644 --- a/azurerm/internal/services/compute/linux_virtual_machine_resource_network_test.go +++ b/azurerm/internal/services/compute/linux_virtual_machine_resource_network_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -19,10 +18,10 @@ func TestAccLinuxVirtualMachine_networkIPv6(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkIPv6(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("private_ip_address").Exists(), check.That(data.ResourceName).Key("public_ip_address").HasValue(""), @@ -36,10 +35,10 @@ func TestAccLinuxVirtualMachine_networkMultiple(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkMultiple(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("private_ip_address").Exists(), 
check.That(data.ResourceName).Key("private_ip_addresses.#").HasValue("2"), @@ -51,7 +50,7 @@ func TestAccLinuxVirtualMachine_networkMultiple(t *testing.T) { { // update the Primary IP Config: r.networkMultipleUpdated(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("private_ip_address").Exists(), check.That(data.ResourceName).Key("private_ip_addresses.#").HasValue("2"), @@ -63,7 +62,7 @@ func TestAccLinuxVirtualMachine_networkMultiple(t *testing.T) { { // remove the secondary IP Config: r.networkMultipleRemoved(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("private_ip_address").Exists(), check.That(data.ResourceName).Key("private_ip_addresses.#").HasValue("1"), @@ -79,10 +78,10 @@ func TestAccLinuxVirtualMachine_networkMultiplePublic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkMultiplePublic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("private_ip_address").Exists(), check.That(data.ResourceName).Key("private_ip_addresses.#").HasValue("2"), @@ -94,7 +93,7 @@ func TestAccLinuxVirtualMachine_networkMultiplePublic(t *testing.T) { { // update the Primary IP Config: r.networkMultiplePublicUpdated(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("private_ip_address").Exists(), check.That(data.ResourceName).Key("private_ip_addresses.#").HasValue("2"), @@ -106,7 +105,7 @@ func 
TestAccLinuxVirtualMachine_networkMultiplePublic(t *testing.T) { { // remove the secondary IP Config: r.networkMultiplePublicRemoved(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("private_ip_address").Exists(), check.That(data.ResourceName).Key("private_ip_addresses.#").HasValue("1"), @@ -122,10 +121,10 @@ func TestAccLinuxVirtualMachine_networkPrivateDynamicIP(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkPrivateDynamicIP(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("private_ip_address").Exists(), check.That(data.ResourceName).Key("public_ip_address").HasValue(""), @@ -139,10 +138,10 @@ func TestAccLinuxVirtualMachine_networkPrivateStaticIP(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkPrivateStaticIP(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("private_ip_address").Exists(), check.That(data.ResourceName).Key("public_ip_address").HasValue(""), @@ -156,10 +155,10 @@ func TestAccLinuxVirtualMachine_networkPrivateUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkPrivateDynamicIP(data), - Check: 
resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("private_ip_address").Exists(), check.That(data.ResourceName).Key("public_ip_address").HasValue(""), @@ -168,7 +167,7 @@ func TestAccLinuxVirtualMachine_networkPrivateUpdate(t *testing.T) { data.ImportStep(), { Config: r.networkPrivateStaticIP(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("private_ip_address").Exists(), check.That(data.ResourceName).Key("public_ip_address").HasValue(""), @@ -182,10 +181,10 @@ func TestAccLinuxVirtualMachine_networkPublicDynamicPrivateDynamicIP(t *testing. data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkPublicDynamicPrivateDynamicIP(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("private_ip_address").Exists(), check.That(data.ResourceName).Key("public_ip_address").IsEmpty(), @@ -199,10 +198,10 @@ func TestAccLinuxVirtualMachine_networkPublicDynamicPrivateStaticIP(t *testing.T data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkPublicDynamicPrivateStaticIP(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("private_ip_address").Exists(), check.That(data.ResourceName).Key("public_ip_address").IsEmpty(), @@ -216,10 +215,10 @@ func 
TestAccLinuxVirtualMachine_networkPublicDynamicUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkPublicDynamicPrivateDynamicIP(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("private_ip_address").Exists(), check.That(data.ResourceName).Key("public_ip_address").IsEmpty(), @@ -228,7 +227,7 @@ func TestAccLinuxVirtualMachine_networkPublicDynamicUpdate(t *testing.T) { data.ImportStep(), { Config: r.networkPublicDynamicPrivateStaticIP(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("private_ip_address").Exists(), check.That(data.ResourceName).Key("public_ip_address").IsEmpty(), @@ -242,10 +241,10 @@ func TestAccLinuxVirtualMachine_networkPublicStaticPrivateDynamicIP(t *testing.T data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkPublicStaticPrivateDynamicIP(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("private_ip_address").Exists(), check.That(data.ResourceName).Key("public_ip_address").Exists(), @@ -259,10 +258,10 @@ func TestAccLinuxVirtualMachine_networkPublicStaticPrivateStaticIP(t *testing.T) data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: 
r.networkPublicStaticPrivateStaticIP(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("private_ip_address").Exists(), check.That(data.ResourceName).Key("public_ip_address").Exists(), @@ -276,10 +275,10 @@ func TestAccLinuxVirtualMachine_networkPublicStaticPrivateUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkPublicStaticPrivateDynamicIP(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("private_ip_address").Exists(), check.That(data.ResourceName).Key("public_ip_address").Exists(), @@ -288,7 +287,7 @@ func TestAccLinuxVirtualMachine_networkPublicStaticPrivateUpdate(t *testing.T) { data.ImportStep(), { Config: r.networkPublicStaticPrivateStaticIP(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("private_ip_address").Exists(), check.That(data.ResourceName).Key("public_ip_address").Exists(), diff --git a/azurerm/internal/services/compute/linux_virtual_machine_resource_orchestrated_test.go b/azurerm/internal/services/compute/linux_virtual_machine_resource_orchestrated_test.go index 112937a523d3..44bd4374ab31 100644 --- a/azurerm/internal/services/compute/linux_virtual_machine_resource_orchestrated_test.go +++ b/azurerm/internal/services/compute/linux_virtual_machine_resource_orchestrated_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -13,10 +12,10 @@ func TestAccLinuxVirtualMachine_orchestratedZonal(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.orchestratedZonal(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -28,10 +27,10 @@ func TestAccLinuxVirtualMachine_orchestratedWithPlatformFaultDomain(t *testing.T data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.orchestratedWithPlatformFaultDomain(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -43,10 +42,10 @@ func TestAccLinuxVirtualMachine_orchestratedZonalWithProximityPlacementGroup(t * data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.orchestratedZonalWithProximityPlacementGroup(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -58,10 +57,10 @@ func TestAccLinuxVirtualMachine_orchestratedNonZonal(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.orchestratedNonZonal(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( 
check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -73,10 +72,10 @@ func TestAccLinuxVirtualMachine_orchestratedMultipleZonal(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.orchestratedMultipleZonal(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -88,10 +87,10 @@ func TestAccLinuxVirtualMachine_orchestratedMultipleNonZonal(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.orchestratedMultipleNonZonal(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, diff --git a/azurerm/internal/services/compute/linux_virtual_machine_resource_other_test.go b/azurerm/internal/services/compute/linux_virtual_machine_resource_other_test.go index 6c84a99f1591..5207e4df9175 100644 --- a/azurerm/internal/services/compute/linux_virtual_machine_resource_other_test.go +++ b/azurerm/internal/services/compute/linux_virtual_machine_resource_other_test.go @@ -5,7 +5,6 @@ import ( "regexp" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -14,10 +13,10 @@ func TestAccLinuxVirtualMachine_otherAllowExtensionOperationsDefault(t *testing. 
data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherAllowExtensionOperationsDefault(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("allow_extension_operations").HasValue("true"), ), @@ -30,10 +29,10 @@ func TestAccLinuxVirtualMachine_otherAllowExtensionOperationsDisabled(t *testing data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherAllowExtensionOperationsDisabled(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("allow_extension_operations").HasValue("false"), ), @@ -46,10 +45,10 @@ func TestAccLinuxVirtualMachine_otherAllowExtensionOperationsUpdated(t *testing. data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherAllowExtensionOperationsDefault(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("allow_extension_operations").HasValue("true"), ), @@ -57,7 +56,7 @@ func TestAccLinuxVirtualMachine_otherAllowExtensionOperationsUpdated(t *testing. 
data.ImportStep(), { Config: r.otherAllowExtensionOperationsDisabled(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("allow_extension_operations").HasValue("false"), ), @@ -70,10 +69,10 @@ func TestAccLinuxVirtualMachine_otherAllowExtensionOperationsUpdatedWithoutVmAge data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherAllowExtensionOperationsDisabledWithoutVmAgent(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("allow_extension_operations").HasValue("false"), ), @@ -81,7 +80,7 @@ func TestAccLinuxVirtualMachine_otherAllowExtensionOperationsUpdatedWithoutVmAge data.ImportStep(), { Config: r.otherAllowExtensionOperationsEnabledWithoutVmAgent(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("allow_extension_operations").HasValue("true"), ), @@ -94,10 +93,10 @@ func TestAccLinuxVirtualMachine_otherExtensionsTimeBudget(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherExtensionsTimeBudget(data, "PT30M"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("extensions_time_budget").HasValue("PT30M"), ), @@ -110,10 +109,10 @@ func TestAccLinuxVirtualMachine_otherExtensionsTimeBudgetUpdate(t *testing.T) { data := acceptance.BuildTestData(t, 
"azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherExtensionsTimeBudget(data, "PT30M"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("extensions_time_budget").HasValue("PT30M"), ), @@ -121,7 +120,7 @@ func TestAccLinuxVirtualMachine_otherExtensionsTimeBudgetUpdate(t *testing.T) { data.ImportStep(), { Config: r.otherExtensionsTimeBudget(data, "PT50M"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("extensions_time_budget").HasValue("PT50M"), ), @@ -129,7 +128,7 @@ func TestAccLinuxVirtualMachine_otherExtensionsTimeBudgetUpdate(t *testing.T) { data.ImportStep(), { Config: r.otherExtensionsTimeBudget(data, "PT30M"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("extensions_time_budget").HasValue("PT30M"), ), @@ -142,11 +141,11 @@ func TestAccLinuxVirtualMachine_otherBootDiagnostics(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // Enabled Config: r.otherBootDiagnostics(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -154,7 +153,7 @@ func TestAccLinuxVirtualMachine_otherBootDiagnostics(t *testing.T) { { // Disabled Config: r.otherBootDiagnosticsDisabled(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -162,7 +161,7 @@ 
func TestAccLinuxVirtualMachine_otherBootDiagnostics(t *testing.T) { { // Enabled Config: r.otherBootDiagnostics(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -174,11 +173,11 @@ func TestAccLinuxVirtualMachine_otherBootDiagnosticsManaged(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // Enabled Config: r.otherBootDiagnosticsManaged(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -186,7 +185,7 @@ func TestAccLinuxVirtualMachine_otherBootDiagnosticsManaged(t *testing.T) { { // Disabled Config: r.otherBootDiagnosticsDisabled(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -194,7 +193,7 @@ func TestAccLinuxVirtualMachine_otherBootDiagnosticsManaged(t *testing.T) { { // Enabled Config: r.otherBootDiagnosticsManaged(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -206,10 +205,10 @@ func TestAccLinuxVirtualMachine_otherComputerNameDefault(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherComputerNameDefault(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("computer_name").Exists(), ), @@ -222,7 +221,7 @@ func TestAccLinuxVirtualMachine_otherComputerNameDefaultInvalid(t *testing.T) { data := 
acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherComputerNameDefaultInvalid(data), ExpectError: regexp.MustCompile("unable to assume default computer name"), @@ -234,10 +233,10 @@ func TestAccLinuxVirtualMachine_otherComputerNameCustom(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherComputerNameCustom(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("computer_name").HasValue("custom-linux-hostname-123"), ), @@ -250,10 +249,10 @@ func TestAccLinuxVirtualMachine_otherCustomData(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherCustomData(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -261,14 +260,29 @@ func TestAccLinuxVirtualMachine_otherCustomData(t *testing.T) { }) } +func TestAccLinuxVirtualMachine_otherSkipShutdownAndForceDelete(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") + r := LinuxVirtualMachineResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.otherSkipShutdownAndForceDelete(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + func TestAccLinuxVirtualMachine_otherLicenseType(t *testing.T) { data := acceptance.BuildTestData(t, 
"azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherLicenseType(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("license_type").HasValue("SLES_BYOS"), ), @@ -281,10 +295,10 @@ func TestAccLinuxVirtualMachine_otherPrioritySpot(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherPrioritySpot(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -296,11 +310,11 @@ func TestAccLinuxVirtualMachine_otherPrioritySpotMaxBidPrice(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // expensive, but guarantees this test will pass Config: r.otherPrioritySpotMaxBidPrice(data, "0.5000"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -308,7 +322,7 @@ func TestAccLinuxVirtualMachine_otherPrioritySpotMaxBidPrice(t *testing.T) { { // no limit Config: r.otherPrioritySpotMaxBidPrice(data, "-1"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -320,10 +334,10 @@ func TestAccLinuxVirtualMachine_otherProvisionVMAgentDefault(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + 
data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherProvisionVMAgentDefault(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("provision_vm_agent").HasValue("true"), ), @@ -336,10 +350,10 @@ func TestAccLinuxVirtualMachine_otherProvisionVMAgentDisabled(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherProvisionVMAgentDisabled(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("provision_vm_agent").HasValue("false"), ), @@ -352,10 +366,10 @@ func TestAccLinuxVirtualMachine_otherRequiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.authSSH(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -367,24 +381,24 @@ func TestAccLinuxVirtualMachine_otherSecret(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherSecret(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.otherSecretUpdated(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { 
Config: r.otherSecretRemoved(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("secret.#").HasValue("0"), ), @@ -397,17 +411,17 @@ func TestAccLinuxVirtualMachine_otherTags(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherTags(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.otherTagsUpdated(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -419,10 +433,10 @@ func TestAccLinuxVirtualMachine_otherUltraSsdDefault(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherUltraSsd(data, false), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("additional_capabilities.0.ultra_ssd_enabled").HasValue("false"), ), @@ -435,10 +449,10 @@ func TestAccLinuxVirtualMachine_otherUltraSsdEnabled(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherUltraSsd(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), 
check.That(data.ResourceName).Key("additional_capabilities.0.ultra_ssd_enabled").HasValue("true"), ), @@ -451,10 +465,10 @@ func TestAccLinuxVirtualMachine_otherUltraSsdUpdated(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherUltraSsd(data, false), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("additional_capabilities.0.ultra_ssd_enabled").HasValue("false"), ), @@ -462,7 +476,7 @@ func TestAccLinuxVirtualMachine_otherUltraSsdUpdated(t *testing.T) { data.ImportStep(), { Config: r.otherUltraSsd(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("additional_capabilities.0.ultra_ssd_enabled").HasValue("true"), ), @@ -475,10 +489,10 @@ func TestAccLinuxVirtualMachine_otherEncryptionAtHostEnabled(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherEncryptionAtHostEnabled(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -490,24 +504,24 @@ func TestAccLinuxVirtualMachine_otherEncryptionAtHostEnabledUpdate(t *testing.T) data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherEncryptionAtHostEnabled(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( 
check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.otherEncryptionAtHostEnabled(data, false), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.otherEncryptionAtHostEnabled(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -519,10 +533,10 @@ func TestAccLinuxVirtualMachine_otherEncryptionAtHostEnabledWithCMK(t *testing.T data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherEncryptionAtHostEnabledWithCMK(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -534,10 +548,10 @@ func TestAccLinuxVirtualMachine_otherGracefulShutdownDisabled(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherGracefulShutdown(data, false), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -548,10 +562,10 @@ func TestAccLinuxVirtualMachine_otherGracefulShutdownEnabled(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherGracefulShutdown(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -995,6 +1009,48 @@ resource 
"azurerm_linux_virtual_machine" "test" { `, r.template(data), data.RandomInteger) } +func (r LinuxVirtualMachineResource) otherSkipShutdownAndForceDelete(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features { + virtual_machine { + skip_shutdown_and_force_delete = true + } + } +} + +%s + +resource "azurerm_linux_virtual_machine" "test" { + name = "acctestVM-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + size = "Standard_F2" + admin_username = "adminuser" + network_interface_ids = [ + azurerm_network_interface.test.id, + ] + + admin_ssh_key { + username = "adminuser" + public_key = local.first_public_key + } + + os_disk { + caching = "ReadWrite" + storage_account_type = "Standard_LRS" + } + + source_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "16.04-LTS" + version = "latest" + } +} +`, r.template(data), data.RandomInteger) +} + func (r LinuxVirtualMachineResource) otherLicenseType(data acceptance.TestData) string { return fmt.Sprintf(` %s diff --git a/azurerm/internal/services/compute/linux_virtual_machine_resource_scaling_test.go b/azurerm/internal/services/compute/linux_virtual_machine_resource_scaling_test.go index c185b0fd4cb3..91e79fd38d62 100644 --- a/azurerm/internal/services/compute/linux_virtual_machine_resource_scaling_test.go +++ b/azurerm/internal/services/compute/linux_virtual_machine_resource_scaling_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -13,11 +12,11 @@ func TestAccLinuxVirtualMachine_scalingAdditionalCapabilitiesUltraSSD(t *testing data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - 
data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // NOTE: this requires a large-ish machine to provision Config: r.scalingAdditionalCapabilitiesUltraSSD(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -29,10 +28,10 @@ func TestAccLinuxVirtualMachine_scalingAvailabilitySet(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.scalingAvailabilitySet(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -44,10 +43,10 @@ func TestAccLinuxVirtualMachine_scalingDedicatedHost(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.scalingDedicatedHost(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -59,31 +58,31 @@ func TestAccLinuxVirtualMachine_scalingDedicatedHostUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.scalingDedicatedHostInitial(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.scalingDedicatedHost(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { 
Config: r.scalingDedicatedHostUpdate(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.scalingDedicatedHostRemoved(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -95,10 +94,54 @@ func TestAccLinuxVirtualMachine_scalingProximityPlacementGroup(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.scalingProximityPlacementGroup(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccLinuxVirtualMachine_scalingProximityPlacementGroupUpdate(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") + r := LinuxVirtualMachineResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.scalingProximityPlacementGroup(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.scalingProximityPlacementGroupUpdate(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccLinuxVirtualMachine_scalingProximityPlacementGroupRemoved(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") + r := LinuxVirtualMachineResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.scalingProximityPlacementGroup(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: 
r.scalingProximityPlacementGroupRemoved(data), + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -110,24 +153,24 @@ func TestAccLinuxVirtualMachine_scalingMachineSizeUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.scalingMachineSize(data, "Standard_F2"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.scalingMachineSize(data, "Standard_F4"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.scalingMachineSize(data, "Standard_F4s_v2"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -139,10 +182,10 @@ func TestAccLinuxVirtualMachine_scalingZones(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine", "test") r := LinuxVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.scalingZone(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -470,6 +513,93 @@ resource "azurerm_linux_virtual_machine" "test" { `, r.template(data), data.RandomInteger, data.RandomInteger) } +func (r LinuxVirtualMachineResource) scalingProximityPlacementGroupUpdate(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_proximity_placement_group" "test" { + name = "acctestPPG-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location +} + +resource 
"azurerm_proximity_placement_group" "second" { + name = "acctestPPG2-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location +} + +resource "azurerm_linux_virtual_machine" "test" { + name = "acctestVM-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + size = "Standard_F2" + admin_username = "adminuser" + proximity_placement_group_id = azurerm_proximity_placement_group.second.id + network_interface_ids = [ + azurerm_network_interface.test.id, + ] + + admin_ssh_key { + username = "adminuser" + public_key = local.first_public_key + } + + os_disk { + caching = "ReadWrite" + storage_account_type = "Standard_LRS" + } + + source_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "16.04-LTS" + version = "latest" + } +} +`, r.template(data), data.RandomInteger, data.RandomInteger, data.RandomInteger) +} + +func (r LinuxVirtualMachineResource) scalingProximityPlacementGroupRemoved(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_proximity_placement_group" "test" { + name = "acctestPPG-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location +} + +resource "azurerm_linux_virtual_machine" "test" { + name = "acctestVM-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + size = "Standard_F2" + admin_username = "adminuser" + network_interface_ids = [ + azurerm_network_interface.test.id, + ] + + admin_ssh_key { + username = "adminuser" + public_key = local.first_public_key + } + + os_disk { + caching = "ReadWrite" + storage_account_type = "Standard_LRS" + } + + source_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "16.04-LTS" + version = "latest" + } +} +`, r.template(data), data.RandomInteger, data.RandomInteger) +} + func (r LinuxVirtualMachineResource) 
scalingMachineSize(data acceptance.TestData, size string) string { return fmt.Sprintf(` %s diff --git a/azurerm/internal/services/compute/linux_virtual_machine_resource_test.go b/azurerm/internal/services/compute/linux_virtual_machine_resource_test.go index 12272a87b8b3..1f8d3656a5c6 100644 --- a/azurerm/internal/services/compute/linux_virtual_machine_resource_test.go +++ b/azurerm/internal/services/compute/linux_virtual_machine_resource_test.go @@ -4,17 +4,17 @@ import ( "context" "fmt" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) type LinuxVirtualMachineResource struct { } -func (t LinuxVirtualMachineResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (t LinuxVirtualMachineResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := parse.VirtualMachineID(state.ID) if err != nil { return nil, err diff --git a/azurerm/internal/services/compute/linux_virtual_machine_scale_set_auth_resource_test.go b/azurerm/internal/services/compute/linux_virtual_machine_scale_set_auth_resource_test.go index 55e0e7a6e539..f4959e1d11c3 100644 --- a/azurerm/internal/services/compute/linux_virtual_machine_scale_set_auth_resource_test.go +++ b/azurerm/internal/services/compute/linux_virtual_machine_scale_set_auth_resource_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -13,10 +12,10 @@ func TestAccLinuxVirtualMachineScaleSet_authPassword(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.authPassword(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -30,10 +29,10 @@ func TestAccLinuxVirtualMachineScaleSet_authSSHKey(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.authSSHKey(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -45,10 +44,10 @@ func TestAccLinuxVirtualMachineScaleSet_authSSHKeyAndPassword(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.authSSHKeyAndPassword(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -62,10 +61,10 @@ func TestAccLinuxVirtualMachineScaleSet_authMultipleSSHKeys(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.authMultipleSSHKeys(data), - Check: resource.ComposeTestCheckFunc( + Check: 
acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -77,17 +76,17 @@ func TestAccLinuxVirtualMachineScaleSet_authUpdatingSSHKeys(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.authSSHKey(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.authSSHKeyUpdated(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -99,11 +98,11 @@ func TestAccLinuxVirtualMachineScaleSet_authDisablePasswordAuthUpdate(t *testing data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // disable it Config: r.authSSHKey(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -113,7 +112,7 @@ func TestAccLinuxVirtualMachineScaleSet_authDisablePasswordAuthUpdate(t *testing { // enable it Config: r.authPassword(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -123,7 +122,7 @@ func TestAccLinuxVirtualMachineScaleSet_authDisablePasswordAuthUpdate(t *testing { // disable it Config: r.authSSHKey(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, diff --git a/azurerm/internal/services/compute/linux_virtual_machine_scale_set_disk_data_resource_test.go 
b/azurerm/internal/services/compute/linux_virtual_machine_scale_set_disk_data_resource_test.go index 8a19d59271f5..8d56fe4198bf 100644 --- a/azurerm/internal/services/compute/linux_virtual_machine_scale_set_disk_data_resource_test.go +++ b/azurerm/internal/services/compute/linux_virtual_machine_scale_set_disk_data_resource_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -13,10 +12,10 @@ func TestAccLinuxVirtualMachineScaleSet_disksDataDiskBasic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.disksDataDiskBasic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -30,10 +29,10 @@ func TestAccLinuxVirtualMachineScaleSet_disksDataDiskCaching(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.disksDataDiskCaching(data, "None"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -42,7 +41,7 @@ func TestAccLinuxVirtualMachineScaleSet_disksDataDiskCaching(t *testing.T) { ), { Config: r.disksDataDiskCaching(data, "ReadOnly"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -51,7 +50,7 @@ func TestAccLinuxVirtualMachineScaleSet_disksDataDiskCaching(t 
*testing.T) { ), { Config: r.disksDataDiskCaching(data, "ReadWrite"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -65,10 +64,10 @@ func TestAccLinuxVirtualMachineScaleSet_disksDataDiskDiskEncryptionSet(t *testin data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.disksDataDisk_diskEncryptionSet(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -82,11 +81,11 @@ func TestAccLinuxVirtualMachineScaleSet_disksDataDiskResizing(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // 30GB Config: r.disksDataDiskResize(data, 30), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -96,7 +95,7 @@ func TestAccLinuxVirtualMachineScaleSet_disksDataDiskResizing(t *testing.T) { { // 60GB Config: r.disksDataDiskResize(data, 60), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -110,10 +109,10 @@ func TestAccLinuxVirtualMachineScaleSet_disksDataDiskMultiple(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.disksDataDiskMultiple(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( 
check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -127,10 +126,10 @@ func TestAccLinuxVirtualMachineScaleSet_disksDataDiskRemove(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.disksDataDiskBasic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -139,7 +138,7 @@ func TestAccLinuxVirtualMachineScaleSet_disksDataDiskRemove(t *testing.T) { ), { Config: r.authPassword(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -153,11 +152,11 @@ func TestAccLinuxVirtualMachineScaleSet_disksDataDiskScaling(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // no disks Config: r.authPassword(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -167,7 +166,7 @@ func TestAccLinuxVirtualMachineScaleSet_disksDataDiskScaling(t *testing.T) { { // one disk Config: r.disksDataDiskBasic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -177,7 +176,7 @@ func TestAccLinuxVirtualMachineScaleSet_disksDataDiskScaling(t *testing.T) { { // two disks Config: r.disksDataDiskMultiple(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -187,7 +186,7 @@ func TestAccLinuxVirtualMachineScaleSet_disksDataDiskScaling(t *testing.T) { { // no 
disks Config: r.authPassword(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -201,10 +200,10 @@ func TestAccLinuxVirtualMachineScaleSet_disksDataDiskStorageAccountTypeStandardL data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.disksDataDiskStorageAccountType(data, "Standard_LRS"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -218,10 +217,10 @@ func TestAccLinuxVirtualMachineScaleSet_disksDataDiskStorageAccountTypeStandardS data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.disksDataDiskStorageAccountType(data, "StandardSSD_LRS"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -235,10 +234,10 @@ func TestAccLinuxVirtualMachineScaleSet_disksDataDiskStorageAccountTypePremiumLR data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.disksDataDiskStorageAccountType(data, "Premium_LRS"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -253,10 +252,10 @@ func TestAccLinuxVirtualMachineScaleSet_disksDataDiskStorageAccountTypeUltraSSDL r := LinuxVirtualMachineScaleSetResource{} // Are supported in East US 2, SouthEast Asia, and North Europe, in two 
availability zones per region - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.disksDataDiskStorageAccountTypeUltraSSDLRS(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -271,10 +270,10 @@ func TestAccLinuxVirtualMachineScaleSet_disksDataDiskStorageAccountTypeUltraSSDL r := LinuxVirtualMachineScaleSetResource{} // Are supported in East US 2, SouthEast Asia, and North Europe, in two availability zones per region - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.disksDataDiskStorageAccountTypeUltraSSDLRSWithIOPS(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -289,10 +288,10 @@ func TestAccLinuxVirtualMachineScaleSet_disksDataDiskStorageAccountTypeUltraSSDL r := LinuxVirtualMachineScaleSetResource{} // Are supported in East US 2, SouthEast Asia, and North Europe, in two availability zones per region - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.disksDataDiskStorageAccountTypeUltraSSDLRSWithMBPS(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -307,10 +306,10 @@ func TestAccLinuxVirtualMachineScaleSet_disksDataDiskStorageAccountTypeUltraSSDL r := LinuxVirtualMachineScaleSetResource{} // Are supported in East US 2, SouthEast Asia, and North Europe, in two availability zones per region - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.disksDataDiskStorageAccountTypeUltraSSDLRSWithIOPSAndMBPS(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -324,10 +323,10 
@@ func TestAccLinuxVirtualMachineScaleSet_disksDataDiskWriteAcceleratorEnabled(t * data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.disksDataDiskWriteAcceleratorEnabled(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, diff --git a/azurerm/internal/services/compute/linux_virtual_machine_scale_set_disk_os_resource_test.go b/azurerm/internal/services/compute/linux_virtual_machine_scale_set_disk_os_resource_test.go index c071b8a70eba..9aa114454d57 100644 --- a/azurerm/internal/services/compute/linux_virtual_machine_scale_set_disk_os_resource_test.go +++ b/azurerm/internal/services/compute/linux_virtual_machine_scale_set_disk_os_resource_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -13,10 +12,10 @@ func TestAccLinuxVirtualMachineScaleSet_disksOSDiskCaching(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.disksOSDiskCaching(data, "None"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -25,7 +24,7 @@ func TestAccLinuxVirtualMachineScaleSet_disksOSDiskCaching(t *testing.T) { ), { Config: r.disksOSDiskCaching(data, "ReadOnly"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), 
), }, @@ -34,7 +33,7 @@ func TestAccLinuxVirtualMachineScaleSet_disksOSDiskCaching(t *testing.T) { ), { Config: r.disksOSDiskCaching(data, "ReadWrite"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -48,11 +47,11 @@ func TestAccLinuxVirtualMachineScaleSet_disksOSDiskCustomSize(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // unset Config: r.authPassword(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -61,7 +60,7 @@ func TestAccLinuxVirtualMachineScaleSet_disksOSDiskCustomSize(t *testing.T) { ), { Config: r.disksOSDiskCustomSize(data, 30), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -71,7 +70,7 @@ func TestAccLinuxVirtualMachineScaleSet_disksOSDiskCustomSize(t *testing.T) { { // resize a second time to confirm https://github.com/Azure/azure-rest-api-specs/issues/1906 Config: r.disksOSDiskCustomSize(data, 60), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -85,10 +84,10 @@ func TestAccLinuxVirtualMachineScaleSet_disksOSDiskDiskEncryptionSet(t *testing. 
data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.disksOSDisk_diskEncryptionSet(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -102,10 +101,10 @@ func TestAccLinuxVirtualMachineScaleSet_disksOSDiskEphemeral(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.disksOSDiskEphemeral(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -119,10 +118,10 @@ func TestAccLinuxVirtualMachineScaleSet_disksOSDiskStorageAccountTypeStandardLRS data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.disksOSDiskStorageAccountType(data, "Standard_LRS"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -136,10 +135,10 @@ func TestAccLinuxVirtualMachineScaleSet_disksOSDiskStorageAccountTypeStandardSSD data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.disksOSDiskStorageAccountType(data, "StandardSSD_LRS"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -153,10 +152,10 @@ 
func TestAccLinuxVirtualMachineScaleSet_disksOSDiskStorageAccountTypePremiumLRS( data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.disksOSDiskStorageAccountType(data, "Premium_LRS"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -170,10 +169,10 @@ func TestAccLinuxVirtualMachineScaleSet_disksOSDiskWriteAcceleratorEnabled(t *te data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.disksOSDiskWriteAcceleratorEnabled(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, diff --git a/azurerm/internal/services/compute/linux_virtual_machine_scale_set_extensions_test.go b/azurerm/internal/services/compute/linux_virtual_machine_scale_set_extensions_test.go index 02d6e2e15a06..022983454dd1 100644 --- a/azurerm/internal/services/compute/linux_virtual_machine_scale_set_extensions_test.go +++ b/azurerm/internal/services/compute/linux_virtual_machine_scale_set_extensions_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -13,10 +12,10 @@ func TestAccLinuxVirtualMachineScaleSet_extensionDoNotRunExtensionsOnOverProvisi data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, 
[]resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.extensionDoNotRunExtensionsOnOverProvisionedMachines(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -30,10 +29,10 @@ func TestAccLinuxVirtualMachineScaleSet_extensionDoNotRunExtensionsOnOverProvisi data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.extensionDoNotRunExtensionsOnOverProvisionedMachines(data, false), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -42,7 +41,7 @@ func TestAccLinuxVirtualMachineScaleSet_extensionDoNotRunExtensionsOnOverProvisi ), { Config: r.extensionDoNotRunExtensionsOnOverProvisionedMachines(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -51,7 +50,7 @@ func TestAccLinuxVirtualMachineScaleSet_extensionDoNotRunExtensionsOnOverProvisi ), { Config: r.extensionDoNotRunExtensionsOnOverProvisionedMachines(data, false), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -65,10 +64,10 @@ func TestAccLinuxVirtualMachineScaleSet_extensionBasic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.extensionBasic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -80,10 +79,10 @@ func 
TestAccLinuxVirtualMachineScaleSet_extensionOnlySettings(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.extensionOnlySettings(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -95,17 +94,17 @@ func TestAccLinuxVirtualMachineScaleSet_extensionForceUpdateTag(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.extensionForceUpdateTag(data, "first"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep("admin_password", "extension.0.protected_settings"), { Config: r.extensionForceUpdateTag(data, "second"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -117,10 +116,10 @@ func TestAccLinuxVirtualMachineScaleSet_extensionsMultiple(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.extensionMultiple(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -132,24 +131,24 @@ func TestAccLinuxVirtualMachineScaleSet_extensionsUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, 
[]resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.extensionBasic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep("admin_password", "extension.0.protected_settings"), { Config: r.extensionUpdate(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep("admin_password", "extension.0.protected_settings"), { Config: r.extensionBasic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -161,10 +160,10 @@ func TestAccLinuxVirtualMachineScaleSet_extensionsRollingUpgradeWithHealthExtens data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.extensionsRollingUpgradeWithHealthExtension(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -176,10 +175,10 @@ func TestAccLinuxVirtualMachineScaleSet_extensionsAutomaticUpgradeWithHealthExte data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.extensionsAutomaticUpgradeWithHealthExtension(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -191,17 +190,17 @@ func TestAccLinuxVirtualMachineScaleSet_extensionAutomaticUpgradeUpdate(t *testi data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := 
LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.extensionsWithHealthExtension(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep("admin_password", "extension.0.protected_settings"), { Config: r.extensionsAutomaticUpgradeWithHealthExtension(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -213,10 +212,10 @@ func TestAccLinuxVirtualMachineScaleSet_extensionWithTimeBudget(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.extensionWithTimeBudget(data, "PT30M"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -228,24 +227,24 @@ func TestAccLinuxVirtualMachineScaleSet_extensionWithTimeBudgetUpdate(t *testing data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.extensionWithTimeBudget(data, "PT30M"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep("admin_password", "extension.0.protected_settings"), { Config: r.extensionWithTimeBudget(data, "PT1H"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep("admin_password", "extension.0.protected_settings"), { Config: r.extensionWithTimeBudget(data, "PT30M"), 
- Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -257,10 +256,10 @@ func TestAccLinuxVirtualMachineScaleSet_extensionTimeBudgetWithoutExtensions(t * data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.extensionTimeBudgetWithoutExtensions(data, "PT30M"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -272,24 +271,24 @@ func TestAccLinuxVirtualMachineScaleSet_extensionTimeBudgetWithoutExtensionsUpda data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.extensionTimeBudgetWithoutExtensions(data, "PT30M"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep("admin_password", "extension.0.protected_settings"), { Config: r.extensionTimeBudgetWithoutExtensions(data, "PT1H"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep("admin_password", "extension.0.protected_settings"), { Config: r.extensionTimeBudgetWithoutExtensions(data, "PT30M"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -301,10 +300,10 @@ func TestAccLinuxVirtualMachineScaleSet_extensionsAutomaticUpgradeWithServiceFab data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, 
[]resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.extensionsAutomaticUpgradeWithServiceFabricExtension(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, diff --git a/azurerm/internal/services/compute/linux_virtual_machine_scale_set_identity_resource_test.go b/azurerm/internal/services/compute/linux_virtual_machine_scale_set_identity_resource_test.go index 4b9331ede0fe..ec3990a4be2e 100644 --- a/azurerm/internal/services/compute/linux_virtual_machine_scale_set_identity_resource_test.go +++ b/azurerm/internal/services/compute/linux_virtual_machine_scale_set_identity_resource_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -13,10 +12,10 @@ func TestAccLinuxVirtualMachineScaleSet_identityNone(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.authPassword(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("identity.%").HasValue("0"), ), @@ -31,10 +30,10 @@ func TestAccLinuxVirtualMachineScaleSet_identitySystemAssigned(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.identitySystemAssigned(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( 
check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("identity.0.principal_id").Exists(), ), @@ -45,7 +44,7 @@ func TestAccLinuxVirtualMachineScaleSet_identitySystemAssigned(t *testing.T) { { // disable it Config: r.authPassword(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("identity.%").HasValue("0"), ), @@ -55,7 +54,7 @@ func TestAccLinuxVirtualMachineScaleSet_identitySystemAssigned(t *testing.T) { ), { Config: r.identitySystemAssigned(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("identity.0.principal_id").Exists(), ), @@ -70,10 +69,10 @@ func TestAccLinuxVirtualMachineScaleSet_identityUserAssigned(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.identityUserAssigned(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -83,7 +82,7 @@ func TestAccLinuxVirtualMachineScaleSet_identityUserAssigned(t *testing.T) { { // disable it Config: r.authPassword(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("identity.%").HasValue("0"), ), @@ -93,7 +92,7 @@ func TestAccLinuxVirtualMachineScaleSet_identityUserAssigned(t *testing.T) { ), { Config: r.identityUserAssigned(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -103,7 +102,7 @@ func TestAccLinuxVirtualMachineScaleSet_identityUserAssigned(t 
*testing.T) { { // second Config: r.identityUserAssignedUpdated(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -117,10 +116,10 @@ func TestAccLinuxVirtualMachineScaleSet_identitySystemAssignedUserAssigned(t *te data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.identitySystemAssignedUserAssigned(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("identity.0.principal_id").Exists(), ), @@ -131,7 +130,7 @@ func TestAccLinuxVirtualMachineScaleSet_identitySystemAssignedUserAssigned(t *te { // disable it Config: r.authPassword(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("identity.%").HasValue("0"), ), @@ -141,7 +140,7 @@ func TestAccLinuxVirtualMachineScaleSet_identitySystemAssignedUserAssigned(t *te ), { Config: r.identitySystemAssignedUserAssigned(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("identity.0.principal_id").Exists(), ), diff --git a/azurerm/internal/services/compute/linux_virtual_machine_scale_set_images_resource_test.go b/azurerm/internal/services/compute/linux_virtual_machine_scale_set_images_resource_test.go index 54642ce4df61..e31c65e62b66 100644 --- a/azurerm/internal/services/compute/linux_virtual_machine_scale_set_images_resource_test.go +++ b/azurerm/internal/services/compute/linux_virtual_machine_scale_set_images_resource_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - 
"github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -13,10 +12,10 @@ func TestAccLinuxVirtualMachineScaleSet_imagesAutomaticUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.imagesAutomaticUpdate(data, "16.04-LTS"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -25,7 +24,7 @@ func TestAccLinuxVirtualMachineScaleSet_imagesAutomaticUpdate(t *testing.T) { ), { Config: r.imagesAutomaticUpdate(data, "18.04-LTS"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -39,10 +38,10 @@ func TestAccLinuxVirtualMachineScaleSet_imagesDisableAutomaticUpdate(t *testing. data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.imagesDisableAutomaticUpdate(data, "16.04-LTS"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -51,7 +50,7 @@ func TestAccLinuxVirtualMachineScaleSet_imagesDisableAutomaticUpdate(t *testing. 
), { Config: r.imagesDisableAutomaticUpdate(data, "18.04-LTS"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -65,7 +64,7 @@ func TestAccLinuxVirtualMachineScaleSet_imagesFromCapturedVirtualMachineImage(t data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // provision a standard Virtual Machine with an Unmanaged Disk Config: r.imagesFromVirtualMachinePrerequisitesWithVM(data), @@ -81,7 +80,7 @@ func TestAccLinuxVirtualMachineScaleSet_imagesFromCapturedVirtualMachineImage(t { // then provision a Virtual Machine Scale Set using this image Config: r.imagesFromVirtualMachine(data, "first"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -91,7 +90,7 @@ func TestAccLinuxVirtualMachineScaleSet_imagesFromCapturedVirtualMachineImage(t { // then update the image on this Virtual Machine Scale Set Config: r.imagesFromVirtualMachine(data, "second"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), // Ensure the storage account and disk size has not changed check.That(data.ResourceName).Key("os_disk.0.storage_account_type").HasValue("Standard_LRS"), @@ -108,10 +107,10 @@ func TestAccLinuxVirtualMachineScaleSet_imagesManualUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.imagesManualUpdate(data, "16.04-LTS"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), 
), }, @@ -120,7 +119,7 @@ func TestAccLinuxVirtualMachineScaleSet_imagesManualUpdate(t *testing.T) { ), { Config: r.imagesManualUpdate(data, "18.04-LTS"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -134,10 +133,10 @@ func TestAccLinuxVirtualMachineScaleSet_imagesManualUpdateExternalRoll(t *testin data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.imagesManualUpdateExternalRoll(data, "16.04-LTS"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -146,7 +145,7 @@ func TestAccLinuxVirtualMachineScaleSet_imagesManualUpdateExternalRoll(t *testin ), { Config: r.imagesManualUpdateExternalRoll(data, "18.04-LTS"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -160,10 +159,10 @@ func TestAccLinuxVirtualMachineScaleSet_imagesRollingUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.imagesRollingUpdate(data, "16.04-LTS"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -172,7 +171,7 @@ func TestAccLinuxVirtualMachineScaleSet_imagesRollingUpdate(t *testing.T) { ), { Config: r.imagesRollingUpdate(data, "18.04-LTS"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -186,10 +185,10 @@ func TestAccLinuxVirtualMachineScaleSet_imagesPlan(t 
*testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.imagesPlan(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, diff --git a/azurerm/internal/services/compute/linux_virtual_machine_scale_set_network_resource_test.go b/azurerm/internal/services/compute/linux_virtual_machine_scale_set_network_resource_test.go index 791d96a3a203..ed749fc7083f 100644 --- a/azurerm/internal/services/compute/linux_virtual_machine_scale_set_network_resource_test.go +++ b/azurerm/internal/services/compute/linux_virtual_machine_scale_set_network_resource_test.go @@ -5,7 +5,6 @@ import ( "regexp" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -14,10 +13,10 @@ func TestAccLinuxVirtualMachineScaleSet_networkAcceleratedNetworking(t *testing. 
data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkAcceleratedNetworking(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -31,10 +30,10 @@ func TestAccLinuxVirtualMachineScaleSet_networkAcceleratedNetworkingUpdated(t *t data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkAcceleratedNetworking(data, false), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -43,7 +42,7 @@ func TestAccLinuxVirtualMachineScaleSet_networkAcceleratedNetworkingUpdated(t *t ), { Config: r.networkAcceleratedNetworking(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -52,7 +51,7 @@ func TestAccLinuxVirtualMachineScaleSet_networkAcceleratedNetworkingUpdated(t *t ), { Config: r.networkAcceleratedNetworking(data, false), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -66,10 +65,10 @@ func TestAccLinuxVirtualMachineScaleSet_networkApplicationGateway(t *testing.T) data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkApplicationGateway(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( 
check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -83,10 +82,10 @@ func TestAccLinuxVirtualMachineScaleSet_networkApplicationSecurityGroup(t *testi data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkApplicationSecurityGroup(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -100,11 +99,11 @@ func TestAccLinuxVirtualMachineScaleSet_networkApplicationSecurityGroupUpdate(t data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // none Config: r.networkPrivate(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -114,7 +113,7 @@ func TestAccLinuxVirtualMachineScaleSet_networkApplicationSecurityGroupUpdate(t { // one Config: r.networkApplicationSecurityGroup(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -124,7 +123,7 @@ func TestAccLinuxVirtualMachineScaleSet_networkApplicationSecurityGroupUpdate(t { // another Config: r.networkApplicationSecurityGroupUpdated(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -134,7 +133,7 @@ func TestAccLinuxVirtualMachineScaleSet_networkApplicationSecurityGroupUpdate(t { // none Config: r.networkPrivate(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -148,10 +147,10 @@ func 
TestAccLinuxVirtualMachineScaleSet_networkDNSServers(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkDNSServers(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -160,7 +159,7 @@ func TestAccLinuxVirtualMachineScaleSet_networkDNSServers(t *testing.T) { ), { Config: r.networkDNSServersUpdated(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -174,11 +173,11 @@ func TestAccLinuxVirtualMachineScaleSet_networkIPForwarding(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // enabled Config: r.networkIPForwarding(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -188,7 +187,7 @@ func TestAccLinuxVirtualMachineScaleSet_networkIPForwarding(t *testing.T) { { // disabled Config: r.networkPrivate(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -198,7 +197,7 @@ func TestAccLinuxVirtualMachineScaleSet_networkIPForwarding(t *testing.T) { { // enabled Config: r.networkIPForwarding(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -212,10 +211,10 @@ func TestAccLinuxVirtualMachineScaleSet_networkIPv6(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := 
LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkIPv6(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), ExpectError: regexp.MustCompile("Error expanding `network_interface`: An IPv6 Primary IP Configuration is unsupported - instead add a IPv4 IP Configuration as the Primary and make the IPv6 IP Configuration the secondary"), @@ -227,10 +226,10 @@ func TestAccLinuxVirtualMachineScaleSet_networkLoadBalancer(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkLoadBalancer(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -244,10 +243,10 @@ func TestAccLinuxVirtualMachineScaleSet_networkMultipleIPConfigurations(t *testi data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkMultipleIPConfigurations(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -261,10 +260,10 @@ func TestAccLinuxVirtualMachineScaleSet_networkMultipleIPConfigurationsIPv6(t *t data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkMultipleIPConfigurationsIPv6(data), - Check: resource.ComposeTestCheckFunc( + Check: 
acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -278,10 +277,10 @@ func TestAccLinuxVirtualMachineScaleSet_networkMultipleNICs(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkMultipleNICs(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -295,10 +294,10 @@ func TestAccLinuxVirtualMachineScaleSet_networkMultipleNICsMultipleIPConfigurati data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkMultipleNICsMultipleIPConfigurations(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -312,10 +311,10 @@ func TestAccLinuxVirtualMachineScaleSet_networkMultipleNICsMultiplePublicIPs(t * data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkMultipleNICsMultiplePublicIPs(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -329,10 +328,10 @@ func TestAccLinuxVirtualMachineScaleSet_networkMultipleNICsWithDifferentDNSServe data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: 
r.networkMultipleNICsWithDifferentDNSServers(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -346,10 +345,10 @@ func TestAccLinuxVirtualMachineScaleSet_networkNetworkSecurityGroup(t *testing.T data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkNetworkSecurityGroup(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -363,11 +362,11 @@ func TestAccLinuxVirtualMachineScaleSet_networkNetworkSecurityGroupUpdate(t *tes data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // without Config: r.networkPrivate(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -377,7 +376,7 @@ func TestAccLinuxVirtualMachineScaleSet_networkNetworkSecurityGroupUpdate(t *tes { // add one Config: r.networkNetworkSecurityGroup(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -387,7 +386,7 @@ func TestAccLinuxVirtualMachineScaleSet_networkNetworkSecurityGroupUpdate(t *tes { // change it Config: r.networkNetworkSecurityGroupUpdated(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -397,7 +396,7 @@ func TestAccLinuxVirtualMachineScaleSet_networkNetworkSecurityGroupUpdate(t *tes { // remove it Config: r.networkPrivate(data), - Check: resource.ComposeTestCheckFunc( + 
Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -411,10 +410,10 @@ func TestAccLinuxVirtualMachineScaleSet_networkPrivate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkPrivate(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -428,10 +427,10 @@ func TestAccLinuxVirtualMachineScaleSet_networkPublicIP(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkPublicIP(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -445,10 +444,10 @@ func TestAccLinuxVirtualMachineScaleSet_networkPublicIPDomainNameLabel(t *testin data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkPublicIPDomainNameLabel(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -462,10 +461,10 @@ func TestAccLinuxVirtualMachineScaleSet_networkPublicIPFromPrefix(t *testing.T) data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkPublicIPFromPrefix(data), - Check: 
resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -479,10 +478,10 @@ func TestAccLinuxVirtualMachineScaleSet_networkPublicIPTags(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkPublicIPTags(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, diff --git a/azurerm/internal/services/compute/linux_virtual_machine_scale_set_other_resource_test.go b/azurerm/internal/services/compute/linux_virtual_machine_scale_set_other_resource_test.go index ea214aa13193..719d0d627ea7 100644 --- a/azurerm/internal/services/compute/linux_virtual_machine_scale_set_other_resource_test.go +++ b/azurerm/internal/services/compute/linux_virtual_machine_scale_set_other_resource_test.go @@ -5,7 +5,6 @@ import ( "regexp" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -14,11 +13,11 @@ func TestAccLinuxVirtualMachineScaleSet_otherBootDiagnostics(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // Enabled Config: r.otherBootDiagnostics(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -28,7 +27,7 @@ func TestAccLinuxVirtualMachineScaleSet_otherBootDiagnostics(t *testing.T) { { // Removed Config: 
r.otherBootDiagnosticsDisabled(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -38,7 +37,7 @@ func TestAccLinuxVirtualMachineScaleSet_otherBootDiagnostics(t *testing.T) { { // Enabled Config: r.otherBootDiagnostics(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -52,11 +51,11 @@ func TestAccLinuxVirtualMachineScaleSet_otherBootDiagnosticsManaged(t *testing.T data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // Enabled Config: r.otherBootDiagnosticsManaged(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -66,7 +65,7 @@ func TestAccLinuxVirtualMachineScaleSet_otherBootDiagnosticsManaged(t *testing.T { // Removed Config: r.otherBootDiagnosticsDisabled(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -76,7 +75,7 @@ func TestAccLinuxVirtualMachineScaleSet_otherBootDiagnosticsManaged(t *testing.T { // Enabled Config: r.otherBootDiagnosticsManaged(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -90,10 +89,10 @@ func TestAccLinuxVirtualMachineScaleSet_otherComputerNamePrefix(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherComputerNamePrefix(data), - Check: resource.ComposeTestCheckFunc( + Check: 
acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -107,7 +106,7 @@ func TestAccLinuxVirtualMachineScaleSet_otherComputerNamePrefixInvalid(t *testin data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherComputerNamePrefixInvalid(data), ExpectError: regexp.MustCompile("unable to assume default computer name prefix"), @@ -119,10 +118,10 @@ func TestAccLinuxVirtualMachineScaleSet_otherCustomData(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherCustomData(data, "/bin/bash"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -132,7 +131,7 @@ func TestAccLinuxVirtualMachineScaleSet_otherCustomData(t *testing.T) { ), { Config: r.otherCustomData(data, "/bin/zsh"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -143,7 +142,7 @@ func TestAccLinuxVirtualMachineScaleSet_otherCustomData(t *testing.T) { { // removed Config: r.authPassword(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -154,14 +153,31 @@ func TestAccLinuxVirtualMachineScaleSet_otherCustomData(t *testing.T) { }) } +func TestAccLinuxVirtualMachineScaleSet_otherForceDelete(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") + r := LinuxVirtualMachineScaleSetResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.otherForceDelete(data), + 
Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep( + "admin_password", + ), + }) +} + func TestAccLinuxVirtualMachineScaleSet_otherPrioritySpotDeallocate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherPrioritySpot(data, "Deallocate"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -175,10 +191,10 @@ func TestAccLinuxVirtualMachineScaleSet_otherPrioritySpotDelete(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherPrioritySpot(data, "Delete"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -192,11 +208,11 @@ func TestAccLinuxVirtualMachineScaleSet_otherPrioritySpotMaxBidPrice(t *testing. data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // expensive, but guarantees this test will pass Config: r.otherPrioritySpotMaxBidPrice(data, "0.5000"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -205,7 +221,7 @@ func TestAccLinuxVirtualMachineScaleSet_otherPrioritySpotMaxBidPrice(t *testing. 
), { Config: r.otherPrioritySpotMaxBidPrice(data, "-1"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -219,10 +235,10 @@ func TestAccLinuxVirtualMachineScaleSet_otherPriorityRegular(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherPriorityRegular(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -236,10 +252,10 @@ func TestAccLinuxVirtualMachineScaleSet_otherRequiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.authPassword(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -254,10 +270,10 @@ func TestAccLinuxVirtualMachineScaleSet_otherSecret(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherSecret(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -267,7 +283,7 @@ func TestAccLinuxVirtualMachineScaleSet_otherSecret(t *testing.T) { { // update Config: r.otherSecretUpdated(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -278,7 +294,7 @@ func 
TestAccLinuxVirtualMachineScaleSet_otherSecret(t *testing.T) { { // removed Config: r.otherSecretRemoved(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -292,10 +308,10 @@ func TestAccLinuxVirtualMachineScaleSet_otherTags(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherTags(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -305,7 +321,7 @@ func TestAccLinuxVirtualMachineScaleSet_otherTags(t *testing.T) { { // add one Config: r.otherTagsUpdated(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -315,7 +331,7 @@ func TestAccLinuxVirtualMachineScaleSet_otherTags(t *testing.T) { { // remove all Config: r.authPassword(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -329,10 +345,10 @@ func TestAccLinuxVirtualMachineScaleSet_otherVMAgent(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherVMAgent(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -346,10 +362,10 @@ func TestAccLinuxVirtualMachineScaleSet_otherVMAgentDisabled(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - 
data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherVMAgent(data, false), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -363,16 +379,16 @@ func TestAccLinuxVirtualMachineScaleSet_updateHealthProbe(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.updateLoadBalancerHealthProbeSKUBasic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, { Config: r.updateLoadBalancerHealthProbeSKUStandard(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -383,10 +399,10 @@ func TestAccLinuxVirtualMachineScaleSet_otherScaleInPolicy(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherScaleInPolicy(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("scale_in_policy").HasValue("Default"), ), @@ -401,11 +417,11 @@ func TestAccLinuxVirtualMachineScaleSet_otherTerminateNotification(t *testing.T) data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ // turn terminate notification on { Config: r.otherTerminateNotification(data, true), - Check: 
resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("terminate_notification.#").HasValue("1"), check.That(data.ResourceName).Key("terminate_notification.0.enabled").HasValue("true"), @@ -417,7 +433,7 @@ func TestAccLinuxVirtualMachineScaleSet_otherTerminateNotification(t *testing.T) // turn terminate notification off { Config: r.otherTerminateNotification(data, false), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("terminate_notification.#").HasValue("1"), check.That(data.ResourceName).Key("terminate_notification.0.enabled").HasValue("false"), @@ -429,7 +445,7 @@ func TestAccLinuxVirtualMachineScaleSet_otherTerminateNotification(t *testing.T) // turn terminate notification on again { Config: r.otherTerminateNotification(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("terminate_notification.#").HasValue("1"), check.That(data.ResourceName).Key("terminate_notification.0.enabled").HasValue("true"), @@ -445,11 +461,11 @@ func TestAccLinuxVirtualMachineScaleSet_otherAutomaticRepairsPolicy(t *testing.T data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ // turn automatic repair on { Config: r.otherAutomaticRepairsPolicy(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -459,7 +475,7 @@ func TestAccLinuxVirtualMachineScaleSet_otherAutomaticRepairsPolicy(t *testing.T // turn automatic repair off { Config: r.otherAutomaticRepairsPolicy(data, false), - Check: 
resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -469,7 +485,7 @@ func TestAccLinuxVirtualMachineScaleSet_otherAutomaticRepairsPolicy(t *testing.T // turn automatic repair on again { Config: r.otherAutomaticRepairsPolicy(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -483,10 +499,10 @@ func TestAccLinuxVirtualMachineScaleSet_otherUpgradeMode(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherUpgradeMode(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -500,10 +516,10 @@ func TestAccLinuxVirtualMachineScaleSet_otherEncryptionAtHost(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherEncryptionAtHost(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -515,24 +531,24 @@ func TestAccLinuxVirtualMachineScaleSet_otherEncryptionAtHostUpdate(t *testing.T data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherEncryptionAtHost(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, 
data.ImportStep("admin_password", "extension.0.protected_settings"), { Config: r.otherEncryptionAtHost(data, false), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep("admin_password", "extension.0.protected_settings"), { Config: r.otherEncryptionAtHost(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -544,10 +560,10 @@ func TestAccLinuxVirtualMachineScaleSet_otherEncryptionAtHostWithCMK(t *testing. data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherEncryptionAtHostWithCMK(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -559,10 +575,10 @@ func TestAccLinuxVirtualMachineScaleSet_otherPlatformFaultDomainCount(t *testing data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherPlatformFaultDomainCount(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -574,10 +590,10 @@ func TestAccLinuxVirtualMachineScaleSet_otherRollingUpgradePolicyUpdate(t *testi data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherRollingUpgradePolicyUpdate(data, 10, 10, 10, "PT0S"), - Check: resource.ComposeTestCheckFunc( + Check: 
acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -586,7 +602,7 @@ func TestAccLinuxVirtualMachineScaleSet_otherRollingUpgradePolicyUpdate(t *testi ), { Config: r.otherRollingUpgradePolicyUpdate(data, 20, 20, 20, "PT1S"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -600,10 +616,10 @@ func TestAccLinuxVirtualMachineScaleSet_otherHealthProbeUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherHealthProbe(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -612,7 +628,7 @@ func TestAccLinuxVirtualMachineScaleSet_otherHealthProbeUpdate(t *testing.T) { ), { Config: r.otherHealthProbeUpdated(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -900,6 +916,55 @@ resource "azurerm_linux_virtual_machine_scale_set" "test" { `, r.template(data), data.RandomInteger, customData) } +func (r LinuxVirtualMachineScaleSetResource) otherForceDelete(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features { + virtual_machine_scale_set { + force_delete = true + } + } +} + +%s + +resource "azurerm_linux_virtual_machine_scale_set" "test" { + name = "acctestvmss-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + sku = "Standard_F2" + instances = 1 + admin_username = "adminuser" + admin_password = "P@ssword1234!" 
+ + disable_password_authentication = false + + source_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "16.04-LTS" + version = "latest" + } + + os_disk { + storage_account_type = "Standard_LRS" + caching = "ReadWrite" + } + + network_interface { + name = "example" + primary = true + + ip_configuration { + name = "internal" + primary = true + subnet_id = azurerm_subnet.test.id + } + } +} +`, r.template(data), data.RandomInteger) +} + func (r LinuxVirtualMachineScaleSetResource) otherPrioritySpot(data acceptance.TestData, evictionPolicy string) string { return fmt.Sprintf(` %s diff --git a/azurerm/internal/services/compute/linux_virtual_machine_scale_set_resource.go b/azurerm/internal/services/compute/linux_virtual_machine_scale_set_resource.go index 46de3149af60..a17e9d687012 100644 --- a/azurerm/internal/services/compute/linux_virtual_machine_scale_set_resource.go +++ b/azurerm/internal/services/compute/linux_virtual_machine_scale_set_resource.go @@ -6,8 +6,6 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" azValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" @@ -18,12 +16,13 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/base64" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/suppress" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func resourceLinuxVirtualMachineScaleSet() *schema.Resource { - return &schema.Resource{ +func resourceLinuxVirtualMachineScaleSet() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceLinuxVirtualMachineScaleSetCreate, Read: resourceLinuxVirtualMachineScaleSetRead, Update: resourceLinuxVirtualMachineScaleSetUpdate, @@ -34,19 +33,19 @@ func resourceLinuxVirtualMachineScaleSet() *schema.Resource { return err }, importVirtualMachineScaleSet(compute.Linux, "azurerm_linux_virtual_machine_scale_set")), - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(time.Minute * 30), - Update: schema.DefaultTimeout(time.Minute * 60), - Read: schema.DefaultTimeout(time.Minute * 5), - Delete: schema.DefaultTimeout(time.Minute * 30), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(time.Minute * 30), + Update: pluginsdk.DefaultTimeout(time.Minute * 60), + Read: pluginsdk.DefaultTimeout(time.Minute * 5), + Delete: pluginsdk.DefaultTimeout(time.Minute * 30), }, // TODO: exposing requireGuestProvisionSignal once it's available // https://github.com/Azure/azure-rest-api-specs/pull/7246 - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.VirtualMachineName, @@ -58,7 +57,7 @@ func resourceLinuxVirtualMachineScaleSet() *schema.Resource { // Required "admin_username": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validation.StringIsNotEmpty, @@ -69,13 +68,13 @@ func resourceLinuxVirtualMachineScaleSet() *schema.Resource { "os_disk": VirtualMachineScaleSetOSDiskSchema(), "instances": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Required: true, ValidateFunc: validation.IntAtLeast(0), }, "sku": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, 
Required: true, ValidateFunc: validation.StringIsNotEmpty, }, @@ -84,7 +83,7 @@ func resourceLinuxVirtualMachineScaleSet() *schema.Resource { "additional_capabilities": VirtualMachineScaleSetAdditionalCapabilitiesSchema(), "admin_password": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, Sensitive: true, @@ -100,7 +99,7 @@ func resourceLinuxVirtualMachineScaleSet() *schema.Resource { "boot_diagnostics": bootDiagnosticsSchema(), "computer_name_prefix": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, // Computed since we reuse the VM name if one's not specified @@ -115,25 +114,25 @@ func resourceLinuxVirtualMachineScaleSet() *schema.Resource { "data_disk": VirtualMachineScaleSetDataDiskSchema(), "disable_password_authentication": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: true, }, "do_not_run_extensions_on_overprovisioned_machines": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: false, }, "encryption_at_host_enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, }, "eviction_policy": { // only applicable when `priority` is set to `Spot` - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, ValidateFunc: validation.StringInSlice([]string{ @@ -145,14 +144,14 @@ func resourceLinuxVirtualMachineScaleSet() *schema.Resource { "extension": VirtualMachineScaleSetExtensionsSchema(), "extensions_time_budget": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Default: "PT1H30M", ValidateFunc: azValidate.ISO8601DurationBetween("PT15M", "PT2H"), }, "health_probe_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ValidateFunc: azure.ValidateResourceID, }, @@ -160,14 +159,14 @@ func resourceLinuxVirtualMachineScaleSet() *schema.Resource { "identity": VirtualMachineScaleSetIdentitySchema(), "max_bid_price": { - Type: 
schema.TypeFloat, + Type: pluginsdk.TypeFloat, Optional: true, Default: -1, ValidateFunc: validate.SpotMaxPrice, }, "overprovision": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: true, }, @@ -175,14 +174,14 @@ func resourceLinuxVirtualMachineScaleSet() *schema.Resource { "plan": planSchema(), "platform_fault_domain_count": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, ForceNew: true, Computed: true, }, "priority": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, Default: string(compute.Regular), @@ -193,14 +192,14 @@ func resourceLinuxVirtualMachineScaleSet() *schema.Resource { }, "provision_vm_agent": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: true, ForceNew: true, }, "proximity_placement_group_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, ValidateFunc: validate.ProximityPlacementGroupID, @@ -213,13 +212,13 @@ func resourceLinuxVirtualMachineScaleSet() *schema.Resource { "secret": linuxSecretSchema(), "single_placement_group": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: true, }, "source_image_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ValidateFunc: validation.Any( validate.ImageID, @@ -233,7 +232,7 @@ func resourceLinuxVirtualMachineScaleSet() *schema.Resource { "tags": tags.Schema(), "upgrade_mode": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, Default: string(compute.Manual), @@ -245,14 +244,14 @@ func resourceLinuxVirtualMachineScaleSet() *schema.Resource { }, "zone_balance": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, ForceNew: true, Default: false, }, "scale_in_policy": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Default: string(compute.Default), ValidateFunc: validation.StringInSlice([]string{ @@ 
-268,14 +267,14 @@ func resourceLinuxVirtualMachineScaleSet() *schema.Resource { // Computed "unique_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, }, } } -func resourceLinuxVirtualMachineScaleSetCreate(d *schema.ResourceData, meta interface{}) error { +func resourceLinuxVirtualMachineScaleSetCreate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.VMScaleSetClient ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -335,7 +334,7 @@ func resourceLinuxVirtualMachineScaleSetCreate(d *schema.ResourceData, meta inte return err } - sshKeysRaw := d.Get("admin_ssh_key").(*schema.Set).List() + sshKeysRaw := d.Get("admin_ssh_key").(*pluginsdk.Set).List() sshKeys := ExpandSSHKeys(sshKeysRaw) healthProbeId := d.Get("health_probe_id").(string) @@ -417,7 +416,7 @@ func resourceLinuxVirtualMachineScaleSetCreate(d *schema.ResourceData, meta inte hasHealthExtension := false if vmExtensionsRaw, ok := d.GetOk("extension"); ok { - virtualMachineProfile.ExtensionProfile, hasHealthExtension, err = expandVirtualMachineScaleSetExtensions(vmExtensionsRaw.(*schema.Set).List()) + virtualMachineProfile.ExtensionProfile, hasHealthExtension, err = expandVirtualMachineScaleSetExtensions(vmExtensionsRaw.(*pluginsdk.Set).List()) if err != nil { return err } @@ -553,7 +552,7 @@ func resourceLinuxVirtualMachineScaleSetCreate(d *schema.ResourceData, meta inte return resourceLinuxVirtualMachineScaleSetRead(d, meta) } -func resourceLinuxVirtualMachineScaleSetUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceLinuxVirtualMachineScaleSetUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.VMScaleSetClient ctx, cancel := timeouts.ForUpdate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -647,7 +646,7 @@ func resourceLinuxVirtualMachineScaleSetUpdate(d *schema.ResourceData, meta inte linuxConfig := 
compute.LinuxConfiguration{} if d.HasChange("admin_ssh_key") { - sshKeysRaw := d.Get("admin_ssh_key").(*schema.Set).List() + sshKeysRaw := d.Get("admin_ssh_key").(*pluginsdk.Set).List() sshKeys := ExpandSSHKeys(sshKeysRaw) linuxConfig.SSH = &compute.SSHConfiguration{ PublicKeys: &sshKeys, @@ -816,7 +815,7 @@ func resourceLinuxVirtualMachineScaleSetUpdate(d *schema.ResourceData, meta inte if d.HasChanges("extension", "extensions_time_budget") { updateInstances = true - extensionProfile, _, err := expandVirtualMachineScaleSetExtensions(d.Get("extension").(*schema.Set).List()) + extensionProfile, _, err := expandVirtualMachineScaleSetExtensions(d.Get("extension").(*pluginsdk.Set).List()) if err != nil { return err } @@ -847,7 +846,7 @@ func resourceLinuxVirtualMachineScaleSetUpdate(d *schema.ResourceData, meta inte return resourceLinuxVirtualMachineScaleSetRead(d, meta) } -func resourceLinuxVirtualMachineScaleSetRead(d *schema.ResourceData, meta interface{}) error { +func resourceLinuxVirtualMachineScaleSetRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.VMScaleSetClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() @@ -985,7 +984,7 @@ func resourceLinuxVirtualMachineScaleSetRead(d *schema.ResourceData, meta interf if err != nil { return fmt.Errorf("Error flattening `admin_ssh_key`: %+v", err) } - if err := d.Set("admin_ssh_key", schema.NewSet(SSHKeySchemaHash, *flattenedSshKeys)); err != nil { + if err := d.Set("admin_ssh_key", pluginsdk.NewSet(SSHKeySchemaHash, *flattenedSshKeys)); err != nil { return fmt.Errorf("Error setting `admin_ssh_key`: %+v", err) } } @@ -1054,7 +1053,7 @@ func resourceLinuxVirtualMachineScaleSetRead(d *schema.ResourceData, meta interf return tags.FlattenAndSet(d, resp.Tags) } -func resourceLinuxVirtualMachineScaleSetDelete(d *schema.ResourceData, meta interface{}) error { +func resourceLinuxVirtualMachineScaleSetDelete(d *pluginsdk.ResourceData, meta 
interface{}) error { client := meta.(*clients.Client).Compute.VMScaleSetClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() @@ -1102,7 +1101,7 @@ func resourceLinuxVirtualMachineScaleSetDelete(d *schema.ResourceData, meta inte } log.Printf("[DEBUG] Deleting Linux Virtual Machine Scale Set %q (Resource Group %q)..", id.Name, id.ResourceGroup) - // @ArcturusZhang (mimicking from linux_virtual_machine_resource.go): sending `nil` here omits this value from being sent + // @ArcturusZhang (mimicking from linux_virtual_machine_pluginsdk.go): sending `nil` here omits this value from being sent // which matches the previous behaviour - we're only splitting this out so it's clear why // TODO: support force deletion once it's out of Preview, if applicable var forceDeletion *bool = nil diff --git a/azurerm/internal/services/compute/linux_virtual_machine_scale_set_resource_test.go b/azurerm/internal/services/compute/linux_virtual_machine_scale_set_resource_test.go index be85600de7a0..819327715c3b 100644 --- a/azurerm/internal/services/compute/linux_virtual_machine_scale_set_resource_test.go +++ b/azurerm/internal/services/compute/linux_virtual_machine_scale_set_resource_test.go @@ -4,17 +4,17 @@ import ( "context" "fmt" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) type LinuxVirtualMachineScaleSetResource struct { } -func (r LinuxVirtualMachineScaleSetResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (r 
LinuxVirtualMachineScaleSetResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := parse.VirtualMachineScaleSetID(state.ID) if err != nil { return nil, err diff --git a/azurerm/internal/services/compute/linux_virtual_machine_scale_set_scaling_resource_test.go b/azurerm/internal/services/compute/linux_virtual_machine_scale_set_scaling_resource_test.go index 60976a4be4c1..f2b126e2f8bf 100644 --- a/azurerm/internal/services/compute/linux_virtual_machine_scale_set_scaling_resource_test.go +++ b/azurerm/internal/services/compute/linux_virtual_machine_scale_set_scaling_resource_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -13,10 +12,10 @@ func TestAccLinuxVirtualMachineScaleSet_scalingAutoScale(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.scalingAutoScale(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -30,10 +29,10 @@ func TestAccLinuxVirtualMachineScaleSet_scalingInstanceCount(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.scalingInstanceCount(data, 1), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -42,7 +41,7 @@ func 
TestAccLinuxVirtualMachineScaleSet_scalingInstanceCount(t *testing.T) { ), { Config: r.scalingInstanceCount(data, 3), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -51,7 +50,7 @@ func TestAccLinuxVirtualMachineScaleSet_scalingInstanceCount(t *testing.T) { ), { Config: r.scalingInstanceCount(data, 5), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -61,7 +60,7 @@ func TestAccLinuxVirtualMachineScaleSet_scalingInstanceCount(t *testing.T) { { // update the count but the `sku` should be ignored Config: r.scalingInstanceCountIgnoreUpdatedSku(data, 3), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -80,10 +79,10 @@ func TestAccLinuxVirtualMachineScaleSet_scalingOverProvisionDisabled(t *testing. data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.scalingOverProvisionDisabled(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -97,10 +96,10 @@ func TestAccLinuxVirtualMachineScaleSet_scalingProximityPlacementGroup(t *testin data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.scalingProximityPlacementGroup(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -114,10 +113,10 @@ func TestAccLinuxVirtualMachineScaleSet_scalingSinglePlacementGroupDisabled(t *t 
data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.scalingSinglePlacementGroupDisabled(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -131,10 +130,10 @@ func TestAccLinuxVirtualMachineScaleSet_scalingSinglePlacementGroupDisabledUpdat data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.authPassword(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -143,7 +142,7 @@ func TestAccLinuxVirtualMachineScaleSet_scalingSinglePlacementGroupDisabledUpdat ), { Config: r.scalingSinglePlacementGroupDisabled(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -157,10 +156,10 @@ func TestAccLinuxVirtualMachineScaleSet_scalingUpdateSku(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.scalingUpdateSku(data, "Standard_F2"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -169,7 +168,7 @@ func TestAccLinuxVirtualMachineScaleSet_scalingUpdateSku(t *testing.T) { ), { Config: r.scalingUpdateSku(data, "Standard_F4"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, 
@@ -178,7 +177,7 @@ func TestAccLinuxVirtualMachineScaleSet_scalingUpdateSku(t *testing.T) { ), { Config: r.scalingUpdateSku(data, "Standard_F2"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -188,7 +187,7 @@ func TestAccLinuxVirtualMachineScaleSet_scalingUpdateSku(t *testing.T) { { // confirms that the `instances` count comes from the API Config: r.scalingUpdateSkuIgnoredUpdatedCount(data, "Standard_F2"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -203,10 +202,10 @@ func TestAccLinuxVirtualMachineScaleSet_scalingZonesSingle(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.scalingZonesSingle(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -220,10 +219,10 @@ func TestAccLinuxVirtualMachineScaleSet_scalingZonesMultiple(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.scalingZonesMultiple(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -237,10 +236,10 @@ func TestAccLinuxVirtualMachineScaleSet_scalingZonesBalance(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_linux_virtual_machine_scale_set", "test") r := LinuxVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: 
r.scalingZonesBalance(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, diff --git a/azurerm/internal/services/compute/managed_disk_data_source.go b/azurerm/internal/services/compute/managed_disk_data_source.go index 792716389a84..0c32e6db1d47 100644 --- a/azurerm/internal/services/compute/managed_disk_data_source.go +++ b/azurerm/internal/services/compute/managed_disk_data_source.go @@ -4,83 +4,83 @@ import ( "fmt" "time" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func dataSourceManagedDisk() *schema.Resource { - return &schema.Resource{ +func dataSourceManagedDisk() *pluginsdk.Resource { + return &pluginsdk.Resource{ Read: dataSourceManagedDiskRead, - Timeouts: &schema.ResourceTimeout{ - Read: schema.DefaultTimeout(5 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Read: pluginsdk.DefaultTimeout(5 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "resource_group_name": azure.SchemaResourceGroupNameForDataSource(), "create_option": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "disk_encryption_set_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "disk_iops_read_write": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Computed: true, }, 
"disk_mbps_read_write": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Computed: true, }, "disk_size_gb": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Computed: true, }, "image_reference_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "os_type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "source_resource_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "source_uri": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "storage_account_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "storage_account_type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, @@ -91,7 +91,7 @@ func dataSourceManagedDisk() *schema.Resource { } } -func dataSourceManagedDiskRead(d *schema.ResourceData, meta interface{}) error { +func dataSourceManagedDiskRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.DisksClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() diff --git a/azurerm/internal/services/compute/managed_disk_data_source_test.go b/azurerm/internal/services/compute/managed_disk_data_source_test.go index 6e162fa80bad..322f0e13422a 100644 --- a/azurerm/internal/services/compute/managed_disk_data_source_test.go +++ b/azurerm/internal/services/compute/managed_disk_data_source_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -19,10 +18,10 @@ func TestAccDataSourceManagedDisk_basic(t *testing.T) { name := fmt.Sprintf("acctestmanageddisk-%d", data.RandomInteger) resourceGroupName := fmt.Sprintf("acctestRG-%d", data.RandomInteger) - data.DataSourceTest(t, 
[]resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.basic(data, name, resourceGroupName), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("name").HasValue(name), check.That(data.ResourceName).Key("resource_group_name").HasValue(resourceGroupName), check.That(data.ResourceName).Key("storage_account_type").HasValue("Premium_LRS"), @@ -43,10 +42,10 @@ func TestAccDataSourceManagedDisk_basic_withUltraSSD(t *testing.T) { name := fmt.Sprintf("acctestmanageddisk-%d", data.RandomInteger) resourceGroupName := fmt.Sprintf("acctestRG-%d", data.RandomInteger) - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.basic_withUltraSSD(data, name, resourceGroupName), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("disk_iops_read_write").HasValue("101"), check.That(data.ResourceName).Key("disk_mbps_read_write").HasValue("10"), ), diff --git a/azurerm/internal/services/compute/managed_disk_resource.go b/azurerm/internal/services/compute/managed_disk_resource.go index 6cc30a17eff9..6f8ebaa05668 100644 --- a/azurerm/internal/services/compute/managed_disk_resource.go +++ b/azurerm/internal/services/compute/managed_disk_resource.go @@ -6,25 +6,23 @@ import ( "strings" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/validate" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/locks" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/suppress" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func resourceManagedDisk() *schema.Resource { - return &schema.Resource{ +func resourceManagedDisk() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceManagedDiskCreate, Read: resourceManagedDiskRead, Update: resourceManagedDiskUpdate, @@ -35,16 +33,16 @@ func resourceManagedDisk() *schema.Resource { return err }), - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(30 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, }, @@ -56,7 +54,7 @@ func resourceManagedDisk() *schema.Resource { "zones": azure.SchemaSingleZone(), "storage_account_type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, 
Required: true, ValidateFunc: validation.StringInSlice([]string{ string(compute.StandardLRS), @@ -68,7 +66,7 @@ func resourceManagedDisk() *schema.Resource { }, "create_option": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validation.StringInSlice([]string{ @@ -81,33 +79,33 @@ func resourceManagedDisk() *schema.Resource { }, "source_uri": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Computed: true, ForceNew: true, }, "source_resource_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, }, "storage_account_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, // Not supported by disk update ValidateFunc: azure.ValidateResourceID, }, "image_reference_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, }, "os_type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ValidateFunc: validation.StringInSlice([]string{ string(compute.Windows), @@ -116,26 +114,26 @@ func resourceManagedDisk() *schema.Resource { }, "disk_size_gb": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Computed: true, ValidateFunc: validate.ManagedDiskSizeGB, }, "disk_iops_read_write": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Computed: true, }, "disk_mbps_read_write": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Computed: true, }, "disk_encryption_set_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, // TODO: make this case-sensitive once this bug in the Azure API has been fixed: // https://github.com/Azure/azure-rest-api-specs/issues/8132 @@ -146,7 +144,7 @@ func resourceManagedDisk() *schema.Resource { "encryption_settings": encryptionSettingsSchema(), "network_access_policy": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, 
ValidateFunc: validation.StringInSlice([]string{ string(compute.AllowAll), @@ -155,7 +153,7 @@ func resourceManagedDisk() *schema.Resource { }, false), }, "disk_access_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, // TODO: make this case-sensitive once this bug in the Azure API has been fixed: // https://github.com/Azure/azure-rest-api-specs/issues/14192 @@ -164,7 +162,7 @@ func resourceManagedDisk() *schema.Resource { }, "tier": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Computed: true, }, @@ -174,7 +172,7 @@ func resourceManagedDisk() *schema.Resource { } } -func resourceManagedDiskCreate(d *schema.ResourceData, meta interface{}) error { +func resourceManagedDiskCreate(d *pluginsdk.ResourceData, meta interface{}) error { subscriptionId := meta.(*clients.Client).Account.SubscriptionId client := meta.(*clients.Client).Compute.DisksClient ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) @@ -342,7 +340,7 @@ func resourceManagedDiskCreate(d *schema.ResourceData, meta interface{}) error { return resourceManagedDiskRead(d, meta) } -func resourceManagedDiskUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceManagedDiskUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.DisksClient ctx, cancel := timeouts.ForUpdate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -572,7 +570,7 @@ func resourceManagedDiskUpdate(d *schema.ResourceData, meta interface{}) error { return resourceManagedDiskRead(d, meta) } -func resourceManagedDiskRead(d *schema.ResourceData, meta interface{}) error { +func resourceManagedDiskRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.DisksClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() @@ -644,7 +642,7 @@ func resourceManagedDiskRead(d *schema.ResourceData, meta interface{}) error { return 
tags.FlattenAndSet(d, resp.Tags) } -func resourceManagedDiskDelete(d *schema.ResourceData, meta interface{}) error { +func resourceManagedDiskDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.DisksClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() diff --git a/azurerm/internal/services/compute/managed_disk_resource_test.go b/azurerm/internal/services/compute/managed_disk_resource_test.go index dc0c206714f8..97982967f116 100644 --- a/azurerm/internal/services/compute/managed_disk_resource_test.go +++ b/azurerm/internal/services/compute/managed_disk_resource_test.go @@ -6,12 +6,11 @@ import ( "testing" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -22,10 +21,10 @@ func TestAccManagedDisk_empty(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_managed_disk", "test") r := ManagedDiskResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.empty(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -37,10 +36,10 @@ func TestAccManagedDisk_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_managed_disk", "test") r := 
ManagedDiskResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.empty(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -55,10 +54,10 @@ func TestAccManagedDisk_zeroGbFromPlatformImage(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_managed_disk", "test") r := ManagedDiskResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.zeroGbFromPlatformImage(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), ExpectNonEmptyPlan: true, // since the `disk_size_gb` will have changed @@ -71,13 +70,13 @@ func TestAccManagedDisk_import(t *testing.T) { r := ManagedDiskResource{} vm := VirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // need to create a vm and then delete it so we can use the vhd to test import Config: vm.basicLinuxMachine(data), Destroy: false, ExpectNonEmptyPlan: true, - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( // TODO: switch to using `azurerm_linux_virtual_machine` once Binary Testing is enabled check.That("azurerm_virtual_machine.test").ExistsInAzure(vm), data.CheckWithClientForResource(r.destroyVirtualMachine, "azurerm_virtual_machine.test"), @@ -85,7 +84,7 @@ func TestAccManagedDisk_import(t *testing.T) { }, { Config: r.importConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -96,10 +95,10 @@ func TestAccManagedDisk_copy(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_managed_disk", "test") r := ManagedDiskResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { 
Config: r.copy(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -110,10 +109,10 @@ func TestAccManagedDisk_fromPlatformImage(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_managed_disk", "test") r := ManagedDiskResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.platformImage(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -124,10 +123,10 @@ func TestAccManagedDisk_update(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_managed_disk", "test") r := ManagedDiskResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.empty(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("tags.%").HasValue("2"), check.That(data.ResourceName).Key("tags.environment").HasValue("acctest"), @@ -138,7 +137,7 @@ func TestAccManagedDisk_update(t *testing.T) { }, { Config: r.empty_updated(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("tags.%").HasValue("1"), check.That(data.ResourceName).Key("tags.environment").HasValue("acctest"), @@ -153,10 +152,10 @@ func TestAccManagedDisk_encryption(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_managed_disk", "test") r := ManagedDiskResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.encryption(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), 
check.That(data.ResourceName).Key("encryption_settings.#").HasValue("1"), check.That(data.ResourceName).Key("encryption_settings.0.enabled").HasValue("true"), @@ -175,10 +174,10 @@ func TestAccManagedDisk_importEmpty_withZone(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_managed_disk", "test") r := ManagedDiskResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.empty_withZone(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -190,10 +189,10 @@ func TestAccManagedDisk_create_withUltraSSD(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_managed_disk", "test") r := ManagedDiskResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.create_withUltraSSD(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -205,10 +204,10 @@ func TestAccManagedDisk_update_withUltraSSD(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_managed_disk", "test") r := ManagedDiskResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.create_withUltraSSD(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("disk_iops_read_write").HasValue("101"), check.That(data.ResourceName).Key("disk_mbps_read_write").HasValue("10"), @@ -216,7 +215,7 @@ func TestAccManagedDisk_update_withUltraSSD(t *testing.T) { }, { Config: r.update_withUltraSSD(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("disk_iops_read_write").HasValue("102"), 
check.That(data.ResourceName).Key("disk_mbps_read_write").HasValue("11"), @@ -229,10 +228,10 @@ func TestAccManagedDisk_import_withUltraSSD(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_managed_disk", "test") r := ManagedDiskResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.create_withUltraSSD(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -244,10 +243,10 @@ func TestAccManagedDisk_diskEncryptionSet(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_managed_disk", "test") r := ManagedDiskResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.diskEncryptionSetEncrypted(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -259,17 +258,17 @@ func TestAccManagedDisk_diskEncryptionSet_update(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_managed_disk", "test") r := ManagedDiskResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.diskEncryptionSetUnencrypted(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.diskEncryptionSetEncrypted(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -281,17 +280,17 @@ func TestAccManagedDisk_attachedDiskUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_managed_disk", "test") r := ManagedDiskResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.managedDiskAttached(data, 10), - Check: resource.ComposeTestCheckFunc( + 
Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.managedDiskAttached(data, 20), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("disk_size_gb").HasValue("20"), ), @@ -304,17 +303,17 @@ func TestAccManagedDisk_attachedStorageTypeUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_managed_disk", "test") r := ManagedDiskResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.storageTypeUpdateWhilstAttached(data, "Standard_LRS"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.storageTypeUpdateWhilstAttached(data, "Premium_LRS"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -326,10 +325,10 @@ func TestAccManagedDisk_attachedTierUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_managed_disk", "test") r := ManagedDiskResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.tierUpdateWhileAttached(data, "P10"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("tier").HasValue("P10"), ), @@ -337,7 +336,7 @@ func TestAccManagedDisk_attachedTierUpdate(t *testing.T) { data.ImportStep(), { Config: r.tierUpdateWhileAttached(data, "P20"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("tier").HasValue("P20"), ), @@ -350,10 +349,10 @@ func TestAccAzureRMManagedDisk_networkPolicy(t *testing.T) { data 
:= acceptance.BuildTestData(t, "azurerm_managed_disk", "test") r := ManagedDiskResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: testAccAzureRMManagedDisk_networkPolicy_create(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -365,20 +364,20 @@ func TestAccAzureRMManagedDisk_networkPolicy_update(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_managed_disk", "test") r := ManagedDiskResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: testAccAzureRMManagedDisk_networkPolicy_create(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), - resource.TestCheckResourceAttr(data.ResourceName, "network_access_policy", "DenyAll"), + acceptance.TestCheckResourceAttr(data.ResourceName, "network_access_policy", "DenyAll"), ), }, data.ImportStep(), { Config: testAccAzureRMManagedDisk_networkPolicy_update(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), - resource.TestCheckResourceAttr(data.ResourceName, "network_access_policy", "DenyAll"), + acceptance.TestCheckResourceAttr(data.ResourceName, "network_access_policy", "DenyAll"), ), }, data.ImportStep(), @@ -389,10 +388,10 @@ func TestAccAzureRMManagedDisk_networkPolicy_create_withAllowPrivate(t *testing. 
data := acceptance.BuildTestData(t, "azurerm_managed_disk", "test") r := ManagedDiskResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: testAccAzureRMManagedDisk_networkPolicy_create_withAllowPrivate(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -404,27 +403,27 @@ func TestAccAzureRMManagedDisk_networkPolicy_update_withAllowPrivate(t *testing. data := acceptance.BuildTestData(t, "azurerm_managed_disk", "test") r := ManagedDiskResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: testAccAzureRMManagedDisk_networkPolicy_create_withAllowPrivate(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), - resource.TestCheckResourceAttr(data.ResourceName, "network_access_policy", "AllowPrivate"), + acceptance.TestCheckResourceAttr(data.ResourceName, "network_access_policy", "AllowPrivate"), ), }, data.ImportStep(), { Config: testAccAzureRMManagedDisk_networkPolicy_update_withAllowPrivate(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), - resource.TestCheckResourceAttr(data.ResourceName, "network_access_policy", "AllowPrivate"), + acceptance.TestCheckResourceAttr(data.ResourceName, "network_access_policy", "AllowPrivate"), ), }, data.ImportStep(), }) } -func (ManagedDiskResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (ManagedDiskResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := parse.ManagedDiskID(state.ID) if err != nil { return nil, err @@ -438,7 +437,7 @@ func (ManagedDiskResource) Exists(ctx context.Context, clients *clients.Client, return 
utils.Bool(resp.ID != nil), nil } -func (ManagedDiskResource) destroyVirtualMachine(ctx context.Context, client *clients.Client, state *terraform.InstanceState) error { +func (ManagedDiskResource) destroyVirtualMachine(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) error { vmName := state.Attributes["name"] resourceGroup := state.Attributes["resource_group_name"] diff --git a/azurerm/internal/services/compute/marketplace_agreement_resource.go b/azurerm/internal/services/compute/marketplace_agreement_resource.go index 2962fa3f19da..966c737070ee 100644 --- a/azurerm/internal/services/compute/marketplace_agreement_resource.go +++ b/azurerm/internal/services/compute/marketplace_agreement_resource.go @@ -5,67 +5,66 @@ import ( "log" "time" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func resourceMarketplaceAgreement() *schema.Resource { - return &schema.Resource{ +func resourceMarketplaceAgreement() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceMarketplaceAgreementCreateUpdate, Read: resourceMarketplaceAgreementRead, Delete: resourceMarketplaceAgreementDelete, // TODO: replace this with an importer which validates the ID during import Importer: pluginsdk.DefaultImporter(), - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), 
- Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(30 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "offer": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validation.StringIsNotEmpty, }, "plan": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validation.StringIsNotEmpty, }, "publisher": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validation.StringIsNotEmpty, }, "license_text_link": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "privacy_policy_link": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, }, } } -func resourceMarketplaceAgreementCreateUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceMarketplaceAgreementCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.MarketplaceAgreementsClient ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -126,7 +125,7 @@ func resourceMarketplaceAgreementCreateUpdate(d *schema.ResourceData, meta inter return resourceMarketplaceAgreementRead(d, meta) } -func resourceMarketplaceAgreementRead(d *schema.ResourceData, meta interface{}) error { +func resourceMarketplaceAgreementRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.MarketplaceAgreementsClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() @@ -167,7 +166,7 @@ 
func resourceMarketplaceAgreementRead(d *schema.ResourceData, meta interface{}) return nil } -func resourceMarketplaceAgreementDelete(d *schema.ResourceData, meta interface{}) error { +func resourceMarketplaceAgreementDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.MarketplaceAgreementsClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() diff --git a/azurerm/internal/services/compute/marketplace_agreement_resource_test.go b/azurerm/internal/services/compute/marketplace_agreement_resource_test.go index 95fa162f33dd..ce702d6194fc 100644 --- a/azurerm/internal/services/compute/marketplace_agreement_resource_test.go +++ b/azurerm/internal/services/compute/marketplace_agreement_resource_test.go @@ -5,12 +5,11 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -43,10 +42,10 @@ func testAccMarketplaceAgreement_basic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_marketplace_agreement", "test") r := MarketplaceAgreementResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("license_text_link").Exists(), 
check.That(data.ResourceName).Key("privacy_policy_link").Exists(), @@ -60,10 +59,10 @@ func testAccMarketplaceAgreement_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_marketplace_agreement", "test") r := MarketplaceAgreementResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -78,7 +77,7 @@ func testAccMarketplaceAgreement_agreementCanceled(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_marketplace_agreement", "test") r := MarketplaceAgreementResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ data.DisappearsStep(acceptance.DisappearsStepData{ Config: r.basicConfig, TestResource: r, @@ -86,7 +85,7 @@ func testAccMarketplaceAgreement_agreementCanceled(t *testing.T) { }) } -func (t MarketplaceAgreementResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (t MarketplaceAgreementResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := azure.ParseAzureResourceID(state.ID) if err != nil { return nil, err @@ -103,7 +102,7 @@ func (t MarketplaceAgreementResource) Exists(ctx context.Context, clients *clien return utils.Bool(resp.ID != nil), nil } -func (MarketplaceAgreementResource) Destroy(ctx context.Context, client *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (MarketplaceAgreementResource) Destroy(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := azure.ParseAzureResourceID(state.ID) if err != nil { return nil, err diff --git a/azurerm/internal/services/compute/migration/legacy_vmss.go b/azurerm/internal/services/compute/migration/legacy_vmss.go 
index 0134b72dd8be..e54eb486692a 100644 --- a/azurerm/internal/services/compute/migration/legacy_vmss.go +++ b/azurerm/internal/services/compute/migration/legacy_vmss.go @@ -7,11 +7,8 @@ import ( "encoding/hex" "fmt" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" - - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/structure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -20,54 +17,54 @@ var _ pluginsdk.StateUpgrade = LegacyVMSSV0ToV1{} type LegacyVMSSV0ToV1 struct{} func (LegacyVMSSV0ToV1) Schema() map[string]*pluginsdk.Schema { - return map[string]*schema.Schema{ + return map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, }, "location": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, }, "resource_group_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, }, "zones": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, ForceNew: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, "identity": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, Computed: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "identity_ids": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, 
"principal_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, }, @@ -75,24 +72,24 @@ func (LegacyVMSSV0ToV1) Schema() map[string]*pluginsdk.Schema { }, "sku": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Required: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "tier": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Computed: true, }, "capacity": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Required: true, }, }, @@ -100,53 +97,53 @@ func (LegacyVMSSV0ToV1) Schema() map[string]*pluginsdk.Schema { }, "license_type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Computed: true, }, "upgrade_policy_mode": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "health_probe_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, }, "automatic_os_upgrade": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: false, }, "rolling_upgrade_policy": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "max_batch_instance_percent": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Default: 20, }, "max_unhealthy_instance_percent": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Default: 20, }, "max_unhealthy_upgraded_instance_percent": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Default: 20, }, "pause_time_between_batches": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Default: "PT0S", }, @@ -155,55 +152,55 @@ func (LegacyVMSSV0ToV1) Schema() 
map[string]*pluginsdk.Schema { }, "overprovision": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: true, }, "single_placement_group": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: true, ForceNew: true, }, "priority": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, }, "eviction_policy": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, }, "os_profile": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Required: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "computer_name_prefix": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, }, "admin_username": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "admin_password": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Sensitive: true, }, "custom_data": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, StateFunc: userDataStateFunc, }, @@ -212,26 +209,26 @@ func (LegacyVMSSV0ToV1) Schema() map[string]*pluginsdk.Schema { }, "os_profile_secrets": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "source_vault_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "vault_certificates": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "certificate_url": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "certificate_store": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, }, 
}, @@ -243,54 +240,54 @@ func (LegacyVMSSV0ToV1) Schema() map[string]*pluginsdk.Schema { // lintignore:S018 "os_profile_windows_config": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "provision_vm_agent": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, }, "enable_automatic_upgrades": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, }, "winrm": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "protocol": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "certificate_url": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, }, }, }, }, "additional_unattend_config": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "pass": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "component": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "setting_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "content": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, Sensitive: true, }, @@ -304,29 +301,29 @@ func (LegacyVMSSV0ToV1) Schema() map[string]*pluginsdk.Schema { // lintignore:S018 "os_profile_linux_config": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, Computed: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "disable_password_authentication": { - Type: schema.TypeBool, + Type: 
pluginsdk.TypeBool, Optional: true, Default: false, ForceNew: true, }, "ssh_keys": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "path": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "key_data": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, }, }, @@ -338,47 +335,47 @@ func (LegacyVMSSV0ToV1) Schema() map[string]*pluginsdk.Schema { }, "network_profile": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "primary": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Required: true, }, "accelerated_networking": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, }, "ip_forwarding": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: false, }, "network_security_group_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, }, "dns_settings": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "dns_servers": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Required: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, }, @@ -386,75 +383,75 @@ func (LegacyVMSSV0ToV1) Schema() map[string]*pluginsdk.Schema { }, "ip_configuration": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: 
map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "subnet_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "application_gateway_backend_address_pool_ids": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, + Elem: &pluginsdk.Schema{Type: pluginsdk.TypeString}, + Set: pluginsdk.HashString, }, "application_security_group_ids": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, - Set: schema.HashString, + Set: pluginsdk.HashString, MaxItems: 20, }, "load_balancer_backend_address_pool_ids": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, + Elem: &pluginsdk.Schema{Type: pluginsdk.TypeString}, + Set: pluginsdk.HashString, }, "load_balancer_inbound_nat_rules_ids": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, + Elem: &pluginsdk.Schema{Type: pluginsdk.TypeString}, + Set: pluginsdk.HashString, }, "primary": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Required: true, }, "public_ip_address_configuration": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "idle_timeout": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Required: true, }, "domain_name_label": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, }, @@ -469,19 +466,19 @@ func (LegacyVMSSV0ToV1) Schema() 
map[string]*pluginsdk.Schema { }, "boot_diagnostics": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: true, }, "storage_uri": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, }, @@ -490,48 +487,48 @@ func (LegacyVMSSV0ToV1) Schema() map[string]*pluginsdk.Schema { // lintignore:S018 "storage_profile_os_disk": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Required: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, }, "image": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, }, "vhd_containers": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, + Elem: &pluginsdk.Schema{Type: pluginsdk.TypeString}, + Set: pluginsdk.HashString, }, "managed_disk_type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Computed: true, ConflictsWith: []string{"storage_profile_os_disk.vhd_containers"}, }, "caching": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Computed: true, }, "os_type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, }, "create_option": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, }, @@ -540,34 +537,34 @@ func (LegacyVMSSV0ToV1) Schema() map[string]*pluginsdk.Schema { }, "storage_profile_data_disk": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: 
map[string]*pluginsdk.Schema{ "lun": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Required: true, }, "create_option": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "caching": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Computed: true, }, "disk_size_gb": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Computed: true, }, "managed_disk_type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Computed: true, }, @@ -577,34 +574,34 @@ func (LegacyVMSSV0ToV1) Schema() map[string]*pluginsdk.Schema { // lintignore:S018 "storage_profile_image_reference": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, Computed: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, }, "publisher": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, }, "offer": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, }, "sku": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, }, "version": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, }, }, @@ -614,23 +611,23 @@ func (LegacyVMSSV0ToV1) Schema() map[string]*pluginsdk.Schema { // lintignore:S018 "plan": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "publisher": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "product": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, }, @@ -638,42 +635,42 @@ func (LegacyVMSSV0ToV1) Schema() 
map[string]*pluginsdk.Schema { }, "extension": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "publisher": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "type_handler_version": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "auto_upgrade_minor_version": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, }, "settings": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, }, "protected_settings": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Sensitive: true, }, @@ -683,10 +680,10 @@ func (LegacyVMSSV0ToV1) Schema() map[string]*pluginsdk.Schema { }, "tags": { - Type: schema.TypeMap, + Type: pluginsdk.TypeMap, Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, } @@ -736,7 +733,7 @@ func resourceArmVirtualMachineScaleSetOsProfileWindowsConfigHash(v interface{}) } } - return schema.HashString(buf.String()) + return pluginsdk.HashString(buf.String()) } func resourceArmVirtualMachineScaleSetOsProfileLinuxConfigHash(v interface{}) int { @@ -746,7 +743,7 @@ func resourceArmVirtualMachineScaleSetOsProfileLinuxConfigHash(v interface{}) in buf.WriteString(fmt.Sprintf("%t-", m["disable_password_authentication"].(bool))) } - return schema.HashString(buf.String()) + return pluginsdk.HashString(buf.String()) } func resourceArmVirtualMachineScaleSetNetworkConfigurationHash(v interface{}) int { @@ -757,7 +754,7 @@ func resourceArmVirtualMachineScaleSetNetworkConfigurationHash(v interface{}) in buf.WriteString(fmt.Sprintf("%t-", m["primary"].(bool))) } - 
return schema.HashString(buf.String()) + return pluginsdk.HashString(buf.String()) } func resourceArmVirtualMachineScaleSetStorageProfileOsDiskHash(v interface{}) int { @@ -767,11 +764,11 @@ func resourceArmVirtualMachineScaleSetStorageProfileOsDiskHash(v interface{}) in buf.WriteString(fmt.Sprintf("%s-", m["name"].(string))) if v, ok := m["vhd_containers"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(*schema.Set).List())) + buf.WriteString(fmt.Sprintf("%s-", v.(*pluginsdk.Set).List())) } } - return schema.HashString(buf.String()) + return pluginsdk.HashString(buf.String()) } func resourceArmVirtualMachineScaleSetStorageProfileImageReferenceHash(v interface{}) int { @@ -795,7 +792,7 @@ func resourceArmVirtualMachineScaleSetStorageProfileImageReferenceHash(v interfa } } - return schema.HashString(buf.String()) + return pluginsdk.HashString(buf.String()) } func resourceArmVirtualMachineScaleSetExtensionHash(v interface{}) int { @@ -812,15 +809,15 @@ func resourceArmVirtualMachineScaleSetExtensionHash(v interface{}) int { } if v, ok := m["provision_after_extensions"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(*schema.Set).List())) + buf.WriteString(fmt.Sprintf("%s-", v.(*pluginsdk.Set).List())) } // we need to ensure the whitespace is consistent settings := m["settings"].(string) if settings != "" { - expandedSettings, err := structure.ExpandJsonFromString(settings) + expandedSettings, err := pluginsdk.ExpandJsonFromString(settings) if err == nil { - serialisedSettings, err := structure.FlattenJsonToString(expandedSettings) + serialisedSettings, err := pluginsdk.FlattenJsonToString(expandedSettings) if err == nil { buf.WriteString(fmt.Sprintf("%s-", serialisedSettings)) } @@ -828,5 +825,5 @@ func resourceArmVirtualMachineScaleSetExtensionHash(v interface{}) int { } } - return schema.HashString(buf.String()) + return pluginsdk.HashString(buf.String()) } diff --git a/azurerm/internal/services/compute/network_interface.go 
b/azurerm/internal/services/compute/network_interface.go index 4e4e3312f515..5579a601ebbd 100644 --- a/azurerm/internal/services/compute/network_interface.go +++ b/azurerm/internal/services/compute/network_interface.go @@ -4,9 +4,9 @@ import ( "context" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" ) // nolint: deadcode unused @@ -150,7 +150,7 @@ func retrievePublicIPAddress(ctx context.Context, client *network.PublicIPAddres // to connect to the Virtual Machine. A Public IP Address is used if one is available // but this falls back to a Private IP Address (which should always exist) // nolint: deadcode unused -func setConnectionInformation(d *schema.ResourceData, input connectionInfo, isWindows bool) { +func setConnectionInformation(d *pluginsdk.ResourceData, input connectionInfo, isWindows bool) { provisionerType := "ssh" if isWindows { provisionerType = "winrm" diff --git a/azurerm/internal/services/compute/orchestrated_virtual_machine_scale_set_resource.go b/azurerm/internal/services/compute/orchestrated_virtual_machine_scale_set_resource.go index a3a0cb455e3a..445ca3ae13cb 100644 --- a/azurerm/internal/services/compute/orchestrated_virtual_machine_scale_set_resource.go +++ b/azurerm/internal/services/compute/orchestrated_virtual_machine_scale_set_resource.go @@ -6,8 +6,6 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" @@ -17,12 +15,13 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/suppress" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func resourceOrchestratedVirtualMachineScaleSet() *schema.Resource { - return &schema.Resource{ +func resourceOrchestratedVirtualMachineScaleSet() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceOrchestratedVirtualMachineScaleSetCreateUpdate, Read: resourceOrchestratedVirtualMachineScaleSetRead, Update: resourceOrchestratedVirtualMachineScaleSetCreateUpdate, @@ -33,16 +32,16 @@ func resourceOrchestratedVirtualMachineScaleSet() *schema.Resource { return err }, importOrchestratedVirtualMachineScaleSet), - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(30 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, 
ValidateFunc: validate.VirtualMachineName, @@ -53,7 +52,7 @@ func resourceOrchestratedVirtualMachineScaleSet() *schema.Resource { "location": azure.SchemaLocation(), "platform_fault_domain_count": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Required: true, ForceNew: true, // The range of this value varies in different locations @@ -61,7 +60,7 @@ func resourceOrchestratedVirtualMachineScaleSet() *schema.Resource { }, "proximity_placement_group_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, ValidateFunc: validate.ProximityPlacementGroupID, @@ -70,7 +69,7 @@ func resourceOrchestratedVirtualMachineScaleSet() *schema.Resource { }, "single_placement_group": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, ForceNew: true, Default: false, @@ -80,7 +79,7 @@ func resourceOrchestratedVirtualMachineScaleSet() *schema.Resource { "zones": azure.SchemaSingleZone(), "unique_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, @@ -89,7 +88,7 @@ func resourceOrchestratedVirtualMachineScaleSet() *schema.Resource { } } -func resourceOrchestratedVirtualMachineScaleSetCreateUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceOrchestratedVirtualMachineScaleSetCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.VMScaleSetClient ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -148,7 +147,7 @@ func resourceOrchestratedVirtualMachineScaleSetCreateUpdate(d *schema.ResourceDa return resourceOrchestratedVirtualMachineScaleSetRead(d, meta) } -func resourceOrchestratedVirtualMachineScaleSetRead(d *schema.ResourceData, meta interface{}) error { +func resourceOrchestratedVirtualMachineScaleSetRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.VMScaleSetClient ctx, cancel := 
timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() @@ -191,7 +190,7 @@ func resourceOrchestratedVirtualMachineScaleSetRead(d *schema.ResourceData, meta return tags.FlattenAndSet(d, resp.Tags) } -func resourceOrchestratedVirtualMachineScaleSetDelete(d *schema.ResourceData, meta interface{}) error { +func resourceOrchestratedVirtualMachineScaleSetDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.VMScaleSetClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() @@ -201,7 +200,7 @@ func resourceOrchestratedVirtualMachineScaleSetDelete(d *schema.ResourceData, me return err } - // @ArcturusZhang (mimicking from linux_virtual_machine_resource.go): sending `nil` here omits this value from being sent + // @ArcturusZhang (mimicking from linux_virtual_machine_pluginsdk.go): sending `nil` here omits this value from being sent // which matches the previous behaviour - we're only splitting this out so it's clear why // TODO: support force deletion once it's out of Preview, if applicable var forceDeletion *bool = nil diff --git a/azurerm/internal/services/compute/orchestrated_virtual_machine_scale_set_resource_test.go b/azurerm/internal/services/compute/orchestrated_virtual_machine_scale_set_resource_test.go index 339605cdb0c4..a34319011f23 100644 --- a/azurerm/internal/services/compute/orchestrated_virtual_machine_scale_set_resource_test.go +++ b/azurerm/internal/services/compute/orchestrated_virtual_machine_scale_set_resource_test.go @@ -5,12 +5,11 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -21,10 +20,10 @@ func TestAccOrchestratedVirtualMachineScaleSet_basicZonal(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_orchestrated_virtual_machine_scale_set", "test") r := OrchestratedVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -36,24 +35,24 @@ func TestAccOrchestratedVirtualMachineScaleSet_updateZonal(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_orchestrated_virtual_machine_scale_set", "test") r := OrchestratedVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.update(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -65,10 +64,10 @@ func TestAccOrchestratedVirtualMachineScaleSet_basicNonZonal(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_orchestrated_virtual_machine_scale_set", "test") r := OrchestratedVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicNonZonal(data), - Check: 
resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -80,10 +79,10 @@ func TestAccOrchestratedVirtualMachineScaleSet_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_orchestrated_virtual_machine_scale_set", "test") r := OrchestratedVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -91,7 +90,7 @@ func TestAccOrchestratedVirtualMachineScaleSet_requiresImport(t *testing.T) { }) } -func (t OrchestratedVirtualMachineScaleSetResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (t OrchestratedVirtualMachineScaleSetResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := parse.VirtualMachineScaleSetID(state.ID) if err != nil { return nil, err diff --git a/azurerm/internal/services/compute/platform_image_data_source.go b/azurerm/internal/services/compute/platform_image_data_source.go index 042bf50284b3..103ed8ddecf4 100644 --- a/azurerm/internal/services/compute/platform_image_data_source.go +++ b/azurerm/internal/services/compute/platform_image_data_source.go @@ -5,41 +5,41 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func dataSourcePlatformImage() *schema.Resource { - return &schema.Resource{ +func dataSourcePlatformImage() *pluginsdk.Resource { + return &pluginsdk.Resource{ Read: dataSourcePlatformImageRead, - Timeouts: &schema.ResourceTimeout{ - Read: schema.DefaultTimeout(5 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Read: pluginsdk.DefaultTimeout(5 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "location": azure.SchemaLocation(), "publisher": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "offer": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "sku": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "version": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Computed: true, }, @@ -47,7 +47,7 @@ func dataSourcePlatformImage() *schema.Resource { } } -func dataSourcePlatformImageRead(d *schema.ResourceData, meta interface{}) error { +func dataSourcePlatformImageRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.VMImageClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() diff --git a/azurerm/internal/services/compute/platform_image_data_source_test.go b/azurerm/internal/services/compute/platform_image_data_source_test.go index 712b36ee5f28..cdb6a395ea82 100644 --- a/azurerm/internal/services/compute/platform_image_data_source_test.go +++ b/azurerm/internal/services/compute/platform_image_data_source_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -16,10 +15,10 @@ func 
TestAccDataSourcePlatformImage_basic(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_platform_image", "test") r := PlatformImageDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("version").Exists(), check.That(data.ResourceName).Key("publisher").HasValue("Canonical"), check.That(data.ResourceName).Key("offer").HasValue("UbuntuServer"), @@ -33,10 +32,10 @@ func TestAccDataSourcePlatformImage_withVersion(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_platform_image", "test") r := PlatformImageDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.withVersion(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("version").Exists(), check.That(data.ResourceName).Key("publisher").HasValue("Canonical"), check.That(data.ResourceName).Key("offer").HasValue("UbuntuServer"), diff --git a/azurerm/internal/services/compute/proximity_placement_group_data_source.go b/azurerm/internal/services/compute/proximity_placement_group_data_source.go index e226bb60aa72..a6ecba977333 100644 --- a/azurerm/internal/services/compute/proximity_placement_group_data_source.go +++ b/azurerm/internal/services/compute/proximity_placement_group_data_source.go @@ -4,26 +4,26 @@ import ( "fmt" "time" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func dataSourceProximityPlacementGroup() *schema.Resource { - return &schema.Resource{ +func dataSourceProximityPlacementGroup() *pluginsdk.Resource { + return &pluginsdk.Resource{ Read: dataSourceProximityPlacementGroupRead, - Timeouts: &schema.ResourceTimeout{ - Read: schema.DefaultTimeout(5 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Read: pluginsdk.DefaultTimeout(5 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.NoZeroValues, }, @@ -37,7 +37,7 @@ func dataSourceProximityPlacementGroup() *schema.Resource { } } -func dataSourceProximityPlacementGroupRead(d *schema.ResourceData, meta interface{}) error { +func dataSourceProximityPlacementGroupRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.ProximityPlacementGroupsClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() diff --git a/azurerm/internal/services/compute/proximity_placement_group_data_source_test.go b/azurerm/internal/services/compute/proximity_placement_group_data_source_test.go index 507f76cd042a..8596838131b3 100644 --- a/azurerm/internal/services/compute/proximity_placement_group_data_source_test.go +++ b/azurerm/internal/services/compute/proximity_placement_group_data_source_test.go @@ -6,8 +6,6 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" - - 
"github.com/hashicorp/terraform-plugin-sdk/helper/resource" ) type ProximityPlacementGroupDataSource struct { @@ -17,10 +15,10 @@ func TestAccProximityPlacementGroupDataSource_basic(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_proximity_placement_group", "test") r := ProximityPlacementGroupDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("location").Exists(), check.That(data.ResourceName).Key("name").Exists(), check.That(data.ResourceName).Key("resource_group_name").Exists(), diff --git a/azurerm/internal/services/compute/proximity_placement_group_resource.go b/azurerm/internal/services/compute/proximity_placement_group_resource.go index c69c6a454d32..61c9e5aa087a 100644 --- a/azurerm/internal/services/compute/proximity_placement_group_resource.go +++ b/azurerm/internal/services/compute/proximity_placement_group_resource.go @@ -6,19 +6,18 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func 
resourceProximityPlacementGroup() *schema.Resource { - return &schema.Resource{ +func resourceProximityPlacementGroup() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceProximityPlacementGroupCreateUpdate, Read: resourceProximityPlacementGroupRead, Update: resourceProximityPlacementGroupCreateUpdate, @@ -27,16 +26,16 @@ func resourceProximityPlacementGroup() *schema.Resource { // TODO: replace this with an importer which validates the ID during import Importer: pluginsdk.DefaultImporter(), - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(30 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validation.StringIsNotEmpty, @@ -51,7 +50,7 @@ func resourceProximityPlacementGroup() *schema.Resource { } } -func resourceProximityPlacementGroupCreateUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceProximityPlacementGroupCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.ProximityPlacementGroupsClient ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -90,7 +89,7 @@ func resourceProximityPlacementGroupCreateUpdate(d *schema.ResourceData, meta in return resourceProximityPlacementGroupRead(d, meta) } -func resourceProximityPlacementGroupRead(d *schema.ResourceData, meta interface{}) error { +func resourceProximityPlacementGroupRead(d *pluginsdk.ResourceData, meta 
interface{}) error { client := meta.(*clients.Client).Compute.ProximityPlacementGroupsClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() @@ -120,7 +119,7 @@ func resourceProximityPlacementGroupRead(d *schema.ResourceData, meta interface{ return tags.FlattenAndSet(d, resp.Tags) } -func resourceProximityPlacementGroupDelete(d *schema.ResourceData, meta interface{}) error { +func resourceProximityPlacementGroupDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.ProximityPlacementGroupsClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() diff --git a/azurerm/internal/services/compute/proximity_placement_group_resource_test.go b/azurerm/internal/services/compute/proximity_placement_group_resource_test.go index 2ebaf79993b4..1e2a8da97292 100644 --- a/azurerm/internal/services/compute/proximity_placement_group_resource_test.go +++ b/azurerm/internal/services/compute/proximity_placement_group_resource_test.go @@ -5,14 +5,12 @@ import ( "fmt" "testing" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/parse" - "github.com/hashicorp/go-azure-helpers/response" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -23,10 +21,10 @@ func TestAccProximityPlacementGroup_basic(t *testing.T) { data := 
acceptance.BuildTestData(t, "azurerm_proximity_placement_group", "test") r := ProximityPlacementGroupResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -38,10 +36,10 @@ func TestAccProximityPlacementGroup_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_proximity_placement_group", "test") r := ProximityPlacementGroupResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -56,7 +54,7 @@ func TestAccProximityPlacementGroup_disappears(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_proximity_placement_group", "test") r := ProximityPlacementGroupResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ data.DisappearsStep(acceptance.DisappearsStepData{ Config: r.basic, TestResource: r, @@ -68,10 +66,10 @@ func TestAccProximityPlacementGroup_withTags(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_proximity_placement_group", "test") r := ProximityPlacementGroupResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.withTags(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("tags.%").HasValue("2"), check.That(data.ResourceName).Key("tags.environment").HasValue("Production"), @@ -80,7 +78,7 @@ func TestAccProximityPlacementGroup_withTags(t *testing.T) { }, { Config: r.withUpdatedTags(data), - Check: resource.ComposeTestCheckFunc( + Check: 
acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("tags.%").HasValue("1"), check.That(data.ResourceName).Key("tags.environment").HasValue("staging"), @@ -90,7 +88,7 @@ func TestAccProximityPlacementGroup_withTags(t *testing.T) { }) } -func (t ProximityPlacementGroupResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (t ProximityPlacementGroupResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := parse.ProximityPlacementGroupID(state.ID) if err != nil { return nil, err @@ -104,7 +102,7 @@ func (t ProximityPlacementGroupResource) Exists(ctx context.Context, clients *cl return utils.Bool(resp.ID != nil), nil } -func (ProximityPlacementGroupResource) Destroy(ctx context.Context, client *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (ProximityPlacementGroupResource) Destroy(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := parse.ProximityPlacementGroupID(state.ID) if err != nil { return nil, err diff --git a/azurerm/internal/services/compute/registration.go b/azurerm/internal/services/compute/registration.go index 6f7f9fffc823..2233f3dda1bd 100644 --- a/azurerm/internal/services/compute/registration.go +++ b/azurerm/internal/services/compute/registration.go @@ -1,7 +1,7 @@ package compute import ( - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" ) type Registration struct{} @@ -19,8 +19,8 @@ func (r Registration) WebsiteCategories() []string { } // SupportedDataSources returns the supported Data Sources supported by this Service -func (r Registration) SupportedDataSources() map[string]*schema.Resource { - return map[string]*schema.Resource{ +func (r Registration) SupportedDataSources() 
map[string]*pluginsdk.Resource { + return map[string]*pluginsdk.Resource{ "azurerm_availability_set": dataSourceAvailabilitySet(), "azurerm_dedicated_host": dataSourceDedicatedHost(), "azurerm_dedicated_host_group": dataSourceDedicatedHostGroup(), @@ -43,8 +43,8 @@ func (r Registration) SupportedDataSources() map[string]*schema.Resource { } // SupportedResources returns the supported Resources supported by this Service -func (r Registration) SupportedResources() map[string]*schema.Resource { - resources := map[string]*schema.Resource{ +func (r Registration) SupportedResources() map[string]*pluginsdk.Resource { + resources := map[string]*pluginsdk.Resource{ "azurerm_availability_set": resourceAvailabilitySet(), "azurerm_dedicated_host": resourceDedicatedHost(), "azurerm_dedicated_host_group": resourceDedicatedHostGroup(), diff --git a/azurerm/internal/services/compute/shared_image_data_source.go b/azurerm/internal/services/compute/shared_image_data_source.go index a9faf518ac23..12afa946785a 100644 --- a/azurerm/internal/services/compute/shared_image_data_source.go +++ b/azurerm/internal/services/compute/shared_image_data_source.go @@ -5,32 +5,32 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func dataSourceSharedImage() *schema.Resource { - return 
&schema.Resource{ +func dataSourceSharedImage() *pluginsdk.Resource { + return &pluginsdk.Resource{ Read: dataSourceSharedImageRead, - Timeouts: &schema.ResourceTimeout{ - Read: schema.DefaultTimeout(5 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Read: pluginsdk.DefaultTimeout(5 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validate.SharedImageName, }, "gallery_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validate.SharedImageGalleryName, }, @@ -40,35 +40,35 @@ func dataSourceSharedImage() *schema.Resource { "resource_group_name": azure.SchemaResourceGroupNameForDataSource(), "os_type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "specialized": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Computed: true, }, "hyper_v_generation": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "identifier": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "publisher": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "offer": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "sku": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, }, @@ -76,22 +76,22 @@ func dataSourceSharedImage() *schema.Resource { }, "description": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "eula": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "privacy_statement_uri": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "release_note_uri": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: 
true, }, @@ -100,7 +100,7 @@ func dataSourceSharedImage() *schema.Resource { } } -func dataSourceSharedImageRead(d *schema.ResourceData, meta interface{}) error { +func dataSourceSharedImageRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.GalleryImagesClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() diff --git a/azurerm/internal/services/compute/shared_image_data_source_test.go b/azurerm/internal/services/compute/shared_image_data_source_test.go index ae4ebf500f2d..c1a9c1a8481e 100644 --- a/azurerm/internal/services/compute/shared_image_data_source_test.go +++ b/azurerm/internal/services/compute/shared_image_data_source_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -15,10 +14,10 @@ type SharedImageDataSource struct { func TestAccDataSourceAzureRMSharedImage_basic(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_shared_image", "test") r := SharedImageDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.basic(data, ""), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("tags.%").HasValue("0"), ), }, @@ -28,10 +27,10 @@ func TestAccDataSourceAzureRMSharedImage_basic(t *testing.T) { func TestAccDataSourceAzureRMSharedImage_basic_hyperVGeneration_V2(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_shared_image", "test") r := SharedImageDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.basic(data, "V2"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( 
check.That(data.ResourceName).Key("tags.%").HasValue("0"), check.That(data.ResourceName).Key("hyper_v_generation").HasValue("V2"), ), @@ -42,10 +41,10 @@ func TestAccDataSourceAzureRMSharedImage_basic_hyperVGeneration_V2(t *testing.T) func TestAccDataSourceAzureRMSharedImage_complete(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_shared_image", "test") r := SharedImageDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.complete(data, "V1"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("tags.%").HasValue("0"), check.That(data.ResourceName).Key("hyper_v_generation").HasValue("V1"), ), diff --git a/azurerm/internal/services/compute/shared_image_gallery_data_source.go b/azurerm/internal/services/compute/shared_image_gallery_data_source.go index 344367bf3194..76a5cab7f8a1 100644 --- a/azurerm/internal/services/compute/shared_image_gallery_data_source.go +++ b/azurerm/internal/services/compute/shared_image_gallery_data_source.go @@ -4,26 +4,26 @@ import ( "fmt" "time" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func dataSourceSharedImageGallery() *schema.Resource { - return &schema.Resource{ +func dataSourceSharedImageGallery() *pluginsdk.Resource { + return &pluginsdk.Resource{ Read: 
dataSourceSharedImageGalleryRead, - Timeouts: &schema.ResourceTimeout{ - Read: schema.DefaultTimeout(5 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Read: pluginsdk.DefaultTimeout(5 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validate.SharedImageGalleryName, }, @@ -33,12 +33,12 @@ func dataSourceSharedImageGallery() *schema.Resource { "location": azure.SchemaLocationForDataSource(), "description": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "unique_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, @@ -47,7 +47,7 @@ func dataSourceSharedImageGallery() *schema.Resource { } } -func dataSourceSharedImageGalleryRead(d *schema.ResourceData, meta interface{}) error { +func dataSourceSharedImageGalleryRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.GalleriesClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() diff --git a/azurerm/internal/services/compute/shared_image_gallery_data_source_test.go b/azurerm/internal/services/compute/shared_image_gallery_data_source_test.go index d1a61679db1f..84c73d564769 100644 --- a/azurerm/internal/services/compute/shared_image_gallery_data_source_test.go +++ b/azurerm/internal/services/compute/shared_image_gallery_data_source_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -16,10 +15,10 @@ func TestAccDataSourceSharedImageGallery_basic(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_shared_image_gallery", "test") r := SharedImageGalleryDataSource{} - 
data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("tags.%").HasValue("0"), ), }, @@ -30,10 +29,10 @@ func TestAccDataSourceSharedImageGallery_complete(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_shared_image_gallery", "test") r := SharedImageGalleryDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.complete(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("description").HasValue("Shared images and things."), check.That(data.ResourceName).Key("tags.%").HasValue("2"), check.That(data.ResourceName).Key("tags.Hello").HasValue("There"), diff --git a/azurerm/internal/services/compute/shared_image_gallery_resource.go b/azurerm/internal/services/compute/shared_image_gallery_resource.go index c939a213d7af..c0c5919d0d37 100644 --- a/azurerm/internal/services/compute/shared_image_gallery_resource.go +++ b/azurerm/internal/services/compute/shared_image_gallery_resource.go @@ -7,7 +7,6 @@ import ( "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" "github.com/hashicorp/go-azure-helpers/response" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" @@ -19,8 +18,8 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func resourceSharedImageGallery() *schema.Resource { - return &schema.Resource{ +func resourceSharedImageGallery() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceSharedImageGalleryCreateUpdate, 
Read: resourceSharedImageGalleryRead, Update: resourceSharedImageGalleryCreateUpdate, @@ -30,16 +29,16 @@ func resourceSharedImageGallery() *schema.Resource { return err }), - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(30 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.SharedImageGalleryName, @@ -50,21 +49,21 @@ func resourceSharedImageGallery() *schema.Resource { "location": azure.SchemaLocation(), "description": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, }, "tags": tags.Schema(), "unique_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, }, } } -func resourceSharedImageGalleryCreateUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceSharedImageGalleryCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.GalleriesClient ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -121,7 +120,7 @@ func resourceSharedImageGalleryCreateUpdate(d *schema.ResourceData, meta interfa return resourceSharedImageGalleryRead(d, meta) } -func resourceSharedImageGalleryRead(d *schema.ResourceData, meta interface{}) error { +func resourceSharedImageGalleryRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.GalleriesClient ctx, cancel := 
timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() @@ -158,7 +157,7 @@ func resourceSharedImageGalleryRead(d *schema.ResourceData, meta interface{}) er return tags.FlattenAndSet(d, resp.Tags) } -func resourceSharedImageGalleryDelete(d *schema.ResourceData, meta interface{}) error { +func resourceSharedImageGalleryDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.GalleriesClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() diff --git a/azurerm/internal/services/compute/shared_image_gallery_resource_test.go b/azurerm/internal/services/compute/shared_image_gallery_resource_test.go index b956aafba00f..6db2f7494a85 100644 --- a/azurerm/internal/services/compute/shared_image_gallery_resource_test.go +++ b/azurerm/internal/services/compute/shared_image_gallery_resource_test.go @@ -5,12 +5,11 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -21,10 +20,10 @@ func TestAccSharedImageGallery_basic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_shared_image_gallery", "test") r := SharedImageGalleryResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( 
check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("description").HasValue(""), ), @@ -37,10 +36,10 @@ func TestAccSharedImageGallery_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_shared_image_gallery", "test") r := SharedImageGalleryResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("description").HasValue(""), ), @@ -56,10 +55,10 @@ func TestAccSharedImageGallery_complete(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_shared_image_gallery", "test") r := SharedImageGalleryResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.complete(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("description").HasValue("Shared images and things."), check.That(data.ResourceName).Key("tags.%").HasValue("2"), @@ -71,7 +70,7 @@ func TestAccSharedImageGallery_complete(t *testing.T) { }) } -func (t SharedImageGalleryResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (t SharedImageGalleryResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := parse.SharedImageGalleryID(state.ID) if err != nil { return nil, err diff --git a/azurerm/internal/services/compute/shared_image_resource.go b/azurerm/internal/services/compute/shared_image_resource.go index 7596274cd07a..3e20e12aedb3 100644 --- a/azurerm/internal/services/compute/shared_image_resource.go +++ b/azurerm/internal/services/compute/shared_image_resource.go @@ -7,9 +7,6 @@ import ( "time" 
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" @@ -17,12 +14,13 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func resourceSharedImage() *schema.Resource { - return &schema.Resource{ +func resourceSharedImage() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceSharedImageCreateUpdate, Read: resourceSharedImageRead, Update: resourceSharedImageCreateUpdate, @@ -33,23 +31,23 @@ func resourceSharedImage() *schema.Resource { return err }), - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(30 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - 
Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.SharedImageName, }, "gallery_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.SharedImageGalleryName, @@ -60,7 +58,7 @@ func resourceSharedImage() *schema.Resource { "resource_group_name": azure.SchemaResourceGroupName(), "os_type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validation.StringInSlice([]string{ @@ -70,7 +68,7 @@ func resourceSharedImage() *schema.Resource { }, "hyper_v_generation": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Default: string(compute.HyperVGenerationTypesV1), ForceNew: true, @@ -81,21 +79,21 @@ func resourceSharedImage() *schema.Resource { }, "identifier": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Required: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "publisher": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "offer": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "sku": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, }, @@ -103,35 +101,35 @@ func resourceSharedImage() *schema.Resource { }, "description": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, }, "eula": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, }, "purchase_plan": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validation.StringIsNotEmpty, }, "publisher": { - 
Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, ValidateFunc: validation.StringIsNotEmpty, }, "product": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, ValidateFunc: validation.StringIsNotEmpty, @@ -141,17 +139,17 @@ func resourceSharedImage() *schema.Resource { }, "privacy_statement_uri": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, }, "release_note_uri": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, }, "specialized": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, ForceNew: true, }, @@ -161,7 +159,7 @@ func resourceSharedImage() *schema.Resource { } } -func resourceSharedImageCreateUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceSharedImageCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.GalleryImagesClient ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -229,7 +227,7 @@ func resourceSharedImageCreateUpdate(d *schema.ResourceData, meta interface{}) e return resourceSharedImageRead(d, meta) } -func resourceSharedImageRead(d *schema.ResourceData, meta interface{}) error { +func resourceSharedImageRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.GalleryImagesClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() @@ -278,7 +276,7 @@ func resourceSharedImageRead(d *schema.ResourceData, meta interface{}) error { return tags.FlattenAndSet(d, resp.Tags) } -func resourceSharedImageDelete(d *schema.ResourceData, meta interface{}) error { +func resourceSharedImageDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.GalleryImagesClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() @@ -297,23 +295,23 @@ func 
resourceSharedImageDelete(d *schema.ResourceData, meta interface{}) error { } log.Printf("[DEBUG] Waiting for %s to be eventually deleted", *id) - stateConf := &resource.StateChangeConf{ + stateConf := &pluginsdk.StateChangeConf{ Pending: []string{"Exists"}, Target: []string{"NotFound"}, Refresh: sharedImageDeleteStateRefreshFunc(ctx, client, id.ResourceGroup, id.GalleryName, id.ImageName), MinTimeout: 10 * time.Second, ContinuousTargetOccurence: 10, - Timeout: d.Timeout(schema.TimeoutDelete), + Timeout: d.Timeout(pluginsdk.TimeoutDelete), } - if _, err := stateConf.WaitForState(); err != nil { + if _, err := stateConf.WaitForStateContext(ctx); err != nil { return fmt.Errorf("waiting for %s to be deleted: %+v", *id, err) } return nil } -func sharedImageDeleteStateRefreshFunc(ctx context.Context, client *compute.GalleryImagesClient, resourceGroupName string, galleryName string, imageName string) resource.StateRefreshFunc { +func sharedImageDeleteStateRefreshFunc(ctx context.Context, client *compute.GalleryImagesClient, resourceGroupName string, galleryName string, imageName string) pluginsdk.StateRefreshFunc { // The resource Shared Image depends on the resource Shared Image Gallery. // Although the delete API returns 404 which means the Shared Image resource has been deleted. 
// Then it tries to immediately delete Shared Image Gallery but it still throws error `Can not delete resource before nested resources are deleted.` @@ -333,7 +331,7 @@ func sharedImageDeleteStateRefreshFunc(ctx context.Context, client *compute.Gall } } -func expandGalleryImageIdentifier(d *schema.ResourceData) *compute.GalleryImageIdentifier { +func expandGalleryImageIdentifier(d *pluginsdk.ResourceData) *compute.GalleryImageIdentifier { vs := d.Get("identifier").([]interface{}) v := vs[0].(map[string]interface{}) diff --git a/azurerm/internal/services/compute/shared_image_resource_test.go b/azurerm/internal/services/compute/shared_image_resource_test.go index 8398eb71e6c5..4ef8f929b099 100644 --- a/azurerm/internal/services/compute/shared_image_resource_test.go +++ b/azurerm/internal/services/compute/shared_image_resource_test.go @@ -5,12 +5,11 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -20,10 +19,10 @@ type SharedImageResource struct { func TestAccSharedImage_basic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_shared_image", "test") r := SharedImageResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data, ""), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), 
check.That(data.ResourceName).Key("description").HasValue(""), ), @@ -35,10 +34,10 @@ func TestAccSharedImage_basic(t *testing.T) { func TestAccSharedImage_basic_hyperVGeneration_V2(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_shared_image", "test") r := SharedImageResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data, "V2"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("description").HasValue(""), check.That(data.ResourceName).Key("hyper_v_generation").HasValue("V2"), @@ -52,10 +51,10 @@ func TestAccSharedImage_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_shared_image", "test") r := SharedImageResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data, ""), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("description").HasValue(""), ), @@ -67,10 +66,10 @@ func TestAccSharedImage_requiresImport(t *testing.T) { func TestAccSharedImage_complete(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_shared_image", "test") r := SharedImageResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.complete(data, "V1"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("os_type").HasValue("Linux"), check.That(data.ResourceName).Key("hyper_v_generation").HasValue("V1"), @@ -87,10 +86,10 @@ func TestAccSharedImage_complete(t *testing.T) { func TestAccSharedImage_specialized(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_shared_image", "test") r := 
SharedImageResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.specialized(data, "V1"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -98,7 +97,7 @@ func TestAccSharedImage_specialized(t *testing.T) { }) } -func (t SharedImageResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (t SharedImageResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := parse.SharedImageID(state.ID) if err != nil { return nil, err diff --git a/azurerm/internal/services/compute/shared_image_version_data_source.go b/azurerm/internal/services/compute/shared_image_version_data_source.go index 173b3f79d2ef..e739f069e711 100644 --- a/azurerm/internal/services/compute/shared_image_version_data_source.go +++ b/azurerm/internal/services/compute/shared_image_version_data_source.go @@ -6,38 +6,38 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func dataSourceSharedImageVersion() *schema.Resource { - return &schema.Resource{ +func dataSourceSharedImageVersion() *pluginsdk.Resource { + return &pluginsdk.Resource{ 
Read: dataSourceSharedImageVersionRead, - Timeouts: &schema.ResourceTimeout{ - Read: schema.DefaultTimeout(5 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Read: pluginsdk.DefaultTimeout(5 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validate.SharedImageVersionName, }, "gallery_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validate.SharedImageGalleryName, }, "image_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validate.SharedImageName, }, @@ -47,37 +47,37 @@ func dataSourceSharedImageVersion() *schema.Resource { "resource_group_name": azure.SchemaResourceGroupNameForDataSource(), "managed_image_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "os_disk_snapshot_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "os_disk_image_size_gb": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Computed: true, }, "target_region": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "regional_replica_count": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Computed: true, }, "storage_account_type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, }, @@ -85,7 +85,7 @@ func dataSourceSharedImageVersion() *schema.Resource { }, "exclude_from_latest": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Computed: true, }, @@ -94,7 +94,7 @@ func dataSourceSharedImageVersion() *schema.Resource { } } -func dataSourceSharedImageVersionRead(d *schema.ResourceData, meta interface{}) error { +func 
dataSourceSharedImageVersionRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.GalleryImageVersionsClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() diff --git a/azurerm/internal/services/compute/shared_image_version_data_source_test.go b/azurerm/internal/services/compute/shared_image_version_data_source_test.go index 8de7d0a57e0b..01e722621b40 100644 --- a/azurerm/internal/services/compute/shared_image_version_data_source_test.go +++ b/azurerm/internal/services/compute/shared_image_version_data_source_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -16,11 +15,11 @@ func TestAccDataSourceSharedImageVersion_basic(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_shared_image_version", "test") r := SharedImageVersionDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { // need to create a vm and then reference it in the image creation Config: SharedImageVersionResource{}.setup(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( data.CheckWithClientForResource(ImageResource{}.virtualMachineExists, "azurerm_virtual_machine.testsource"), data.CheckWithClientForResource(ImageResource{}.generalizeVirtualMachine(data), "azurerm_virtual_machine.testsource"), ), @@ -30,7 +29,7 @@ func TestAccDataSourceSharedImageVersion_basic(t *testing.T) { }, { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("managed_image_id").Exists(), check.That(data.ResourceName).Key("target_region.#").HasValue("1"), 
check.That(data.ResourceName).Key("target_region.0.storage_account_type").HasValue("Standard_LRS"), @@ -43,11 +42,11 @@ func TestAccDataSourceSharedImageVersion_latest(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_shared_image_version", "test") r := SharedImageVersionDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { // need to create a vm and then reference it in the image creation Config: SharedImageVersionResource{}.setup(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( data.CheckWithClientForResource(ImageResource{}.virtualMachineExists, "azurerm_virtual_machine.testsource"), data.CheckWithClientForResource(ImageResource{}.generalizeVirtualMachine(data), "azurerm_virtual_machine.testsource"), ), @@ -57,7 +56,7 @@ func TestAccDataSourceSharedImageVersion_latest(t *testing.T) { }, { Config: r.customName(data, "latest"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("managed_image_id").Exists(), check.That(data.ResourceName).Key("target_region.#").HasValue("1"), check.That(data.ResourceName).Key("target_region.0.storage_account_type").HasValue("Standard_LRS"), @@ -70,12 +69,12 @@ func TestAccDataSourceSharedImageVersion_recent(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_shared_image_version", "test") r := SharedImageVersionDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { // need to create a vm and then reference it in the image creation Config: SharedImageVersionResource{}.setup(data), Destroy: false, - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( data.CheckWithClientForResource(ImageResource{}.virtualMachineExists, "azurerm_virtual_machine.testsource"), data.CheckWithClientForResource(ImageResource{}.generalizeVirtualMachine(data), 
"azurerm_virtual_machine.testsource"), ), @@ -85,7 +84,7 @@ func TestAccDataSourceSharedImageVersion_recent(t *testing.T) { }, { Config: r.customName(data, "recent"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("managed_image_id").Exists(), check.That(data.ResourceName).Key("target_region.#").HasValue("1"), check.That(data.ResourceName).Key("target_region.0.storage_account_type").HasValue("Standard_LRS"), diff --git a/azurerm/internal/services/compute/shared_image_version_resource.go b/azurerm/internal/services/compute/shared_image_version_resource.go index bfb080f29624..0592ecde84bf 100644 --- a/azurerm/internal/services/compute/shared_image_version_resource.go +++ b/azurerm/internal/services/compute/shared_image_version_resource.go @@ -7,9 +7,6 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" @@ -18,12 +15,13 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func resourceSharedImageVersion() *schema.Resource { - return &schema.Resource{ 
+func resourceSharedImageVersion() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceSharedImageVersionCreateUpdate, Read: resourceSharedImageVersionRead, Update: resourceSharedImageVersionCreateUpdate, @@ -34,30 +32,30 @@ func resourceSharedImageVersion() *schema.Resource { return err }), - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(30 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.SharedImageVersionName, }, "gallery_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.SharedImageGalleryName, }, "image_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.SharedImageName, @@ -68,28 +66,28 @@ func resourceSharedImageVersion() *schema.Resource { "resource_group_name": azure.SchemaResourceGroupName(), "target_region": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, StateFunc: location.StateFunc, DiffSuppressFunc: location.DiffSuppressFunc, }, "regional_replica_count": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Required: true, }, // The Service API doesn't support to update 
`storage_account_type`. So it has to recreate the resource for updating `storage_account_type`. // However, `ForceNew` cannot be used since resource would be recreated while adding or removing `target_region`. // And `CustomizeDiff` also cannot be used since it doesn't support in a `Set`. - // So currently terraform would directly return the error message from Service API while updating this property. If this property needs to be updated, please recreate this resource. + // So currently terraform would directly return the error message from Service API while updating this property. If this property needs to be updated, please recreate this resource. "storage_account_type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ValidateFunc: validation.StringInSlice([]string{ string(compute.StorageAccountTypeStandardLRS), @@ -102,7 +100,7 @@ func resourceSharedImageVersion() *schema.Resource { }, "os_disk_snapshot_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, ExactlyOneOf: []string{"os_disk_snapshot_id", "managed_image_id"}, @@ -110,7 +108,7 @@ func resourceSharedImageVersion() *schema.Resource { }, "managed_image_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, ValidateFunc: validation.Any( @@ -121,7 +119,7 @@ func resourceSharedImageVersion() *schema.Resource { }, "exclude_from_latest": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: false, }, @@ -131,7 +129,7 @@ func resourceSharedImageVersion() *schema.Resource { } } -func resourceSharedImageVersionCreateUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceSharedImageVersionCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.GalleryImageVersionsClient ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -199,7 +197,7 @@ func
resourceSharedImageVersionCreateUpdate(d *schema.ResourceData, meta interfa return resourceSharedImageVersionRead(d, meta) } -func resourceSharedImageVersionRead(d *schema.ResourceData, meta interface{}) error { +func resourceSharedImageVersionRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.GalleryImageVersionsClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() @@ -253,7 +251,7 @@ func resourceSharedImageVersionRead(d *schema.ResourceData, meta interface{}) er return tags.FlattenAndSet(d, resp.Tags) } -func resourceSharedImageVersionDelete(d *schema.ResourceData, meta interface{}) error { +func resourceSharedImageVersionDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.GalleryImageVersionsClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() @@ -275,7 +273,7 @@ func resourceSharedImageVersionDelete(d *schema.ResourceData, meta interface{}) // @tombuildsstuff: there appears to be an eventual consistency issue here timeout, _ := ctx.Deadline() log.Printf("[DEBUG] Waiting for %s to be eventually deleted", *id) - stateConf := &resource.StateChangeConf{ + stateConf := &pluginsdk.StateChangeConf{ Pending: []string{"Exists"}, Target: []string{"NotFound"}, Refresh: sharedImageVersionDeleteStateRefreshFunc(ctx, client, *id), @@ -284,14 +282,14 @@ func resourceSharedImageVersionDelete(d *schema.ResourceData, meta interface{}) Timeout: time.Until(timeout), } - if _, err := stateConf.WaitForState(); err != nil { + if _, err := stateConf.WaitForStateContext(ctx); err != nil { return fmt.Errorf("waiting for %s to be deleted: %+v", *id, err) } return nil } -func sharedImageVersionDeleteStateRefreshFunc(ctx context.Context, client *compute.GalleryImageVersionsClient, id parse.SharedImageVersionId) resource.StateRefreshFunc { +func sharedImageVersionDeleteStateRefreshFunc(ctx context.Context, client 
*compute.GalleryImageVersionsClient, id parse.SharedImageVersionId) pluginsdk.StateRefreshFunc { // Whilst the Shared Image Version is deleted quickly, it appears it's not actually finished replicating at this time // so the deletion of the parent Shared Image fails with "can not delete until nested resources are deleted" // ergo we need to poll on this for a bit @@ -309,8 +307,8 @@ func sharedImageVersionDeleteStateRefreshFunc(ctx context.Context, client *compu } } -func expandSharedImageVersionTargetRegions(d *schema.ResourceData) *[]compute.TargetRegion { - vs := d.Get("target_region").(*schema.Set) +func expandSharedImageVersionTargetRegions(d *pluginsdk.ResourceData) *[]compute.TargetRegion { + vs := d.Get("target_region").(*pluginsdk.Set) results := make([]compute.TargetRegion, 0) for _, v := range vs.List() { diff --git a/azurerm/internal/services/compute/shared_image_version_resource_test.go b/azurerm/internal/services/compute/shared_image_version_resource_test.go index c6dad56d3382..9299d1392c79 100644 --- a/azurerm/internal/services/compute/shared_image_version_resource_test.go +++ b/azurerm/internal/services/compute/shared_image_version_resource_test.go @@ -6,12 +6,11 @@ import ( "testing" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -22,18 +21,18 @@ func 
TestAccSharedImageVersion_basic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_shared_image_version", "test") r := SharedImageVersionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // need to create a vm and then reference it in the image creation Config: r.setup(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( data.CheckWithClientForResource(ImageResource{}.virtualMachineExists, "azurerm_virtual_machine.testsource"), data.CheckWithClientForResource(ImageResource{}.generalizeVirtualMachine(data), "azurerm_virtual_machine.testsource"), ), }, { Config: r.imageVersion(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("managed_image_id").Exists(), check.That(data.ResourceName).Key("target_region.#").HasValue("1"), @@ -41,7 +40,7 @@ func TestAccSharedImageVersion_basic(t *testing.T) { }, { Config: r.imageVersionUpdated(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("managed_image_id").Exists(), check.That(data.ResourceName).Key("target_region.#").HasValue("2"), @@ -55,19 +54,19 @@ func TestAccSharedImageVersion_basic(t *testing.T) { func TestAccSharedImageVersion_storageAccountTypeLrs(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_shared_image_version", "test") r := SharedImageVersionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // need to create a vm and then reference it in the image creation Config: r.setup(data), Destroy: false, - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( data.CheckWithClientForResource(ImageResource{}.virtualMachineExists, "azurerm_virtual_machine.testsource"), 
data.CheckWithClientForResource(ImageResource{}.generalizeVirtualMachine(data), "azurerm_virtual_machine.testsource"), ), }, { Config: r.imageVersionStorageAccountType(data, "Standard_LRS"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("managed_image_id").Exists(), check.That(data.ResourceName).Key("target_region.#").HasValue("1"), @@ -81,19 +80,19 @@ func TestAccSharedImageVersion_storageAccountTypeZrs(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_shared_image_version", "test") r := SharedImageVersionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // need to create a vm and then reference it in the image creation Config: r.setup(data), Destroy: false, - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( data.CheckWithClientForResource(ImageResource{}.virtualMachineExists, "azurerm_virtual_machine.testsource"), data.CheckWithClientForResource(ImageResource{}.generalizeVirtualMachine(data), "azurerm_virtual_machine.testsource"), ), }, { Config: r.imageVersionStorageAccountType(data, "Standard_ZRS"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("managed_image_id").Exists(), check.That(data.ResourceName).Key("target_region.#").HasValue("1"), @@ -107,10 +106,10 @@ func TestAccSharedImageVersion_specializedImageVersionBySnapshot(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_shared_image_version", "test") r := SharedImageVersionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.imageVersionSpecializedBySnapshot(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), // the 
share image version will generate a shared access signature (SAS) on the referenced snapshot and keep it active until the replication is complete // in the meantime, the service will return success of creation before the replication complete. @@ -126,10 +125,10 @@ func TestAccSharedImageVersion_specializedImageVersionByVM(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_shared_image_version", "test") r := SharedImageVersionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.imageVersionSpecializedByVM(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -141,19 +140,19 @@ func TestAccSharedImageVersion_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_shared_image_version", "test") r := SharedImageVersionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // need to create a vm and then reference it in the image creation Config: r.setup(data), Destroy: false, - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( data.CheckWithClientForResource(ImageResource{}.virtualMachineExists, "azurerm_virtual_machine.testsource"), data.CheckWithClientForResource(ImageResource{}.generalizeVirtualMachine(data), "azurerm_virtual_machine.testsource"), ), }, { Config: r.imageVersion(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("managed_image_id").Exists(), check.That(data.ResourceName).Key("target_region.#").HasValue("1"), @@ -166,7 +165,7 @@ func TestAccSharedImageVersion_requiresImport(t *testing.T) { }) } -func (r SharedImageVersionResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (r SharedImageVersionResource) 
Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := parse.SharedImageVersionID(state.ID) if err != nil { return nil, err @@ -180,7 +179,7 @@ func (r SharedImageVersionResource) Exists(ctx context.Context, clients *clients return utils.Bool(resp.ID != nil), nil } -func (SharedImageVersionResource) revokeSnapshot(ctx context.Context, client *clients.Client, state *terraform.InstanceState) error { +func (SharedImageVersionResource) revokeSnapshot(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) error { snapShotName := state.Attributes["name"] resourceGroup := state.Attributes["resource_group_name"] diff --git a/azurerm/internal/services/compute/shared_image_versions_data_source.go b/azurerm/internal/services/compute/shared_image_versions_data_source.go index b8d2f9cd70d2..4b525cc9a683 100644 --- a/azurerm/internal/services/compute/shared_image_versions_data_source.go +++ b/azurerm/internal/services/compute/shared_image_versions_data_source.go @@ -5,32 +5,32 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func dataSourceSharedImageVersions() *schema.Resource { - return &schema.Resource{ +func dataSourceSharedImageVersions() *pluginsdk.Resource { + return &pluginsdk.Resource{ Read: 
dataSourceSharedImageVersionsRead, - Timeouts: &schema.ResourceTimeout{ - Read: schema.DefaultTimeout(5 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Read: pluginsdk.DefaultTimeout(5 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "gallery_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validate.SharedImageGalleryName, }, "image_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validate.SharedImageName, }, @@ -40,39 +40,39 @@ func dataSourceSharedImageVersions() *schema.Resource { "tags_filter": tags.Schema(), "images": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "location": azure.SchemaLocationForDataSource(), "managed_image_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "target_region": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "regional_replica_count": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Computed: true, }, "storage_account_type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, }, @@ -80,7 +80,7 @@ func dataSourceSharedImageVersions() *schema.Resource { }, "exclude_from_latest": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Computed: true, }, @@ -92,7 +92,7 @@ func dataSourceSharedImageVersions() *schema.Resource { } } -func dataSourceSharedImageVersionsRead(d *schema.ResourceData, meta interface{}) error { +func 
dataSourceSharedImageVersionsRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.GalleryImageVersionsClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() diff --git a/azurerm/internal/services/compute/shared_image_versions_data_source_test.go b/azurerm/internal/services/compute/shared_image_versions_data_source_test.go index 68635639c270..bf06aa154d07 100644 --- a/azurerm/internal/services/compute/shared_image_versions_data_source_test.go +++ b/azurerm/internal/services/compute/shared_image_versions_data_source_test.go @@ -5,7 +5,6 @@ import ( "regexp" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -17,19 +16,19 @@ func TestAccDataSourceSharedImageVersions_basic(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_shared_image_versions", "test") r := SharedImageVersionsDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { // need to create a vm and then reference it in the image creation Config: SharedImageVersionResource{}.setup(data), Destroy: false, - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( data.CheckWithClientForResource(ImageResource{}.virtualMachineExists, "azurerm_virtual_machine.testsource"), data.CheckWithClientForResource(ImageResource{}.generalizeVirtualMachine(data), "azurerm_virtual_machine.testsource"), ), }, { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("images.0.managed_image_id").Exists(), check.That(data.ResourceName).Key("images.0.target_region.#").HasValue("1"), 
check.That(data.ResourceName).Key("images.0.target_region.0.storage_account_type").HasValue("Standard_LRS"), @@ -42,12 +41,12 @@ func TestAccDataSourceSharedImageVersions_tagsFilterError(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_shared_image_versions", "test") r := SharedImageVersionsDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { // need to create a vm and then reference it in the image creation Config: SharedImageVersionResource{}.setup(data), Destroy: false, - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( data.CheckWithClientForResource(ImageResource{}.virtualMachineExists, "azurerm_virtual_machine.testsource"), data.CheckWithClientForResource(ImageResource{}.generalizeVirtualMachine(data), "azurerm_virtual_machine.testsource"), ), @@ -62,19 +61,19 @@ func TestAccDataSourceSharedImageVersions_tagsFilterError(t *testing.T) { func TestAccDataSourceSharedImageVersions_tagsFilter(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_shared_image_versions", "test") r := SharedImageVersionsDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { // need to create a vm and then reference it in the image creation Config: SharedImageVersionResource{}.setup(data), Destroy: false, - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( data.CheckWithClientForResource(ImageResource{}.virtualMachineExists, "azurerm_virtual_machine.testsource"), data.CheckWithClientForResource(ImageResource{}.generalizeVirtualMachine(data), "azurerm_virtual_machine.testsource"), ), }, { Config: r.tagsFilter(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("images.#").HasValue("1"), ), }, diff --git a/azurerm/internal/services/compute/shared_schema.go b/azurerm/internal/services/compute/shared_schema.go index 
12943dd446b9..db30db4bf955 100644 --- a/azurerm/internal/services/compute/shared_schema.go +++ b/azurerm/internal/services/compute/shared_schema.go @@ -4,32 +4,32 @@ import ( "fmt" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" keyVaultValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/keyvault/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func additionalUnattendContentSchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, +func additionalUnattendContentSchema() *pluginsdk.Schema { + return &pluginsdk.Schema{ + Type: pluginsdk.TypeList, Optional: true, // whilst the SDK supports updating, the API doesn't: // Code="PropertyChangeNotAllowed" // Message="Changing property 'windowsConfiguration.additionalUnattendContent' is not allowed." 
// Target="windowsConfiguration.additionalUnattendContent ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "content": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, Sensitive: true, }, "setting": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validation.StringInSlice([]string{ @@ -61,7 +61,7 @@ func expandAdditionalUnattendContent(input []interface{}) *[]compute.AdditionalU return &output } -func flattenAdditionalUnattendContent(input *[]compute.AdditionalUnattendContent, d *schema.ResourceData) []interface{} { +func flattenAdditionalUnattendContent(input *[]compute.AdditionalUnattendContent, d *pluginsdk.ResourceData) []interface{} { if input == nil { return []interface{}{} } @@ -95,17 +95,17 @@ func flattenAdditionalUnattendContent(input *[]compute.AdditionalUnattendContent return output } -func bootDiagnosticsSchema() *schema.Schema { +func bootDiagnosticsSchema() *pluginsdk.Schema { //lintignore:XS003 - return &schema.Schema{ - Type: schema.TypeList, + return &pluginsdk.Schema{ + Type: pluginsdk.TypeList, Optional: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ // TODO: should this be `storage_account_endpoint`? 
"storage_account_uri": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, // TODO: validation }, @@ -163,15 +163,15 @@ func flattenBootDiagnostics(input *compute.DiagnosticsProfile) []interface{} { } } -func linuxSecretSchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, +func linuxSecretSchema() *pluginsdk.Schema { + return &pluginsdk.Schema{ + Type: pluginsdk.TypeList, Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ // whilst this isn't present in the nested object it's required when this is specified "key_vault_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: azure.ValidateResourceID, // TODO: more granular validation }, @@ -179,13 +179,13 @@ func linuxSecretSchema() *schema.Schema { // whilst we /could/ flatten this to `certificate_urls` we're intentionally not to keep this // closer to the Windows VMSS resource, which will also take a `store` param "certificate": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Required: true, MinItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "url": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: keyVaultValidate.NestedItemId, }, @@ -204,7 +204,7 @@ func expandLinuxSecrets(input []interface{}) *[]compute.VaultSecretGroup { v := raw.(map[string]interface{}) keyVaultId := v["key_vault_id"].(string) - certificatesRaw := v["certificate"].(*schema.Set).List() + certificatesRaw := v["certificate"].(*pluginsdk.Set).List() certificates := make([]compute.VaultCertificate, 0) for _, certificateRaw := range certificatesRaw { certificateV := certificateRaw.(map[string]interface{}) @@ -262,28 +262,28 @@ func flattenLinuxSecrets(input *[]compute.VaultSecretGroup) []interface{} { return output } -func planSchema() 
*schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, +func planSchema() *pluginsdk.Schema { + return &pluginsdk.Schema{ + Type: pluginsdk.TypeList, Optional: true, ForceNew: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, }, "product": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, }, "publisher": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, }, @@ -335,39 +335,39 @@ func flattenPlan(input *compute.Plan) []interface{} { } } -func sourceImageReferenceSchema(isVirtualMachine bool) *schema.Schema { +func sourceImageReferenceSchema(isVirtualMachine bool) *pluginsdk.Schema { // whilst originally I was hoping we could use the 'id' from `azurerm_platform_image' unfortunately Azure doesn't // like this as a value for the 'id' field: // Id /...../Versions/16.04.201909091 is not a valid resource reference." 
// as such the image is split into two fields (source_image_id and source_image_reference) to provide better validation - return &schema.Schema{ - Type: schema.TypeList, + return &pluginsdk.Schema{ + Type: pluginsdk.TypeList, Optional: true, ForceNew: isVirtualMachine, MaxItems: 1, ConflictsWith: []string{"source_image_id"}, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "publisher": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: isVirtualMachine, ValidateFunc: validation.StringIsNotEmpty, }, "offer": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: isVirtualMachine, ValidateFunc: validation.StringIsNotEmpty, }, "sku": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: isVirtualMachine, ValidateFunc: validation.StringIsNotEmpty, }, "version": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: isVirtualMachine, ValidateFunc: validation.StringIsNotEmpty, @@ -428,19 +428,19 @@ func flattenSourceImageReference(input *compute.ImageReference) []interface{} { } } -func winRmListenerSchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeSet, +func winRmListenerSchema() *pluginsdk.Schema { + return &pluginsdk.Schema{ + Type: pluginsdk.TypeSet, Optional: true, // Whilst the SDK allows you to modify this, the API does not: // Code="PropertyChangeNotAllowed" // Message="Changing property 'windowsConfiguration.winRM.listeners' is not allowed." 
// Target="windowsConfiguration.winRM.listeners" ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "protocol": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validation.StringInSlice([]string{ @@ -450,7 +450,7 @@ func winRmListenerSchema() *schema.Schema { }, "certificate_url": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, ValidateFunc: keyVaultValidate.NestedItemId, @@ -505,31 +505,31 @@ func flattenWinRMListener(input *compute.WinRMConfiguration) []interface{} { return output } -func windowsSecretSchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, +func windowsSecretSchema() *pluginsdk.Schema { + return &pluginsdk.Schema{ + Type: pluginsdk.TypeList, Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ // whilst this isn't present in the nested object it's required when this is specified "key_vault_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: azure.ValidateResourceID, }, "certificate": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Required: true, MinItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "store": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "url": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: keyVaultValidate.NestedItemId, }, @@ -548,7 +548,7 @@ func expandWindowsSecrets(input []interface{}) *[]compute.VaultSecretGroup { v := raw.(map[string]interface{}) keyVaultId := v["key_vault_id"].(string) - certificatesRaw := v["certificate"].(*schema.Set).List() + certificatesRaw := v["certificate"].(*pluginsdk.Set).List() 
certificates := make([]compute.VaultCertificate, 0) for _, certificateRaw := range certificatesRaw { certificateV := certificateRaw.(map[string]interface{}) diff --git a/azurerm/internal/services/compute/snapshot_data_source.go b/azurerm/internal/services/compute/snapshot_data_source.go index 459c94c44556..69d9086394e4 100644 --- a/azurerm/internal/services/compute/snapshot_data_source.go +++ b/azurerm/internal/services/compute/snapshot_data_source.go @@ -4,24 +4,24 @@ import ( "fmt" "time" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func dataSourceSnapshot() *schema.Resource { - return &schema.Resource{ +func dataSourceSnapshot() *pluginsdk.Resource { + return &pluginsdk.Resource{ Read: dataSourceSnapshotRead, - Timeouts: &schema.ResourceTimeout{ - Read: schema.DefaultTimeout(5 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Read: pluginsdk.DefaultTimeout(5 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, @@ -29,72 +29,72 @@ func dataSourceSnapshot() *schema.Resource { // Computed "os_type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "disk_size_gb": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Computed: true, }, "time_created": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "creation_option": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "source_uri": { - Type: schema.TypeString, + Type: 
pluginsdk.TypeString, Computed: true, }, "source_resource_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "storage_account_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "encryption_settings": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Computed: true, }, "disk_encryption_key": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "secret_url": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "source_vault_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, }, }, }, "key_encryption_key": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "key_url": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "source_vault_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, }, @@ -107,7 +107,7 @@ func dataSourceSnapshot() *schema.Resource { } } -func dataSourceSnapshotRead(d *schema.ResourceData, meta interface{}) error { +func dataSourceSnapshotRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.SnapshotsClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() diff --git a/azurerm/internal/services/compute/snapshot_data_source_test.go b/azurerm/internal/services/compute/snapshot_data_source_test.go index 3d7587bb7bf1..b0bd6f20afd0 100644 --- 
a/azurerm/internal/services/compute/snapshot_data_source_test.go +++ b/azurerm/internal/services/compute/snapshot_data_source_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -16,10 +15,10 @@ func TestAccDataSourceSnapshot_basic(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_snapshot", "snapshot") r := SnapshotDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("name").Exists(), check.That(data.ResourceName).Key("resource_group_name").Exists(), ), @@ -31,10 +30,10 @@ func TestAccDataSourceSnapshot_encryption(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_snapshot", "snapshot") r := SnapshotDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.encryption(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("name").Exists(), check.That(data.ResourceName).Key("resource_group_name").Exists(), check.That(data.ResourceName).Key("encryption_settings.0.enabled").HasValue("true"), diff --git a/azurerm/internal/services/compute/snapshot_resource.go b/azurerm/internal/services/compute/snapshot_resource.go index 0c10da211f6f..0b8681c1b6d2 100644 --- a/azurerm/internal/services/compute/snapshot_resource.go +++ b/azurerm/internal/services/compute/snapshot_resource.go @@ -5,23 +5,21 @@ import ( "log" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/validate" - 
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/suppress" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func resourceSnapshot() *schema.Resource { - return &schema.Resource{ +func resourceSnapshot() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceSnapshotCreateUpdate, Read: resourceSnapshotRead, Update: resourceSnapshotCreateUpdate, @@ -29,16 +27,16 @@ func resourceSnapshot() *schema.Resource { // TODO: replace this with an importer which validates the ID during import Importer: pluginsdk.DefaultImporter(), - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(30 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), }, - 
Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.SnapshotName, @@ -49,7 +47,7 @@ func resourceSnapshot() *schema.Resource { "resource_group_name": azure.SchemaResourceGroupName(), "create_option": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ string(compute.Copy), @@ -59,25 +57,25 @@ func resourceSnapshot() *schema.Resource { }, "source_uri": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, }, "source_resource_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, }, "storage_account_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, }, "disk_size_gb": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Computed: true, }, @@ -89,7 +87,7 @@ func resourceSnapshot() *schema.Resource { } } -func resourceSnapshotCreateUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceSnapshotCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.SnapshotsClient ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -165,7 +163,7 @@ func resourceSnapshotCreateUpdate(d *schema.ResourceData, meta interface{}) erro return resourceSnapshotRead(d, meta) } -func resourceSnapshotRead(d *schema.ResourceData, meta interface{}) error { +func resourceSnapshotRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.SnapshotsClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() @@ -216,7 +214,7 @@ func resourceSnapshotRead(d *schema.ResourceData, meta interface{}) error { return tags.FlattenAndSet(d, resp.Tags) } -func resourceSnapshotDelete(d 
*schema.ResourceData, meta interface{}) error { +func resourceSnapshotDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.SnapshotsClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() diff --git a/azurerm/internal/services/compute/snapshot_resource_test.go b/azurerm/internal/services/compute/snapshot_resource_test.go index 15aebb9fced2..957339e6e606 100644 --- a/azurerm/internal/services/compute/snapshot_resource_test.go +++ b/azurerm/internal/services/compute/snapshot_resource_test.go @@ -5,12 +5,11 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -21,10 +20,10 @@ func TestAccSnapshot_fromManagedDisk(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_snapshot", "test") r := SnapshotResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.fromManagedDisk(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -36,10 +35,10 @@ func TestAccSnapshot_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_snapshot", "test") r := SnapshotResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.fromManagedDisk(data), - Check: 
resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -54,10 +53,10 @@ func TestAccSnapshot_encryption(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_snapshot", "test") r := SnapshotResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.encryption(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -69,16 +68,16 @@ func TestAccSnapshot_update(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_snapshot", "test") r := SnapshotResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.fromManagedDisk(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, { Config: r.fromManagedDiskUpdated(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -89,10 +88,10 @@ func TestAccSnapshot_extendingManagedDisk(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_snapshot", "test") r := SnapshotResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.extendingManagedDisk(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -103,10 +102,10 @@ func TestAccSnapshot_fromExistingSnapshot(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_snapshot", "second") r := SnapshotResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.fromExistingSnapshot(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( 
check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -117,17 +116,17 @@ func TestAccSnapshot_fromUnmanagedDisk(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_snapshot", "test") r := SnapshotResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.fromUnmanagedDisk(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, }) } -func (t SnapshotResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (t SnapshotResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := azure.ParseAzureResourceID(state.ID) if err != nil { return nil, err diff --git a/azurerm/internal/services/compute/ssh_keys.go b/azurerm/internal/services/compute/ssh_keys.go index 4c0d0fb426f3..19ea7fccdb0c 100644 --- a/azurerm/internal/services/compute/ssh_keys.go +++ b/azurerm/internal/services/compute/ssh_keys.go @@ -7,25 +7,25 @@ import ( "regexp" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func SSHKeysSchema(isVirtualMachine bool) *schema.Schema { +func SSHKeysSchema(isVirtualMachine bool) *pluginsdk.Schema { // the SSH Keys for a Virtual Machine cannot be changed once provisioned: // Code="PropertyChangeNotAllowed" Message="Changing property 'linuxConfiguration.ssh.publicKeys' is not allowed." 
- return &schema.Schema{ - Type: schema.TypeSet, + return &pluginsdk.Schema{ + Type: pluginsdk.TypeSet, Optional: true, ForceNew: isVirtualMachine, Set: SSHKeySchemaHash, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "public_key": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: isVirtualMachine, ValidateFunc: validate.SSHKey, @@ -33,7 +33,7 @@ func SSHKeysSchema(isVirtualMachine bool) *schema.Schema { }, "username": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: isVirtualMachine, ValidateFunc: validation.StringIsNotEmpty, @@ -114,7 +114,7 @@ func parseUsernameFromAuthorizedKeysPath(input string) *string { return nil } -func SSHKeyDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { +func SSHKeyDiffSuppress(_, old, new string, _ *pluginsdk.ResourceData) bool { oldNormalised, err := NormaliseSSHKey(old) if err != nil { log.Printf("[DEBUG] error normalising ssh key %q: %+v", old, err) @@ -146,5 +146,5 @@ func SSHKeySchemaHash(v interface{}) int { buf.WriteString(fmt.Sprintf("%s", m["username"])) } - return schema.HashString(buf.String()) + return pluginsdk.HashString(buf.String()) } diff --git a/azurerm/internal/services/compute/ssh_public_key_data_source.go b/azurerm/internal/services/compute/ssh_public_key_data_source.go index 13a345665a8d..3c223ffc27ed 100644 --- a/azurerm/internal/services/compute/ssh_public_key_data_source.go +++ b/azurerm/internal/services/compute/ssh_public_key_data_source.go @@ -5,27 +5,27 @@ import ( "regexp" "time" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func dataSourceSshPublicKey() *schema.Resource { - return &schema.Resource{ +func dataSourceSshPublicKey() *pluginsdk.Resource { + return &pluginsdk.Resource{ Read: dataSourceSshPublicKeyRead, - Timeouts: &schema.ResourceTimeout{ - Read: schema.DefaultTimeout(5 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Read: pluginsdk.DefaultTimeout(5 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringMatch( regexp.MustCompile("^[-a-zA-Z0-9(_)]{1,128}$"), @@ -36,7 +36,7 @@ func dataSourceSshPublicKey() *schema.Resource { "resource_group_name": azure.SchemaResourceGroupNameForDataSource(), "public_key": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, @@ -45,7 +45,7 @@ func dataSourceSshPublicKey() *schema.Resource { } } -func dataSourceSshPublicKeyRead(d *schema.ResourceData, meta interface{}) error { +func dataSourceSshPublicKeyRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.SSHPublicKeysClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() diff --git a/azurerm/internal/services/compute/ssh_public_key_data_source_test.go b/azurerm/internal/services/compute/ssh_public_key_data_source_test.go index 35b4824881b6..9f8c6a0fbcdc 100644 --- a/azurerm/internal/services/compute/ssh_public_key_data_source_test.go +++ 
b/azurerm/internal/services/compute/ssh_public_key_data_source_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -18,10 +17,10 @@ func TestAccDataSourceAzureSshPublicKey_basic(t *testing.T) { key1 := "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC+wWK73dCr+jgQOAxNsHAnNNNMEMWOHYEccp6wJm2gotpr9katuF/ZAdou5AaW1C61slRkHRkpRRX9FA9CYBiitZgvCCz+3nWNN7l/Up54Zps/pHWGZLHNJZRYyAB6j5yVLMVHIHriY49d/GZTZVNB8GoJv9Gakwc/fuEZYYl4YDFiGMBP///TzlI4jhiJzjKnEvqPFki5p2ZRJqcbCiF4pJrxUQR/RXqVFQdbRLZgYfJ8xGB878RENq3yQ39d8dVOkq4edbkzwcUmwwwkYVPIoDGsYLaRHnG+To7FvMeyO7xDVQkMKzopTQV8AuKpyvpqu0a9pWOMaiCyDytO7GGN you@me.com" - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.template(data, key1), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("public_key").HasValue(key1), ), }, diff --git a/azurerm/internal/services/compute/ssh_public_key_resource.go b/azurerm/internal/services/compute/ssh_public_key_resource.go index bad45f9ef19b..29b283ef0a81 100644 --- a/azurerm/internal/services/compute/ssh_public_key_resource.go +++ b/azurerm/internal/services/compute/ssh_public_key_resource.go @@ -6,24 +6,22 @@ import ( "regexp" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/validate" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" "github.com/hashicorp/go-azure-helpers/response" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func resourceSshPublicKey() *schema.Resource { - return &schema.Resource{ +func resourceSshPublicKey() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceSshPublicKeyCreate, Read: resourceSshPublicKeyRead, Update: resourceSshPublicKeyUpdate, @@ -34,16 +32,16 @@ func resourceSshPublicKey() *schema.Resource { return err }), - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(45 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(45 * time.Minute), - Delete: schema.DefaultTimeout(45 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(45 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(45 * time.Minute), + Delete: pluginsdk.DefaultTimeout(45 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validation.StringMatch( @@ -59,7 +57,7 @@ func resourceSshPublicKey() *schema.Resource { 
"location": azure.SchemaLocation(), "public_key": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: false, ValidateFunc: validate.SSHKey, @@ -70,7 +68,7 @@ func resourceSshPublicKey() *schema.Resource { } } -func resourceSshPublicKeyCreate(d *schema.ResourceData, meta interface{}) error { +func resourceSshPublicKeyCreate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.SSHPublicKeysClient ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -120,7 +118,7 @@ func resourceSshPublicKeyCreate(d *schema.ResourceData, meta interface{}) error return resourceSshPublicKeyRead(d, meta) } -func resourceSshPublicKeyRead(d *schema.ResourceData, meta interface{}) error { +func resourceSshPublicKeyRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.SSHPublicKeysClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) @@ -155,7 +153,7 @@ func resourceSshPublicKeyRead(d *schema.ResourceData, meta interface{}) error { return tags.FlattenAndSet(d, resp.Tags) } -func resourceSshPublicKeyUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceSshPublicKeyUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.SSHPublicKeysClient ctx, cancel := timeouts.ForUpdate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -201,7 +199,7 @@ func resourceSshPublicKeyUpdate(d *schema.ResourceData, meta interface{}) error return resourceSshPublicKeyRead(d, meta) } -func resourceSshPublicKeyDelete(d *schema.ResourceData, meta interface{}) error { +func resourceSshPublicKeyDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.SSHPublicKeysClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() diff --git a/azurerm/internal/services/compute/ssh_public_key_resource_test.go 
b/azurerm/internal/services/compute/ssh_public_key_resource_test.go index 3eacb5e01197..6cb39e9c81ab 100644 --- a/azurerm/internal/services/compute/ssh_public_key_resource_test.go +++ b/azurerm/internal/services/compute/ssh_public_key_resource_test.go @@ -5,12 +5,11 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -24,10 +23,10 @@ func TestAccSshPublicKey_CreateUpdate(t *testing.T) { key1 := "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC+wWK73dCr+jgQOAxNsHAnNNNMEMWOHYEccp6wJm2gotpr9katuF/ZAdou5AaW1C61slRkHRkpRRX9FA9CYBiitZgvCCz+3nWNN7l/Up54Zps/pHWGZLHNJZRYyAB6j5yVLMVHIHriY49d/GZTZVNB8GoJv9Gakwc/fuEZYYl4YDFiGMBP///TzlI4jhiJzjKnEvqPFki5p2ZRJqcbCiF4pJrxUQR/RXqVFQdbRLZgYfJ8xGB878RENq3yQ39d8dVOkq4edbkzwcUmwwwkYVPIoDGsYLaRHnG+To7FvMeyO7xDVQkMKzopTQV8AuKpyvpqu0a9pWOMaiCyDytO7GGN you@me.com" key2 := "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC0/NDMj2wG6bSa6jbn6E3LYlUsYiWMp1CQ2sGAijPALW6OrSu30lz7nKpoh8Qdw7/A4nAJgweI5Oiiw5/BOaGENM70Go+VM8LQMSxJ4S7/8MIJEZQp5HcJZ7XDTcEwruknrd8mllEfGyFzPvJOx6QAQocFhXBW6+AlhM3gn/dvV5vdrO8ihjET2GoDUqXPYC57ZuY+/Fz6W3KV8V97BvNUhpY5yQrP5VpnyvvXNFQtzDfClTvZFPuoHQi3/KYPi6O0FSD74vo8JOBZZY09boInPejkm9fvHQqfh0bnN7B6XJoUwC1Qprrx+XIy7ust5AEn5XL7d4lOvcR14MxDDKEp you@me.com" - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.template(data, key1), - Check: resource.ComposeTestCheckFunc( + 
Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("public_key").HasValue(key1), ), @@ -35,7 +34,7 @@ func TestAccSshPublicKey_CreateUpdate(t *testing.T) { data.ImportStep(), { Config: r.template(data, key2), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("public_key").HasValue(key2), ), @@ -44,7 +43,7 @@ func TestAccSshPublicKey_CreateUpdate(t *testing.T) { }) } -func (t SSHPublicKeyResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (t SSHPublicKeyResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := parse.SSHPublicKeyID(state.ID) if err != nil { return nil, err diff --git a/azurerm/internal/services/compute/validate/compute.go b/azurerm/internal/services/compute/validate/compute.go index 6b57d300ed1b..033972b60665 100644 --- a/azurerm/internal/services/compute/validate/compute.go +++ b/azurerm/internal/services/compute/validate/compute.go @@ -4,8 +4,8 @@ import ( "fmt" "regexp" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" ) func SharedImageGalleryName(v interface{}, k string) (warnings []string, errors []error) { @@ -51,16 +51,16 @@ func SharedImageVersionName(v interface{}, k string) (warnings []string, errors } // VirtualMachineTimeZone returns a case-sensitive validation function for the Time Zones for a Virtual Machine -func VirtualMachineTimeZone() schema.SchemaValidateFunc { +func VirtualMachineTimeZone() pluginsdk.SchemaValidateFunc { return virtualMachineTimeZone(false) } // 
VirtualMachineTimeZone returns a case-insensitive validation function for the Time Zones for a Virtual Machine -func VirtualMachineTimeZoneCaseInsensitive() schema.SchemaValidateFunc { +func VirtualMachineTimeZoneCaseInsensitive() pluginsdk.SchemaValidateFunc { return virtualMachineTimeZone(true) } -func virtualMachineTimeZone(ignoreCase bool) schema.SchemaValidateFunc { +func virtualMachineTimeZone(ignoreCase bool) pluginsdk.SchemaValidateFunc { // Candidates are listed here: http://jackstromberg.com/2017/01/list-of-time-zones-consumed-by-azure/ candidates := []string{ "", diff --git a/azurerm/internal/services/compute/validate/compute_test.go b/azurerm/internal/services/compute/validate/compute_test.go index 72095f8200c1..070ffceb0da9 100644 --- a/azurerm/internal/services/compute/validate/compute_test.go +++ b/azurerm/internal/services/compute/validate/compute_test.go @@ -1,9 +1,8 @@ package validate import ( + "strings" "testing" - - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" ) func TestSharedImageGalleryName(t *testing.T) { @@ -40,11 +39,11 @@ func TestSharedImageGalleryName(t *testing.T) { ShouldError: true, }, { - Input: acctest.RandString(79), + Input: strings.Repeat("a", 79), ShouldError: false, }, { - Input: acctest.RandString(80), + Input: strings.Repeat("a", 80), ShouldError: true, }, } @@ -99,11 +98,11 @@ func TestSharedImageName(t *testing.T) { ShouldError: false, }, { - Input: acctest.RandString(79), + Input: strings.Repeat("a", 79), ShouldError: false, }, { - Input: acctest.RandString(80), + Input: strings.Repeat("a", 80), ShouldError: true, }, } diff --git a/azurerm/internal/services/compute/validate/dedicated_host_group_name.go b/azurerm/internal/services/compute/validate/dedicated_host_group_name.go index 991fb09315a3..a3e3de7c3d5e 100644 --- a/azurerm/internal/services/compute/validate/dedicated_host_group_name.go +++ b/azurerm/internal/services/compute/validate/dedicated_host_group_name.go @@ -3,7 +3,7 @@ package validate import 
( "regexp" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" ) func DedicatedHostGroupName() func(i interface{}, k string) (warnings []string, errors []error) { diff --git a/azurerm/internal/services/compute/validate/snapshot_name_test.go b/azurerm/internal/services/compute/validate/snapshot_name_test.go index 2b437d5e1f00..db1e4fd3e9c5 100644 --- a/azurerm/internal/services/compute/validate/snapshot_name_test.go +++ b/azurerm/internal/services/compute/validate/snapshot_name_test.go @@ -1,13 +1,12 @@ package validate import ( + "strings" "testing" - - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" ) func TestSnapshotName_validation(t *testing.T) { - str := acctest.RandString(80) + str := strings.Repeat("a", 80) cases := []struct { Value string ErrCount int diff --git a/azurerm/internal/services/compute/virtual_machine.go b/azurerm/internal/services/compute/virtual_machine.go index 84afe5305af1..85034c8451da 100644 --- a/azurerm/internal/services/compute/virtual_machine.go +++ b/azurerm/internal/services/compute/virtual_machine.go @@ -2,33 +2,34 @@ package compute import ( "context" - "fmt" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/identity" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/validate" msiparse "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/msi/parse" - msivalidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/msi/validate" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/suppress" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func virtualMachineAdditionalCapabilitiesSchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, +type virtualMachineIdentity = identity.SystemAssignedUserAssigned + +func virtualMachineAdditionalCapabilitiesSchema() *pluginsdk.Schema { + return &pluginsdk.Schema{ + Type: pluginsdk.TypeList, Optional: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ // TODO: confirm this command // NOTE: requires registration to use: // $ az feature show --namespace Microsoft.Compute --name UltraSSDWithVMSS // $ az provider register -n Microsoft.Compute "ultra_ssd_enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: false, }, @@ -67,111 +68,47 @@ func flattenVirtualMachineAdditionalCapabilities(input *compute.AdditionalCapabi } } -func virtualMachineIdentitySchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{ - string(compute.ResourceIdentityTypeSystemAssigned), - string(compute.ResourceIdentityTypeUserAssigned), - string(compute.ResourceIdentityTypeSystemAssignedUserAssigned), - }, false), - }, - - "identity_ids": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: msivalidate.UserAssignedIdentityID, - }, - }, - - "principal_id": { - Type: schema.TypeString, - Computed: true, - }, - - "tenant_id": { - 
Type: schema.TypeString, - Computed: true, - }, - }, - }, - } -} - func expandVirtualMachineIdentity(input []interface{}) (*compute.VirtualMachineIdentity, error) { - if len(input) == 0 { - // TODO: Does this want to be this, or nil? - return &compute.VirtualMachineIdentity{ - Type: compute.ResourceIdentityTypeNone, - }, nil - } - - raw := input[0].(map[string]interface{}) - - identity := compute.VirtualMachineIdentity{ - Type: compute.ResourceIdentityType(raw["type"].(string)), + config, err := virtualMachineIdentity{}.Expand(input) + if err != nil { + return nil, err } - identityIdsRaw := raw["identity_ids"].(*schema.Set).List() - identityIds := make(map[string]*compute.VirtualMachineIdentityUserAssignedIdentitiesValue) - for _, v := range identityIdsRaw { - identityIds[v.(string)] = &compute.VirtualMachineIdentityUserAssignedIdentitiesValue{} - } - - if len(identityIds) > 0 { - if identity.Type != compute.ResourceIdentityTypeUserAssigned && identity.Type != compute.ResourceIdentityTypeSystemAssignedUserAssigned { - return nil, fmt.Errorf("`identity_ids` can only be specified when `type` includes `UserAssigned`") + var identityIds map[string]*compute.VirtualMachineIdentityUserAssignedIdentitiesValue + if config.UserAssignedIdentityIds != nil { + identityIds = map[string]*compute.VirtualMachineIdentityUserAssignedIdentitiesValue{} + for _, id := range *config.UserAssignedIdentityIds { + identityIds[id] = &compute.VirtualMachineIdentityUserAssignedIdentitiesValue{} } - - identity.UserAssignedIdentities = identityIds } - return &identity, nil + return &compute.VirtualMachineIdentity{ + Type: compute.ResourceIdentityType(config.Type), + UserAssignedIdentities: identityIds, + }, nil } func flattenVirtualMachineIdentity(input *compute.VirtualMachineIdentity) ([]interface{}, error) { - if input == nil || input.Type == compute.ResourceIdentityTypeNone { - return []interface{}{}, nil - } + var config *identity.ExpandedConfig - identityIds := make([]string, 0) - if 
input.UserAssignedIdentities != nil { - for key := range input.UserAssignedIdentities { - parsedId, err := msiparse.UserAssignedIdentityIDInsensitively(key) + if input != nil { + var identityIds []string + for id := range input.UserAssignedIdentities { + parsedId, err := msiparse.UserAssignedIdentityIDInsensitively(id) if err != nil { return nil, err } identityIds = append(identityIds, parsedId.ID()) } - } - principalId := "" - if input.PrincipalID != nil { - principalId = *input.PrincipalID - } - - tenantId := "" - if input.TenantID != nil { - tenantId = *input.TenantID + config = &identity.ExpandedConfig{ + Type: string(input.Type), + PrincipalId: input.PrincipalID, + TenantId: input.TenantID, + UserAssignedIdentityIds: &identityIds, + } } - - return []interface{}{ - map[string]interface{}{ - "type": string(input.Type), - "identity_ids": identityIds, - "principal_id": principalId, - "tenant_id": tenantId, - }, - }, nil + return virtualMachineIdentity{}.Flatten(config), nil } func expandVirtualMachineNetworkInterfaceIDs(input []interface{}) []compute.NetworkInterfaceReference { @@ -207,15 +144,15 @@ func flattenVirtualMachineNetworkInterfaceIDs(input *[]compute.NetworkInterfaceR return output } -func virtualMachineOSDiskSchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, +func virtualMachineOSDiskSchema() *pluginsdk.Schema { + return &pluginsdk.Schema{ + Type: pluginsdk.TypeList, Required: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "caching": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ string(compute.CachingTypesNone), @@ -224,7 +161,7 @@ func virtualMachineOSDiskSchema() *schema.Schema { }, false), }, "storage_account_type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, // whilst this appears in the Update block the API 
returns this when changing: // Changing property 'osDisk.managedDisk.storageAccountType' is not allowed @@ -239,14 +176,14 @@ func virtualMachineOSDiskSchema() *schema.Schema { // Optional "diff_disk_settings": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, ForceNew: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "option": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validation.StringInSlice([]string{ @@ -258,7 +195,7 @@ func virtualMachineOSDiskSchema() *schema.Schema { }, "disk_encryption_set_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, // the Compute/VM API is broken and returns the Resource Group name in UPPERCASE DiffSuppressFunc: suppress.CaseDifference, @@ -266,21 +203,21 @@ func virtualMachineOSDiskSchema() *schema.Schema { }, "disk_size_gb": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Computed: true, ValidateFunc: validation.IntBetween(0, 4095), }, "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, Computed: true, }, "write_accelerator_enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: false, }, diff --git a/azurerm/internal/services/compute/virtual_machine_data_disk_attachment_resource.go b/azurerm/internal/services/compute/virtual_machine_data_disk_attachment_resource.go index 5fbf3b1bfd59..91a797e3225e 100644 --- a/azurerm/internal/services/compute/virtual_machine_data_disk_attachment_resource.go +++ b/azurerm/internal/services/compute/virtual_machine_data_disk_attachment_resource.go @@ -6,20 +6,19 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/locks" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/suppress" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func resourceVirtualMachineDataDiskAttachment() *schema.Resource { - return &schema.Resource{ +func resourceVirtualMachineDataDiskAttachment() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceVirtualMachineDataDiskAttachmentCreateUpdate, Read: resourceVirtualMachineDataDiskAttachmentRead, Update: resourceVirtualMachineDataDiskAttachmentCreateUpdate, @@ -27,16 +26,16 @@ func resourceVirtualMachineDataDiskAttachment() *schema.Resource { // TODO: replace this with an importer which validates the ID during import Importer: pluginsdk.DefaultImporter(), - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(30 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "managed_disk_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: 
true, ForceNew: true, DiffSuppressFunc: suppress.CaseDifference, @@ -44,21 +43,21 @@ func resourceVirtualMachineDataDiskAttachment() *schema.Resource { }, "virtual_machine_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: azure.ValidateResourceID, }, "lun": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Required: true, ForceNew: true, ValidateFunc: validation.IntAtLeast(0), }, "caching": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ string(compute.CachingTypesNone), @@ -69,7 +68,7 @@ func resourceVirtualMachineDataDiskAttachment() *schema.Resource { }, "create_option": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, Default: string(compute.DiskCreateOptionTypesAttach), @@ -81,7 +80,7 @@ func resourceVirtualMachineDataDiskAttachment() *schema.Resource { }, "write_accelerator_enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: false, }, @@ -89,7 +88,7 @@ func resourceVirtualMachineDataDiskAttachment() *schema.Resource { } } -func resourceVirtualMachineDataDiskAttachmentCreateUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceVirtualMachineDataDiskAttachmentCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.VMClient ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -191,7 +190,7 @@ func resourceVirtualMachineDataDiskAttachmentCreateUpdate(d *schema.ResourceData return resourceVirtualMachineDataDiskAttachmentRead(d, meta) } -func resourceVirtualMachineDataDiskAttachmentRead(d *schema.ResourceData, meta interface{}) error { +func resourceVirtualMachineDataDiskAttachmentRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.VMClient ctx, cancel := 
timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() @@ -251,7 +250,7 @@ func resourceVirtualMachineDataDiskAttachmentRead(d *schema.ResourceData, meta i return nil } -func resourceVirtualMachineDataDiskAttachmentDelete(d *schema.ResourceData, meta interface{}) error { +func resourceVirtualMachineDataDiskAttachmentDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.VMClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() @@ -304,7 +303,7 @@ func resourceVirtualMachineDataDiskAttachmentDelete(d *schema.ResourceData, meta return nil } -func retrieveDataDiskAttachmentManagedDisk(d *schema.ResourceData, meta interface{}, id string) (*compute.Disk, error) { +func retrieveDataDiskAttachmentManagedDisk(d *pluginsdk.ResourceData, meta interface{}, id string) (*compute.Disk, error) { client := meta.(*clients.Client).Compute.DisksClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() diff --git a/azurerm/internal/services/compute/virtual_machine_data_disk_attachment_resource_test.go b/azurerm/internal/services/compute/virtual_machine_data_disk_attachment_resource_test.go index 0ef89d0f3bbb..66278fced443 100644 --- a/azurerm/internal/services/compute/virtual_machine_data_disk_attachment_resource_test.go +++ b/azurerm/internal/services/compute/virtual_machine_data_disk_attachment_resource_test.go @@ -6,12 +6,11 @@ import ( "testing" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -22,10 +21,10 @@ func TestAccVirtualMachineDataDiskAttachment_basic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_data_disk_attachment", "test") r := VirtualMachineDataDiskAttachmentResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("virtual_machine_id").Exists(), check.That(data.ResourceName).Key("managed_disk_id").Exists(), @@ -41,10 +40,10 @@ func TestAccVirtualMachineDataDiskAttachment_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_data_disk_attachment", "test") r := VirtualMachineDataDiskAttachmentResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -59,7 +58,7 @@ func TestAccVirtualMachineDataDiskAttachment_destroy(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_data_disk_attachment", "test") r := VirtualMachineDataDiskAttachmentResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ data.DisappearsStep(acceptance.DisappearsStepData{ Config: r.basic, TestResource: r, @@ -73,19 +72,19 @@ func TestAccVirtualMachineDataDiskAttachment_multipleDisks(t *testing.T) { secondResourceName := "azurerm_virtual_machine_data_disk_attachment.second" - data.ResourceTest(t, r, []resource.TestStep{ + 
data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.multipleDisks(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("virtual_machine_id").Exists(), check.That(data.ResourceName).Key("managed_disk_id").Exists(), check.That(data.ResourceName).Key("lun").HasValue("10"), check.That(data.ResourceName).Key("caching").HasValue("None"), - resource.TestCheckResourceAttrSet(secondResourceName, "virtual_machine_id"), - resource.TestCheckResourceAttrSet(secondResourceName, "managed_disk_id"), - resource.TestCheckResourceAttr(secondResourceName, "lun", "20"), - resource.TestCheckResourceAttr(secondResourceName, "caching", "ReadOnly"), + acceptance.TestCheckResourceAttrSet(secondResourceName, "virtual_machine_id"), + acceptance.TestCheckResourceAttrSet(secondResourceName, "managed_disk_id"), + acceptance.TestCheckResourceAttr(secondResourceName, "lun", "20"), + acceptance.TestCheckResourceAttr(secondResourceName, "caching", "ReadOnly"), ), }, data.ImportStep(), @@ -100,24 +99,24 @@ func TestAccVirtualMachineDataDiskAttachment_multipleDisks(t *testing.T) { func TestAccVirtualMachineDataDiskAttachment_updatingCaching(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_data_disk_attachment", "test") r := VirtualMachineDataDiskAttachmentResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("caching").HasValue("None"), ), }, { Config: r.readOnly(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("caching").HasValue("ReadOnly"), ), }, { Config: r.readWrite(data), - Check: 
resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("caching").HasValue("ReadWrite"), ), @@ -128,24 +127,24 @@ func TestAccVirtualMachineDataDiskAttachment_updatingCaching(t *testing.T) { func TestAccVirtualMachineDataDiskAttachment_updatingWriteAccelerator(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_data_disk_attachment", "test") r := VirtualMachineDataDiskAttachmentResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.writeAccelerator(data, false), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("write_accelerator_enabled").HasValue("false"), ), }, { Config: r.writeAccelerator(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("write_accelerator_enabled").HasValue("true"), ), }, { Config: r.writeAccelerator(data, false), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("write_accelerator_enabled").HasValue("false"), ), @@ -157,10 +156,10 @@ func TestAccVirtualMachineDataDiskAttachment_managedServiceIdentity(t *testing.T data := acceptance.BuildTestData(t, "azurerm_virtual_machine_data_disk_attachment", "test") r := VirtualMachineDataDiskAttachmentResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.managedServiceIdentity(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("virtual_machine_id").Exists(), 
check.That(data.ResourceName).Key("managed_disk_id").Exists(), @@ -175,13 +174,13 @@ func TestAccVirtualMachineDataDiskAttachment_managedServiceIdentity(t *testing.T func TestAccVirtualMachineDataDiskAttachment_virtualMachineExtension(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_data_disk_attachment", "test") r := VirtualMachineDataDiskAttachmentResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.virtualMachineExtensionPrep(data), }, { Config: r.virtualMachineExtensionComplete(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("virtual_machine_id").Exists(), check.That(data.ResourceName).Key("managed_disk_id").Exists(), @@ -190,7 +189,7 @@ func TestAccVirtualMachineDataDiskAttachment_virtualMachineExtension(t *testing. }) } -func (t VirtualMachineDataDiskAttachmentResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (t VirtualMachineDataDiskAttachmentResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := azure.ParseAzureResourceID(state.ID) if err != nil { return nil, err @@ -220,7 +219,7 @@ func (t VirtualMachineDataDiskAttachmentResource) Exists(ctx context.Context, cl return utils.Bool(disk != nil), nil } -func (VirtualMachineDataDiskAttachmentResource) Destroy(ctx context.Context, client *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (VirtualMachineDataDiskAttachmentResource) Destroy(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := azure.ParseAzureResourceID(state.ID) if err != nil { return nil, err diff --git a/azurerm/internal/services/compute/virtual_machine_data_source.go 
b/azurerm/internal/services/compute/virtual_machine_data_source.go index 91915917be29..f3855289c602 100644 --- a/azurerm/internal/services/compute/virtual_machine_data_source.go +++ b/azurerm/internal/services/compute/virtual_machine_data_source.go @@ -4,25 +4,25 @@ import ( "fmt" "time" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func dataSourceVirtualMachine() *schema.Resource { - return &schema.Resource{ +func dataSourceVirtualMachine() *pluginsdk.Resource { + return &pluginsdk.Resource{ Read: dataSourceVirtualMachineRead, - Timeouts: &schema.ResourceTimeout{ - Read: schema.DefaultTimeout(5 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Read: pluginsdk.DefaultTimeout(5 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.NoZeroValues, }, @@ -32,30 +32,30 @@ func dataSourceVirtualMachine() *schema.Resource { "location": azure.SchemaLocationForDataSource(), "identity": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "identity_ids": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, 
Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, "principal_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "tenant_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, }, @@ -65,7 +65,7 @@ func dataSourceVirtualMachine() *schema.Resource { } } -func dataSourceVirtualMachineRead(d *schema.ResourceData, meta interface{}) error { +func dataSourceVirtualMachineRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.VMClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() diff --git a/azurerm/internal/services/compute/virtual_machine_data_source_test.go b/azurerm/internal/services/compute/virtual_machine_data_source_test.go index f5a6ade393f6..41e50e73a417 100644 --- a/azurerm/internal/services/compute/virtual_machine_data_source_test.go +++ b/azurerm/internal/services/compute/virtual_machine_data_source_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -16,10 +15,10 @@ func TestAccDataSourceAzureRMVirtualMachine_basicLinux(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_virtual_machine", "test") r := VirtualMachineDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.basicLinux(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("identity.#").HasValue("1"), check.That(data.ResourceName).Key("identity.0.type").HasValue("SystemAssigned"), check.That(data.ResourceName).Key("identity.0.principal_id").Exists(), @@ -33,10 +32,10 @@ func 
TestAccDataSourceAzureRMVirtualMachine_basicWindows(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_virtual_machine", "test") r := VirtualMachineDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.basicWindows(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("identity.#").HasValue("1"), check.That(data.ResourceName).Key("identity.0.type").HasValue("SystemAssigned"), check.That(data.ResourceName).Key("identity.0.principal_id").Exists(), diff --git a/azurerm/internal/services/compute/virtual_machine_extension_resource.go b/azurerm/internal/services/compute/virtual_machine_extension_resource.go index c4767885274f..80f24acc6477 100644 --- a/azurerm/internal/services/compute/virtual_machine_extension_resource.go +++ b/azurerm/internal/services/compute/virtual_machine_extension_resource.go @@ -5,21 +5,19 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/structure" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func resourceVirtualMachineExtension() *schema.Resource { - return &schema.Resource{ +func resourceVirtualMachineExtension() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceVirtualMachineExtensionsCreateUpdate, Read: resourceVirtualMachineExtensionsRead, Update: resourceVirtualMachineExtensionsCreateUpdate, @@ -30,61 +28,61 @@ func resourceVirtualMachineExtension() *schema.Resource { return err }), - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(30 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, }, "virtual_machine_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.VirtualMachineID, }, "publisher": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "type_handler_version": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "auto_upgrade_minor_version": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, }, "settings": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ValidateFunc: validation.StringIsJSON, - DiffSuppressFunc: structure.SuppressJsonDiff, + DiffSuppressFunc: pluginsdk.SuppressJsonDiff, }, // due to the sensitive nature, these are not returned by the 
API "protected_settings": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Sensitive: true, ValidateFunc: validation.StringIsJSON, - DiffSuppressFunc: structure.SuppressJsonDiff, + DiffSuppressFunc: pluginsdk.SuppressJsonDiff, }, "tags": tags.Schema(), @@ -92,7 +90,7 @@ func resourceVirtualMachineExtension() *schema.Resource { } } -func resourceVirtualMachineExtensionsCreateUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceVirtualMachineExtensionsCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { vmExtensionClient := meta.(*clients.Client).Compute.VMExtensionClient vmClient := meta.(*clients.Client).Compute.VMClient ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) @@ -147,7 +145,7 @@ func resourceVirtualMachineExtensionsCreateUpdate(d *schema.ResourceData, meta i } if settingsString := d.Get("settings").(string); settingsString != "" { - settings, err := structure.ExpandJsonFromString(settingsString) + settings, err := pluginsdk.ExpandJsonFromString(settingsString) if err != nil { return fmt.Errorf("unable to parse settings: %s", err) } @@ -155,7 +153,7 @@ func resourceVirtualMachineExtensionsCreateUpdate(d *schema.ResourceData, meta i } if protectedSettingsString := d.Get("protected_settings").(string); protectedSettingsString != "" { - protectedSettings, err := structure.ExpandJsonFromString(protectedSettingsString) + protectedSettings, err := pluginsdk.ExpandJsonFromString(protectedSettingsString) if err != nil { return fmt.Errorf("unable to parse protected_settings: %s", err) } @@ -185,7 +183,7 @@ func resourceVirtualMachineExtensionsCreateUpdate(d *schema.ResourceData, meta i return resourceVirtualMachineExtensionsRead(d, meta) } -func resourceVirtualMachineExtensionsRead(d *schema.ResourceData, meta interface{}) error { +func resourceVirtualMachineExtensionsRead(d *pluginsdk.ResourceData, meta interface{}) error { vmExtensionClient := 
meta.(*clients.Client).Compute.VMExtensionClient vmClient := meta.(*clients.Client).Compute.VMClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) @@ -226,7 +224,7 @@ func resourceVirtualMachineExtensionsRead(d *schema.ResourceData, meta interface if settings := props.Settings; settings != nil { settingsVal := settings.(map[string]interface{}) - settingsJson, err := structure.FlattenJsonToString(settingsVal) + settingsJson, err := pluginsdk.FlattenJsonToString(settingsVal) if err != nil { return fmt.Errorf("unable to parse settings from response: %s", err) } @@ -237,7 +235,7 @@ func resourceVirtualMachineExtensionsRead(d *schema.ResourceData, meta interface return tags.FlattenAndSet(d, resp.Tags) } -func resourceVirtualMachineExtensionsDelete(d *schema.ResourceData, meta interface{}) error { +func resourceVirtualMachineExtensionsDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.VMExtensionClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() diff --git a/azurerm/internal/services/compute/virtual_machine_extension_resource_test.go b/azurerm/internal/services/compute/virtual_machine_extension_resource_test.go index 9c1eeec1cd8c..3e30dfa52533 100644 --- a/azurerm/internal/services/compute/virtual_machine_extension_resource_test.go +++ b/azurerm/internal/services/compute/virtual_machine_extension_resource_test.go @@ -6,12 +6,11 @@ import ( "regexp" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/parse" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -22,20 +21,20 @@ func TestAccVirtualMachineExtension_basic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_extension", "test") r := VirtualMachineExtensionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), - resource.TestMatchResourceAttr(data.ResourceName, "settings", regexp.MustCompile("hostname")), + acceptance.TestMatchResourceAttr(data.ResourceName, "settings", regexp.MustCompile("hostname")), ), }, data.ImportStep("protected_settings"), { Config: r.basicUpdate(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), - resource.TestMatchResourceAttr(data.ResourceName, "settings", regexp.MustCompile("whoami")), + acceptance.TestMatchResourceAttr(data.ResourceName, "settings", regexp.MustCompile("whoami")), ), }, data.ImportStep("protected_settings"), @@ -46,10 +45,10 @@ func TestAccVirtualMachineExtension_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_extension", "test") r := VirtualMachineExtensionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -65,13 +64,13 @@ func TestAccVirtualMachineExtension_concurrent(t *testing.T) { r := VirtualMachineExtensionResource{} secondResourceName := "azurerm_virtual_machine_extension.test2" - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ 
{ Config: r.concurrent(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), - resource.TestMatchResourceAttr(data.ResourceName, "settings", regexp.MustCompile("hostname")), - resource.TestMatchResourceAttr(secondResourceName, "settings", regexp.MustCompile("whoami")), + acceptance.TestMatchResourceAttr(data.ResourceName, "settings", regexp.MustCompile("hostname")), + acceptance.TestMatchResourceAttr(secondResourceName, "settings", regexp.MustCompile("whoami")), ), }, }) @@ -81,17 +80,17 @@ func TestAccVirtualMachineExtension_linuxDiagnostics(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_extension", "test") r := VirtualMachineExtensionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.linuxDiagnostics(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, }) } -func (t VirtualMachineExtensionResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (t VirtualMachineExtensionResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := parse.VirtualMachineExtensionID(state.ID) if err != nil { return nil, err diff --git a/azurerm/internal/services/compute/virtual_machine_import.go b/azurerm/internal/services/compute/virtual_machine_import.go index 8e5075174e87..f68a3fe14ba0 100644 --- a/azurerm/internal/services/compute/virtual_machine_import.go +++ b/azurerm/internal/services/compute/virtual_machine_import.go @@ -4,19 +4,17 @@ import ( "context" "fmt" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" ) func importVirtualMachine(osType compute.OperatingSystemTypes, resourceType string) pluginsdk.ImporterFunc { return func(ctx context.Context, d *pluginsdk.ResourceData, meta interface{}) (data []*pluginsdk.ResourceData, err error) { id, err := parse.VirtualMachineID(d.Id()) if err != nil { - return []*schema.ResourceData{}, err + return []*pluginsdk.ResourceData{}, err } client := meta.(*clients.Client).Compute.VMClient diff --git a/azurerm/internal/services/compute/virtual_machine_managed_disks_resource_test.go b/azurerm/internal/services/compute/virtual_machine_managed_disks_resource_test.go index 1798b72f3e22..adae2ec1ef28 100644 --- a/azurerm/internal/services/compute/virtual_machine_managed_disks_resource_test.go +++ b/azurerm/internal/services/compute/virtual_machine_managed_disks_resource_test.go @@ -6,7 +6,6 @@ import ( "regexp" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" @@ -18,10 +17,10 @@ func TestAccVirtualMachine_basicLinuxMachine_managedDisk_standardSSD(t *testing. 
data := acceptance.BuildTestData(t, "azurerm_virtual_machine", "test") r := VirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicLinuxMachine_managedDisk_standardSSD(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("storage_os_disk.0.managed_disk_type").HasValue("StandardSSD_LRS"), ), @@ -33,10 +32,10 @@ func TestAccVirtualMachine_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine", "test") r := VirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicLinuxMachine_managedDisk_standardSSD(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -51,10 +50,10 @@ func TestAccVirtualMachine_basicLinuxMachine_managedDisk_explicit(t *testing.T) data := acceptance.BuildTestData(t, "azurerm_virtual_machine", "test") r := VirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicLinuxMachine_managedDisk_explicit(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -65,10 +64,10 @@ func TestAccVirtualMachine_basicLinuxMachine_managedDisk_implicit(t *testing.T) data := acceptance.BuildTestData(t, "azurerm_virtual_machine", "test") r := VirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicLinuxMachine_managedDisk_implicit(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -79,10 +78,10 @@ func 
TestAccVirtualMachine_basicLinuxMachine_managedDisk_attach(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine", "test") r := VirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicLinuxMachine_managedDisk_attach(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -93,10 +92,10 @@ func TestAccVirtualMachine_withDataDisk_managedDisk_explicit(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine", "test") r := VirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.withDataDisk_managedDisk_explicit(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -107,10 +106,10 @@ func TestAccVirtualMachine_withDataDisk_managedDisk_implicit(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine", "test") r := VirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.withDataDisk_managedDisk_implicit(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -124,11 +123,11 @@ func TestAccVirtualMachine_deleteManagedDiskOptOut(t *testing.T) { var osDiskId string var dataDiskId string - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Destroy: false, Config: r.withDataDisk_managedDisk_implicit(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), data.CheckWithClient(r.findManagedDiskID("storage_os_disk.0.name", &osDiskId)), 
data.CheckWithClient(r.findManagedDiskID("storage_data_disk.0.name", &dataDiskId)), @@ -136,7 +135,7 @@ func TestAccVirtualMachine_deleteManagedDiskOptOut(t *testing.T) { }, { Config: r.basicLinuxMachineDeleteVM_managedDisk(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( data.CheckWithClient(r.managedDiskExists(osDiskId, true)), data.CheckWithClient(r.managedDiskExists(dataDiskId, true)), ), @@ -151,11 +150,11 @@ func TestAccVirtualMachine_deleteManagedDiskOptIn(t *testing.T) { var osDiskId string var dataDiskId string - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Destroy: false, Config: r.basicLinuxMachine_managedDisk_DestroyDisksBefore(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), data.CheckWithClient(r.findManagedDiskID("storage_os_disk.0.name", &osDiskId)), data.CheckWithClient(r.findManagedDiskID("storage_data_disk.0.name", &dataDiskId)), @@ -163,7 +162,7 @@ func TestAccVirtualMachine_deleteManagedDiskOptIn(t *testing.T) { }, { Config: r.basicLinuxMachine_managedDisk_DestroyDisksAfter(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( data.CheckWithClient(r.managedDiskExists(osDiskId, false)), data.CheckWithClient(r.managedDiskExists(dataDiskId, false)), ), @@ -175,7 +174,7 @@ func TestAccVirtualMachine_osDiskTypeConflict(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine", "test") r := VirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.osDiskTypeConflict(data), ExpectError: regexp.MustCompile("conflicts with storage_os_disk.0.managed_disk_type"), @@ -187,7 +186,7 @@ func TestAccVirtualMachine_dataDiskTypeConflict(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine", "test") r := VirtualMachineResource{} - 
data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.dataDiskTypeConflict(data), ExpectError: regexp.MustCompile("Conflict between `vhd_uri`"), @@ -199,7 +198,7 @@ func TestAccVirtualMachine_bug33(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine", "test") r := VirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.bug33(data), }, @@ -210,17 +209,17 @@ func TestAccVirtualMachine_attachSecondDataDiskWithAttachOption(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine", "test") r := VirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicLinuxMachine_managedDisk_empty(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("storage_data_disk.0.create_option").HasValue("Empty"), ), }, { Config: r.basicLinuxMachine_managedDisk_attach(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("storage_data_disk.0.create_option").HasValue("Empty"), check.That(data.ResourceName).Key("storage_data_disk.1.create_option").HasValue("Attach"), @@ -233,7 +232,7 @@ func TestAccVirtualMachine_linuxNoConfig(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine", "test") r := VirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.linuxNoConfig(data), ExpectError: regexp.MustCompile("Error: either a `os_profile_linux_config` or a `os_profile_windows_config` must be specified."), @@ -245,7 +244,7 @@ func TestAccVirtualMachine_windowsNoConfig(t *testing.T) { data := acceptance.BuildTestData(t, 
"azurerm_virtual_machine", "test") r := VirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.windowsNoConfig(data), ExpectError: regexp.MustCompile("Error: either a `os_profile_linux_config` or a `os_profile_windows_config` must be specified."), @@ -261,10 +260,10 @@ func TestAccVirtualMachine_multipleNICs(t *testing.T) { firstNicName := fmt.Sprintf("%s/acctni1-%d", prefix, data.RandomInteger) secondNicName := fmt.Sprintf("%s/acctni2-%d", prefix, data.RandomInteger) - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.multipleNICs(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("network_interface_ids.0").HasValue(firstNicName), check.That(data.ResourceName).Key("network_interface_ids.1").HasValue(secondNicName), ), @@ -276,14 +275,14 @@ func TestAccVirtualMachine_managedServiceIdentity(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine", "test") r := VirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.withManagedServiceIdentity(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("identity.0.type").HasValue("SystemAssigned"), - resource.TestMatchResourceAttr(data.ResourceName, "identity.0.principal_id", validate.UUIDRegExp), - resource.TestMatchOutput("principal_id", validate.UUIDRegExp), + acceptance.TestMatchResourceAttr(data.ResourceName, "identity.0.principal_id", validate.UUIDRegExp), + acceptance.TestMatchOutput("principal_id", validate.UUIDRegExp), ), }, }) @@ -293,10 +292,10 @@ func TestAccVirtualMachine_enableAnWithVM(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine", "test") r := 
VirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.anWithVM(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -307,17 +306,17 @@ func TestAccVirtualMachine_basicLinuxMachine_managedDisk_changeOsWriteAccelerato data := acceptance.BuildTestData(t, "azurerm_virtual_machine", "test") r := VirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicLinuxMachine_managedDisk_withOsWriteAcceleratorEnabled(data, "true"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("storage_os_disk.0.write_accelerator_enabled").HasValue("true"), ), }, { Config: r.basicLinuxMachine_managedDisk_withOsWriteAcceleratorEnabled(data, "false"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("storage_os_disk.0.write_accelerator_enabled").HasValue("false"), ), @@ -329,10 +328,10 @@ func TestAccVirtualMachine_basicLinuxMachine_managedDisk_withWriteAcceleratorEna data := acceptance.BuildTestData(t, "azurerm_virtual_machine", "test") r := VirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicLinuxMachine_managedDisk_withWriteAcceleratorEnabled(data, "true"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("storage_data_disk.0.write_accelerator_enabled").HasValue("true"), ), @@ -344,17 +343,17 @@ func TestAccVirtualMachine_basicLinuxMachine_managedDisk_changeWriteAcceleratorE data := acceptance.BuildTestData(t, 
"azurerm_virtual_machine", "test") r := VirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicLinuxMachine_managedDisk_withWriteAcceleratorEnabled(data, "false"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("storage_data_disk.0.write_accelerator_enabled").HasValue("false"), ), }, { Config: r.basicLinuxMachine_managedDisk_withWriteAcceleratorEnabled(data, "true"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("storage_data_disk.0.write_accelerator_enabled").HasValue("true"), ), @@ -366,10 +365,10 @@ func TestAccVirtualMachine_winRMCerts(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine", "test") r := VirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.winRMCerts(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -380,10 +379,10 @@ func TestAccVirtualMachine_hasDiskInfoWhenStopped(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine", "test") r := VirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.hasDiskInfoWhenStopped(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("storage_os_disk.0.managed_disk_type").HasValue("Standard_LRS"), check.That(data.ResourceName).Key("storage_data_disk.0.disk_size_gb").HasValue("64"), @@ -391,7 +390,7 @@ func TestAccVirtualMachine_hasDiskInfoWhenStopped(t *testing.T) { }, { Config: 
r.hasDiskInfoWhenStopped(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( data.CheckWithClient(r.deallocate), check.That(data.ResourceName).Key("storage_os_disk.0.managed_disk_type").HasValue("Standard_LRS"), check.That(data.ResourceName).Key("storage_data_disk.0.disk_size_gb").HasValue("64"), @@ -404,7 +403,7 @@ func TestAccVirtualMachine_importBasic_withZone(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine", "test") r := VirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicLinuxMachine_managedDisk_implicit_withZone(data), }, @@ -422,10 +421,10 @@ func TestAccVirtualMachine_ultraSSD(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine", "test") r := VirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.ultraSSD(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, diff --git a/azurerm/internal/services/compute/virtual_machine_resource.go b/azurerm/internal/services/compute/virtual_machine_resource.go index 0bf63faa0cf1..14e4eddc3dff 100644 --- a/azurerm/internal/services/compute/virtual_machine_resource.go +++ b/azurerm/internal/services/compute/virtual_machine_resource.go @@ -10,9 +10,7 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" @@ -24,6 +22,7 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/suppress" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" "github.com/tombuildsstuff/giovanni/storage/2019-12-12/blob/blobs" @@ -33,7 +32,7 @@ import ( var virtualMachineResourceName = "azurerm_virtual_machine" // TODO move into internal/tf/suppress/base64.go -func userDataDiffSuppressFunc(_, old, new string, _ *schema.ResourceData) bool { +func userDataDiffSuppressFunc(_, old, new string, _ *pluginsdk.ResourceData) bool { return userDataStateFunc(old) == new } @@ -51,8 +50,8 @@ func userDataStateFunc(v interface{}) string { // NOTE: the `azurerm_virtual_machine` resource has been superseded by the `azurerm_linux_virtual_machine` and // `azurerm_windows_virtual_machine` resources - as such this resource is feature-frozen and new // functionality will be added to these new resources instead. 
-func resourceVirtualMachine() *schema.Resource { - return &schema.Resource{ +func resourceVirtualMachine() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceVirtualMachineCreateUpdate, Read: resourceVirtualMachineRead, Update: resourceVirtualMachineCreateUpdate, @@ -60,16 +59,16 @@ func resourceVirtualMachine() *schema.Resource { // TODO: replace this with an importer which validates the ID during import Importer: pluginsdk.DefaultImporter(), - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(60 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(60 * time.Minute), - Delete: schema.DefaultTimeout(60 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(60 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(60 * time.Minute), + Delete: pluginsdk.DefaultTimeout(60 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, }, @@ -81,23 +80,23 @@ func resourceVirtualMachine() *schema.Resource { "zones": azure.SchemaSingleZone(), "plan": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "publisher": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "product": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, }, @@ -105,7 +104,7 @@ func resourceVirtualMachine() *schema.Resource { }, "availability_set_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Computed: true, ForceNew: true, @@ -116,7 +115,7 @@ func resourceVirtualMachine() 
*schema.Resource { }, "proximity_placement_group_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, @@ -128,14 +127,14 @@ func resourceVirtualMachine() *schema.Resource { }, "identity": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, Computed: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, DiffSuppressFunc: suppress.CaseDifference, ValidateFunc: validation.StringInSlice([]string{ @@ -145,15 +144,15 @@ func resourceVirtualMachine() *schema.Resource { }, false), }, "principal_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "identity_ids": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, MinItems: 1, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: msivalidate.UserAssignedIdentityID, }, }, @@ -162,7 +161,7 @@ func resourceVirtualMachine() *schema.Resource { }, "license_type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Computed: true, DiffSuppressFunc: suppress.CaseDifference, @@ -173,46 +172,46 @@ func resourceVirtualMachine() *schema.Resource { }, "vm_size": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, DiffSuppressFunc: suppress.CaseDifference, }, // lintignore:S018 "storage_image_reference": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, Computed: true, ForceNew: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, }, "publisher": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: 
true, }, "offer": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, }, "sku": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, }, "version": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Computed: true, ForceNew: true, @@ -223,13 +222,13 @@ func resourceVirtualMachine() *schema.Resource { }, "storage_os_disk": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Required: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "os_type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Computed: true, ValidateFunc: validation.StringInSlice([]string{ @@ -240,12 +239,12 @@ func resourceVirtualMachine() *schema.Resource { }, "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "vhd_uri": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, ConflictsWith: []string{ @@ -255,7 +254,7 @@ func resourceVirtualMachine() *schema.Resource { }, "managed_disk_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, Computed: true, @@ -263,7 +262,7 @@ func resourceVirtualMachine() *schema.Resource { }, "managed_disk_type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Computed: true, ConflictsWith: []string{"storage_os_disk.0.vhd_uri"}, @@ -275,31 +274,31 @@ func resourceVirtualMachine() *schema.Resource { }, "image_uri": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, }, "caching": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Computed: true, }, "create_option": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, DiffSuppressFunc: suppress.CaseDifference, }, "disk_size_gb": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, 
Optional: true, Computed: true, ValidateFunc: validate.DiskSizeGB, }, "write_accelerator_enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: false, }, @@ -308,36 +307,36 @@ func resourceVirtualMachine() *schema.Resource { }, "delete_os_disk_on_termination": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: false, }, "storage_data_disk": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "vhd_uri": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, }, "managed_disk_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Computed: true, DiffSuppressFunc: suppress.CaseDifference, }, "managed_disk_type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Computed: true, ValidateFunc: validation.StringInSlice([]string{ @@ -349,31 +348,31 @@ func resourceVirtualMachine() *schema.Resource { }, "create_option": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, DiffSuppressFunc: suppress.CaseDifference, }, "caching": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Computed: true, }, "disk_size_gb": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Computed: true, ValidateFunc: validate.DiskSizeGB, }, "lun": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Required: true, }, "write_accelerator_enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: false, }, @@ -382,24 +381,24 @@ func resourceVirtualMachine() *schema.Resource { }, "delete_data_disks_on_termination": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: false, }, "boot_diagnostics": { - 
Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Required: true, }, "storage_uri": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, }, @@ -407,13 +406,13 @@ func resourceVirtualMachine() *schema.Resource { }, "additional_capabilities": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "ultra_ssd_enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Required: true, ForceNew: true, }, @@ -423,30 +422,30 @@ func resourceVirtualMachine() *schema.Resource { // lintignore:S018 "os_profile": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "computer_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, ForceNew: true, Required: true, }, "admin_username": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "admin_password": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Sensitive: true, }, "custom_data": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, ForceNew: true, Optional: true, Computed: true, @@ -459,35 +458,35 @@ func resourceVirtualMachine() *schema.Resource { // lintignore:S018 "os_profile_windows_config": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "provision_vm_agent": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, 
Optional: true, Default: false, }, "enable_automatic_upgrades": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: false, }, "timezone": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, DiffSuppressFunc: suppress.CaseDifference, ValidateFunc: validate.VirtualMachineTimeZoneCaseInsensitive(), }, "winrm": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "protocol": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ "HTTP", @@ -496,34 +495,34 @@ func resourceVirtualMachine() *schema.Resource { DiffSuppressFunc: suppress.CaseDifference, }, "certificate_url": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, }, }, }, }, "additional_unattend_config": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ // TODO: should we make `pass` and `component` Optional + Defaulted? 
"pass": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ "oobeSystem", }, false), }, "component": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ "Microsoft-Windows-Shell-Setup", }, false), }, "setting_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ "AutoLogon", @@ -531,7 +530,7 @@ func resourceVirtualMachine() *schema.Resource { }, false), }, "content": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, Sensitive: true, }, @@ -546,26 +545,26 @@ func resourceVirtualMachine() *schema.Resource { // lintignore:S018 "os_profile_linux_config": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "disable_password_authentication": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Required: true, }, "ssh_keys": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "path": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "key_data": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, }, @@ -578,26 +577,26 @@ func resourceVirtualMachine() *schema.Resource { }, "os_profile_secrets": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "source_vault_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "vault_certificates": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, 
Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "certificate_url": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "certificate_store": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, }, }, @@ -608,16 +607,16 @@ func resourceVirtualMachine() *schema.Resource { }, "network_interface_ids": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Required: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: validation.StringIsNotEmpty, }, }, "primary_network_interface_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, }, @@ -626,7 +625,7 @@ func resourceVirtualMachine() *schema.Resource { } } -func resourceVirtualMachineCreateUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceVirtualMachineCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.VMClient ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -785,7 +784,7 @@ func resourceVirtualMachineCreateUpdate(d *schema.ResourceData, meta interface{} return resourceVirtualMachineRead(d, meta) } -func resourceVirtualMachineRead(d *schema.ResourceData, meta interface{}) error { +func resourceVirtualMachineRead(d *pluginsdk.ResourceData, meta interface{}) error { vmclient := meta.(*clients.Client).Compute.VMClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() @@ -842,7 +841,7 @@ func resourceVirtualMachineRead(d *schema.ResourceData, meta interface{}) error } if profile := props.StorageProfile; profile != nil { - if err := d.Set("storage_image_reference", schema.NewSet(resourceVirtualMachineStorageImageReferenceHash, flattenAzureRmVirtualMachineImageReference(profile.ImageReference))); err != nil { + if err := 
d.Set("storage_image_reference", pluginsdk.NewSet(resourceVirtualMachineStorageImageReferenceHash, flattenAzureRmVirtualMachineImageReference(profile.ImageReference))); err != nil { return fmt.Errorf("[DEBUG] Error setting Virtual Machine Storage Image Reference error: %#v", err) } @@ -872,15 +871,15 @@ func resourceVirtualMachineRead(d *schema.ResourceData, meta interface{}) error } if profile := props.OsProfile; profile != nil { - if err := d.Set("os_profile", schema.NewSet(resourceVirtualMachineStorageOsProfileHash, flattenAzureRmVirtualMachineOsProfile(profile))); err != nil { + if err := d.Set("os_profile", pluginsdk.NewSet(resourceVirtualMachineStorageOsProfileHash, flattenAzureRmVirtualMachineOsProfile(profile))); err != nil { return fmt.Errorf("Error setting `os_profile`: %#v", err) } - if err := d.Set("os_profile_linux_config", schema.NewSet(resourceVirtualMachineStorageOsProfileLinuxConfigHash, flattenAzureRmVirtualMachineOsProfileLinuxConfiguration(profile.LinuxConfiguration))); err != nil { + if err := d.Set("os_profile_linux_config", pluginsdk.NewSet(resourceVirtualMachineStorageOsProfileLinuxConfigHash, flattenAzureRmVirtualMachineOsProfileLinuxConfiguration(profile.LinuxConfiguration))); err != nil { return fmt.Errorf("Error setting `os_profile_linux_config`: %+v", err) } - if err := d.Set("os_profile_windows_config", schema.NewSet(resourceVirtualMachineStorageOsProfileWindowsConfigHash, flattenAzureRmVirtualMachineOsProfileWindowsConfiguration(profile.WindowsConfiguration))); err != nil { + if err := d.Set("os_profile_windows_config", pluginsdk.NewSet(resourceVirtualMachineStorageOsProfileWindowsConfigHash, flattenAzureRmVirtualMachineOsProfileWindowsConfiguration(profile.WindowsConfiguration))); err != nil { return fmt.Errorf("Error setting `os_profile_windows_config`: %+v", err) } @@ -919,7 +918,7 @@ func resourceVirtualMachineRead(d *schema.ResourceData, meta interface{}) error return tags.FlattenAndSet(d, resp.Tags) } -func 
resourceVirtualMachineDelete(d *schema.ResourceData, meta interface{}) error { +func resourceVirtualMachineDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.VMClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() @@ -1059,7 +1058,7 @@ func resourceVirtualMachineDeleteVhd(ctx context.Context, storageClient *intStor return nil } -func resourceVirtualMachineDeleteManagedDisk(d *schema.ResourceData, disk *compute.ManagedDiskParameters, meta interface{}) error { +func resourceVirtualMachineDeleteManagedDisk(d *pluginsdk.ResourceData, disk *compute.ManagedDiskParameters, meta interface{}) error { if disk == nil { return fmt.Errorf("`disk` was nil`") } @@ -1417,7 +1416,7 @@ func flattenAzureRmVirtualMachineReviseDiskInfo(result map[string]interface{}, d } } -func expandAzureRmVirtualMachinePlan(d *schema.ResourceData) *compute.Plan { +func expandAzureRmVirtualMachinePlan(d *pluginsdk.ResourceData) *compute.Plan { planConfigs := d.Get("plan").([]interface{}) if len(planConfigs) == 0 { return nil @@ -1436,7 +1435,7 @@ func expandAzureRmVirtualMachinePlan(d *schema.ResourceData) *compute.Plan { } } -func expandAzureRmVirtualMachineIdentity(d *schema.ResourceData) *compute.VirtualMachineIdentity { +func expandAzureRmVirtualMachineIdentity(d *pluginsdk.ResourceData) *compute.VirtualMachineIdentity { v := d.Get("identity") identities := v.([]interface{}) identity := identities[0].(map[string]interface{}) @@ -1458,8 +1457,8 @@ func expandAzureRmVirtualMachineIdentity(d *schema.ResourceData) *compute.Virtua return &vmIdentity } -func expandAzureRmVirtualMachineOsProfile(d *schema.ResourceData) (*compute.OSProfile, error) { - osProfiles := d.Get("os_profile").(*schema.Set).List() +func expandAzureRmVirtualMachineOsProfile(d *pluginsdk.ResourceData) (*compute.OSProfile, error) { + osProfiles := d.Get("os_profile").(*pluginsdk.Set).List() osProfile := osProfiles[0].(map[string]interface{}) @@ 
-1509,7 +1508,7 @@ func expandAzureRmVirtualMachineOsProfile(d *schema.ResourceData) (*compute.OSPr return profile, nil } -func expandAzureRmVirtualMachineOsProfileSecrets(d *schema.ResourceData) *[]compute.VaultSecretGroup { +func expandAzureRmVirtualMachineOsProfileSecrets(d *pluginsdk.ResourceData) *[]compute.VaultSecretGroup { secretsConfig := d.Get("os_profile_secrets").([]interface{}) secrets := make([]compute.VaultSecretGroup, 0, len(secretsConfig)) @@ -1555,8 +1554,8 @@ func expandAzureRmVirtualMachineOsProfileSecrets(d *schema.ResourceData) *[]comp return &secrets } -func expandAzureRmVirtualMachineOsProfileLinuxConfig(d *schema.ResourceData) *compute.LinuxConfiguration { - osProfilesLinuxConfig := d.Get("os_profile_linux_config").(*schema.Set).List() +func expandAzureRmVirtualMachineOsProfileLinuxConfig(d *pluginsdk.ResourceData) *compute.LinuxConfiguration { + osProfilesLinuxConfig := d.Get("os_profile_linux_config").(*pluginsdk.Set).List() linuxConfig := osProfilesLinuxConfig[0].(map[string]interface{}) disablePasswordAuth := linuxConfig["disable_password_authentication"].(bool) @@ -1592,8 +1591,8 @@ func expandAzureRmVirtualMachineOsProfileLinuxConfig(d *schema.ResourceData) *co return config } -func expandAzureRmVirtualMachineOsProfileWindowsConfig(d *schema.ResourceData) *compute.WindowsConfiguration { - osProfilesWindowsConfig := d.Get("os_profile_windows_config").(*schema.Set).List() +func expandAzureRmVirtualMachineOsProfileWindowsConfig(d *pluginsdk.ResourceData) *compute.WindowsConfiguration { + osProfilesWindowsConfig := d.Get("os_profile_windows_config").(*pluginsdk.Set).List() osProfileConfig := osProfilesWindowsConfig[0].(map[string]interface{}) config := &compute.WindowsConfiguration{} @@ -1663,7 +1662,7 @@ func expandAzureRmVirtualMachineOsProfileWindowsConfig(d *schema.ResourceData) * return config } -func expandAzureRmVirtualMachineDataDisk(d *schema.ResourceData) ([]compute.DataDisk, error) { +func expandAzureRmVirtualMachineDataDisk(d 
*pluginsdk.ResourceData) ([]compute.DataDisk, error) { disks := d.Get("storage_data_disk").([]interface{}) data_disks := make([]compute.DataDisk, 0, len(disks)) for _, disk_config := range disks { @@ -1728,7 +1727,7 @@ func expandAzureRmVirtualMachineDataDisk(d *schema.ResourceData) ([]compute.Data return data_disks, nil } -func expandAzureRmVirtualMachineDiagnosticsProfile(d *schema.ResourceData) *compute.DiagnosticsProfile { +func expandAzureRmVirtualMachineDiagnosticsProfile(d *pluginsdk.ResourceData) *compute.DiagnosticsProfile { bootDiagnostics := d.Get("boot_diagnostics").([]interface{}) diagnosticsProfile := &compute.DiagnosticsProfile{} @@ -1748,7 +1747,7 @@ func expandAzureRmVirtualMachineDiagnosticsProfile(d *schema.ResourceData) *comp return nil } -func expandAzureRmVirtualMachineAdditionalCapabilities(d *schema.ResourceData) *compute.AdditionalCapabilities { +func expandAzureRmVirtualMachineAdditionalCapabilities(d *pluginsdk.ResourceData) *compute.AdditionalCapabilities { additionalCapabilities := d.Get("additional_capabilities").([]interface{}) if len(additionalCapabilities) == 0 { return nil @@ -1762,8 +1761,8 @@ func expandAzureRmVirtualMachineAdditionalCapabilities(d *schema.ResourceData) * return capability } -func expandAzureRmVirtualMachineImageReference(d *schema.ResourceData) (*compute.ImageReference, error) { - storageImageRefs := d.Get("storage_image_reference").(*schema.Set).List() +func expandAzureRmVirtualMachineImageReference(d *pluginsdk.ResourceData) (*compute.ImageReference, error) { + storageImageRefs := d.Get("storage_image_reference").(*pluginsdk.Set).List() storageImageRef := storageImageRefs[0].(map[string]interface{}) imageID := storageImageRef["id"].(string) @@ -1793,7 +1792,7 @@ func expandAzureRmVirtualMachineImageReference(d *schema.ResourceData) (*compute return &imageReference, nil } -func expandAzureRmVirtualMachineNetworkProfile(d *schema.ResourceData) compute.NetworkProfile { +func 
expandAzureRmVirtualMachineNetworkProfile(d *pluginsdk.ResourceData) compute.NetworkProfile { nicIds := d.Get("network_interface_ids").([]interface{}) primaryNicId := d.Get("primary_network_interface_id").(string) network_interfaces := make([]compute.NetworkInterfaceReference, 0, len(nicIds)) @@ -1820,7 +1819,7 @@ func expandAzureRmVirtualMachineNetworkProfile(d *schema.ResourceData) compute.N return network_profile } -func expandAzureRmVirtualMachineOsDisk(d *schema.ResourceData) (*compute.OSDisk, error) { +func expandAzureRmVirtualMachineOsDisk(d *pluginsdk.ResourceData) (*compute.OSDisk, error) { disks := d.Get("storage_os_disk").([]interface{}) config := disks[0].(map[string]interface{}) @@ -1900,7 +1899,7 @@ func resourceVirtualMachineStorageOsProfileHash(v interface{}) int { buf.WriteString(fmt.Sprintf("%s-", m["computer_name"].(string))) } - return schema.HashString(buf.String()) + return pluginsdk.HashString(buf.String()) } func resourceVirtualMachineStorageOsProfileWindowsConfigHash(v interface{}) int { @@ -1918,7 +1917,7 @@ func resourceVirtualMachineStorageOsProfileWindowsConfigHash(v interface{}) int } } - return schema.HashString(buf.String()) + return pluginsdk.HashString(buf.String()) } func resourceVirtualMachineStorageOsProfileLinuxConfigHash(v interface{}) int { @@ -1928,7 +1927,7 @@ func resourceVirtualMachineStorageOsProfileLinuxConfigHash(v interface{}) int { buf.WriteString(fmt.Sprintf("%t-", m["disable_password_authentication"].(bool))) } - return schema.HashString(buf.String()) + return pluginsdk.HashString(buf.String()) } func resourceVirtualMachineStorageImageReferenceHash(v interface{}) int { @@ -1949,10 +1948,10 @@ func resourceVirtualMachineStorageImageReferenceHash(v interface{}) int { } } - return schema.HashString(buf.String()) + return pluginsdk.HashString(buf.String()) } -func resourceVirtualMachineGetManagedDiskInfo(d *schema.ResourceData, disk *compute.ManagedDiskParameters, meta interface{}) (*compute.Disk, error) { +func 
resourceVirtualMachineGetManagedDiskInfo(d *pluginsdk.ResourceData, disk *compute.ManagedDiskParameters, meta interface{}) (*compute.Disk, error) { client := meta.(*clients.Client).Compute.DisksClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() diff --git a/azurerm/internal/services/compute/virtual_machine_resource_test.go b/azurerm/internal/services/compute/virtual_machine_resource_test.go index bc48b4a0183a..11988179719d 100644 --- a/azurerm/internal/services/compute/virtual_machine_resource_test.go +++ b/azurerm/internal/services/compute/virtual_machine_resource_test.go @@ -5,14 +5,13 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" "github.com/tombuildsstuff/giovanni/storage/2019-12-12/blob/blobs" ) @@ -24,10 +23,10 @@ func TestAccVirtualMachine_winTimeZone(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine", "test") r := VirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.winTimeZone(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ 
-38,14 +37,14 @@ func TestAccVirtualMachine_SystemAssignedIdentity(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine", "test") r := VirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.systemAssignedIdentity(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("identity.0.type").HasValue("SystemAssigned"), check.That(data.ResourceName).Key("identity.0.identity_ids.#").HasValue("0"), - resource.TestMatchResourceAttr(data.ResourceName, "identity.0.principal_id", validate.UUIDRegExp), + acceptance.TestMatchResourceAttr(data.ResourceName, "identity.0.principal_id", validate.UUIDRegExp), ), }, }) @@ -55,10 +54,10 @@ func TestAccVirtualMachine_UserAssignedIdentity(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine", "test") r := VirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.userAssignedIdentity(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("identity.0.type").HasValue("UserAssigned"), check.That(data.ResourceName).Key("identity.0.identity_ids.#").HasValue("1"), @@ -72,14 +71,14 @@ func TestAccVirtualMachine_multipleAssignedIdentity(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine", "test") r := VirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.multipleAssignedIdentity(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("identity.0.type").HasValue("SystemAssigned, UserAssigned"), 
check.That(data.ResourceName).Key("identity.0.identity_ids.#").HasValue("1"), - resource.TestMatchResourceAttr(data.ResourceName, "identity.0.principal_id", validate.UUIDRegExp), + acceptance.TestMatchResourceAttr(data.ResourceName, "identity.0.principal_id", validate.UUIDRegExp), ), }, }) @@ -89,10 +88,10 @@ func TestAccVirtualMachine_withPPG(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine", "test") r := VirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.ppg(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("proximity_placement_group_id").Exists(), ), @@ -100,7 +99,7 @@ func TestAccVirtualMachine_withPPG(t *testing.T) { }) } -func (VirtualMachineResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (VirtualMachineResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := azure.ParseAzureResourceID(state.ID) if err != nil { return nil, err @@ -117,7 +116,7 @@ func (VirtualMachineResource) Exists(ctx context.Context, clients *clients.Clien } func (VirtualMachineResource) managedDiskExists(diskId string, shouldExist bool) acceptance.ClientCheckFunc { - return func(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) error { + return func(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) error { id, err := parse.ManagedDiskID(diskId) if err != nil { return err @@ -144,7 +143,7 @@ func (VirtualMachineResource) managedDiskExists(diskId string, shouldExist bool) } func (VirtualMachineResource) findManagedDiskID(field string, managedDiskID *string) acceptance.ClientCheckFunc { - return func(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) 
error { + return func(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) error { id, err := parse.VirtualMachineID(state.ID) if err != nil { return err @@ -189,7 +188,7 @@ func (VirtualMachineResource) findManagedDiskID(field string, managedDiskID *str } } -func (VirtualMachineResource) deallocate(ctx context.Context, client *clients.Client, state *terraform.InstanceState) error { +func (VirtualMachineResource) deallocate(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) error { vmID, err := parse.VirtualMachineID(state.ID) if err != nil { return err @@ -211,7 +210,7 @@ func (VirtualMachineResource) deallocate(ctx context.Context, client *clients.Cl } func (VirtualMachineResource) unmanagedDiskExistsInContainer(blobName string, shouldExist bool) acceptance.ClientCheckFunc { - return func(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) error { + return func(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) error { accountName := state.Attributes["storage_account_name"] containerName := state.Attributes["name"] @@ -250,7 +249,7 @@ func (VirtualMachineResource) unmanagedDiskExistsInContainer(blobName string, sh } } -func (VirtualMachineResource) Destroy(ctx context.Context, client *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (VirtualMachineResource) Destroy(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { vmName := state.Attributes["name"] resourceGroup := state.Attributes["resource_group_name"] diff --git a/azurerm/internal/services/compute/virtual_machine_scale_set.go b/azurerm/internal/services/compute/virtual_machine_scale_set.go index be6a5e462480..c2eea12fab52 100644 --- a/azurerm/internal/services/compute/virtual_machine_scale_set.go +++ b/azurerm/internal/services/compute/virtual_machine_scale_set.go @@ -4,28 +4,27 @@ import ( "fmt" 
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/structure" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" azValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/validate" msiparse "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/msi/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func VirtualMachineScaleSetAdditionalCapabilitiesSchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, +func VirtualMachineScaleSetAdditionalCapabilitiesSchema() *pluginsdk.Schema { + return &pluginsdk.Schema{ + Type: pluginsdk.TypeList, Optional: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ // NOTE: requires registration to use: // $ az feature show --namespace Microsoft.Compute --name UltraSSDWithVMSS // $ az provider register -n Microsoft.Compute "ultra_ssd_enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: false, ForceNew: true, @@ -65,15 +64,15 @@ func FlattenVirtualMachineScaleSetAdditionalCapabilities(input *compute.Addition } } -func VirtualMachineScaleSetIdentitySchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, +func VirtualMachineScaleSetIdentitySchema() *pluginsdk.Schema { + return &pluginsdk.Schema{ + Type: pluginsdk.TypeList, Optional: true, MaxItems: 1, - Elem: 
&schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ string(compute.ResourceIdentityTypeSystemAssigned), @@ -83,15 +82,15 @@ func VirtualMachineScaleSetIdentitySchema() *schema.Schema { }, "identity_ids": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, "principal_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, }, @@ -113,7 +112,7 @@ func ExpandVirtualMachineScaleSetIdentity(input []interface{}) (*compute.Virtual Type: compute.ResourceIdentityType(raw["type"].(string)), } - identityIdsRaw := raw["identity_ids"].(*schema.Set).List() + identityIdsRaw := raw["identity_ids"].(*pluginsdk.Set).List() identityIds := make(map[string]*compute.VirtualMachineScaleSetIdentityUserAssignedIdentitiesValue) for _, v := range identityIdsRaw { identityIds[v.(string)] = &compute.VirtualMachineScaleSetIdentityUserAssignedIdentitiesValue{} @@ -160,14 +159,14 @@ func FlattenVirtualMachineScaleSetIdentity(input *compute.VirtualMachineScaleSet }, nil } -func VirtualMachineScaleSetNetworkInterfaceSchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, +func VirtualMachineScaleSetNetworkInterfaceSchema() *pluginsdk.Schema { + return &pluginsdk.Schema{ + Type: pluginsdk.TypeList, Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validation.StringIsNotEmpty, @@ -175,30 +174,30 @@ func VirtualMachineScaleSetNetworkInterfaceSchema() *schema.Schema { "ip_configuration": 
virtualMachineScaleSetIPConfigurationSchema(), "dns_servers": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: validation.StringIsNotEmpty, }, }, "enable_accelerated_networking": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: false, }, "enable_ip_forwarding": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: false, }, "network_security_group_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ValidateFunc: azure.ValidateResourceIDOrEmpty, }, "primary": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: false, }, @@ -207,40 +206,40 @@ func VirtualMachineScaleSetNetworkInterfaceSchema() *schema.Schema { } } -func VirtualMachineScaleSetNetworkInterfaceSchemaForDataSource() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, +func VirtualMachineScaleSetNetworkInterfaceSchemaForDataSource() *pluginsdk.Schema { + return &pluginsdk.Schema{ + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "ip_configuration": virtualMachineScaleSetIPConfigurationSchemaForDataSource(), "dns_servers": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, "enable_accelerated_networking": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Computed: true, }, "enable_ip_forwarding": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Computed: true, }, "network_security_group_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "primary": { - Type: 
schema.TypeBool, + Type: pluginsdk.TypeBool, Computed: true, }, }, @@ -248,53 +247,53 @@ func VirtualMachineScaleSetNetworkInterfaceSchemaForDataSource() *schema.Schema } } -func virtualMachineScaleSetIPConfigurationSchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, +func virtualMachineScaleSetIPConfigurationSchema() *pluginsdk.Schema { + return &pluginsdk.Schema{ + Type: pluginsdk.TypeList, Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringIsNotEmpty, }, // Optional "application_gateway_backend_address_pool_ids": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, + Elem: &pluginsdk.Schema{Type: pluginsdk.TypeString}, + Set: pluginsdk.HashString, }, "application_security_group_ids": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: azure.ValidateResourceID, }, - Set: schema.HashString, + Set: pluginsdk.HashString, MaxItems: 20, }, "load_balancer_backend_address_pool_ids": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, + Elem: &pluginsdk.Schema{Type: pluginsdk.TypeString}, + Set: pluginsdk.HashString, }, "load_balancer_inbound_nat_rules_ids": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, + Elem: &pluginsdk.Schema{Type: pluginsdk.TypeString}, + Set: pluginsdk.HashString, }, "primary": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: false, }, @@ -302,13 +301,13 @@ func 
virtualMachineScaleSetIPConfigurationSchema() *schema.Schema { "public_ip_address": virtualMachineScaleSetPublicIPAddressSchema(), "subnet_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ValidateFunc: azure.ValidateResourceID, }, "version": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Default: string(compute.IPv4), ValidateFunc: validation.StringInSlice([]string{ @@ -321,63 +320,63 @@ func virtualMachineScaleSetIPConfigurationSchema() *schema.Schema { } } -func virtualMachineScaleSetIPConfigurationSchemaForDataSource() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, +func virtualMachineScaleSetIPConfigurationSchemaForDataSource() *pluginsdk.Schema { + return &pluginsdk.Schema{ + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "application_gateway_backend_address_pool_ids": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, "application_security_group_ids": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, "load_balancer_backend_address_pool_ids": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, "load_balancer_inbound_nat_rules_ids": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, "primary": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, 
Computed: true, }, "public_ip_address": virtualMachineScaleSetPublicIPAddressSchemaForDataSource(), "subnet_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "version": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, }, @@ -385,45 +384,45 @@ func virtualMachineScaleSetIPConfigurationSchemaForDataSource() *schema.Schema { } } -func virtualMachineScaleSetPublicIPAddressSchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, +func virtualMachineScaleSetPublicIPAddressSchema() *pluginsdk.Schema { + return &pluginsdk.Schema{ + Type: pluginsdk.TypeList, Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringIsNotEmpty, }, // Optional "domain_name_label": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ValidateFunc: validation.StringIsNotEmpty, }, "idle_timeout_in_minutes": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Computed: true, ValidateFunc: validation.IntBetween(4, 32), }, "ip_tag": { // TODO: does this want to be a Set? 
- Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "tag": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validation.StringIsNotEmpty, }, "type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validation.StringIsNotEmpty, @@ -435,7 +434,7 @@ func virtualMachineScaleSetPublicIPAddressSchema() *schema.Schema { // $ az feature register --namespace Microsoft.Network --name AllowBringYourOwnPublicIpAddress // $ az provider register -n Microsoft.Network "public_ip_prefix_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, ValidateFunc: azure.ValidateResourceIDOrEmpty, @@ -445,38 +444,38 @@ func virtualMachineScaleSetPublicIPAddressSchema() *schema.Schema { } } -func virtualMachineScaleSetPublicIPAddressSchemaForDataSource() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, +func virtualMachineScaleSetPublicIPAddressSchemaForDataSource() *pluginsdk.Schema { + return &pluginsdk.Schema{ + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "domain_name_label": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "idle_timeout_in_minutes": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Computed: true, }, "ip_tag": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "tag": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "type": 
{ - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, }, @@ -484,7 +483,7 @@ func virtualMachineScaleSetPublicIPAddressSchemaForDataSource() *schema.Schema { }, "public_ip_prefix_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, }, @@ -538,16 +537,16 @@ func ExpandVirtualMachineScaleSetNetworkInterface(input []interface{}) (*[]compu } func expandVirtualMachineScaleSetIPConfiguration(raw map[string]interface{}) (*compute.VirtualMachineScaleSetIPConfiguration, error) { - applicationGatewayBackendAddressPoolIdsRaw := raw["application_gateway_backend_address_pool_ids"].(*schema.Set).List() + applicationGatewayBackendAddressPoolIdsRaw := raw["application_gateway_backend_address_pool_ids"].(*pluginsdk.Set).List() applicationGatewayBackendAddressPoolIds := expandIDsToSubResources(applicationGatewayBackendAddressPoolIdsRaw) - applicationSecurityGroupIdsRaw := raw["application_security_group_ids"].(*schema.Set).List() + applicationSecurityGroupIdsRaw := raw["application_security_group_ids"].(*pluginsdk.Set).List() applicationSecurityGroupIds := expandIDsToSubResources(applicationSecurityGroupIdsRaw) - loadBalancerBackendAddressPoolIdsRaw := raw["load_balancer_backend_address_pool_ids"].(*schema.Set).List() + loadBalancerBackendAddressPoolIdsRaw := raw["load_balancer_backend_address_pool_ids"].(*pluginsdk.Set).List() loadBalancerBackendAddressPoolIds := expandIDsToSubResources(loadBalancerBackendAddressPoolIdsRaw) - loadBalancerInboundNatPoolIdsRaw := raw["load_balancer_inbound_nat_rules_ids"].(*schema.Set).List() + loadBalancerInboundNatPoolIdsRaw := raw["load_balancer_inbound_nat_rules_ids"].(*pluginsdk.Set).List() loadBalancerInboundNatPoolIds := expandIDsToSubResources(loadBalancerInboundNatPoolIdsRaw) primary := raw["primary"].(bool) @@ -668,16 +667,16 @@ func ExpandVirtualMachineScaleSetNetworkInterfaceUpdate(input []interface{}) (*[ } func expandVirtualMachineScaleSetIPConfigurationUpdate(raw 
map[string]interface{}) (*compute.VirtualMachineScaleSetUpdateIPConfiguration, error) { - applicationGatewayBackendAddressPoolIdsRaw := raw["application_gateway_backend_address_pool_ids"].(*schema.Set).List() + applicationGatewayBackendAddressPoolIdsRaw := raw["application_gateway_backend_address_pool_ids"].(*pluginsdk.Set).List() applicationGatewayBackendAddressPoolIds := expandIDsToSubResources(applicationGatewayBackendAddressPoolIdsRaw) - applicationSecurityGroupIdsRaw := raw["application_security_group_ids"].(*schema.Set).List() + applicationSecurityGroupIdsRaw := raw["application_security_group_ids"].(*pluginsdk.Set).List() applicationSecurityGroupIds := expandIDsToSubResources(applicationSecurityGroupIdsRaw) - loadBalancerBackendAddressPoolIdsRaw := raw["load_balancer_backend_address_pool_ids"].(*schema.Set).List() + loadBalancerBackendAddressPoolIdsRaw := raw["load_balancer_backend_address_pool_ids"].(*pluginsdk.Set).List() loadBalancerBackendAddressPoolIds := expandIDsToSubResources(loadBalancerBackendAddressPoolIdsRaw) - loadBalancerInboundNatPoolIdsRaw := raw["load_balancer_inbound_nat_rules_ids"].(*schema.Set).List() + loadBalancerInboundNatPoolIdsRaw := raw["load_balancer_inbound_nat_rules_ids"].(*pluginsdk.Set).List() loadBalancerInboundNatPoolIds := expandIDsToSubResources(loadBalancerInboundNatPoolIdsRaw) primary := raw["primary"].(bool) @@ -871,15 +870,15 @@ func flattenVirtualMachineScaleSetPublicIPAddress(input compute.VirtualMachineSc } } -func VirtualMachineScaleSetDataDiskSchema() *schema.Schema { - return &schema.Schema{ +func VirtualMachineScaleSetDataDiskSchema() *pluginsdk.Schema { + return &pluginsdk.Schema{ // TODO: does this want to be a Set? 
- Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "caching": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ string(compute.CachingTypesNone), @@ -889,7 +888,7 @@ func VirtualMachineScaleSetDataDiskSchema() *schema.Schema { }, "create_option": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ValidateFunc: validation.StringInSlice([]string{ string(compute.DiskCreateOptionTypesEmpty), @@ -899,7 +898,7 @@ func VirtualMachineScaleSetDataDiskSchema() *schema.Schema { }, "disk_encryption_set_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, // whilst the API allows updating this value, it's never actually set at Azure's end // presumably this'll take effect once key rotation is supported a few months post-GA? @@ -909,19 +908,19 @@ func VirtualMachineScaleSetDataDiskSchema() *schema.Schema { }, "disk_size_gb": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Required: true, ValidateFunc: validation.IntBetween(1, 32767), }, "lun": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Required: true, ValidateFunc: validation.IntBetween(0, 2000), // TODO: confirm upper bounds }, "storage_account_type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ string(compute.StorageAccountTypesPremiumLRS), @@ -932,21 +931,21 @@ func VirtualMachineScaleSetDataDiskSchema() *schema.Schema { }, "write_accelerator_enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: false, }, // TODO 3.0 - change this to ultra_ssd_disk_iops_read_write "disk_iops_read_write": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Computed: true, }, // TODO 3.0 - change this to 
ultra_ssd_disk_iops_read_write "disk_mbps_read_write": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Computed: true, }, @@ -1056,15 +1055,15 @@ func FlattenVirtualMachineScaleSetDataDisk(input *[]compute.VirtualMachineScaleS return output } -func VirtualMachineScaleSetOSDiskSchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, +func VirtualMachineScaleSetOSDiskSchema() *pluginsdk.Schema { + return &pluginsdk.Schema{ + Type: pluginsdk.TypeList, Required: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "caching": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ string(compute.CachingTypesNone), @@ -1073,7 +1072,7 @@ func VirtualMachineScaleSetOSDiskSchema() *schema.Schema { }, false), }, "storage_account_type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, // whilst this appears in the Update block the API returns this when changing: // Changing property 'osDisk.managedDisk.storageAccountType' is not allowed @@ -1087,14 +1086,14 @@ func VirtualMachineScaleSetOSDiskSchema() *schema.Schema { }, "diff_disk_settings": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, ForceNew: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "option": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validation.StringInSlice([]string{ @@ -1106,7 +1105,7 @@ func VirtualMachineScaleSetOSDiskSchema() *schema.Schema { }, "disk_encryption_set_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, // whilst the API allows updating this value, it's never actually set at Azure's end // presumably this'll take effect once key rotation is supported a 
few months post-GA? @@ -1116,14 +1115,14 @@ func VirtualMachineScaleSetOSDiskSchema() *schema.Schema { }, "disk_size_gb": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Computed: true, ValidateFunc: validation.IntBetween(0, 4095), }, "write_accelerator_enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: false, }, @@ -1232,20 +1231,20 @@ func FlattenVirtualMachineScaleSetOSDisk(input *compute.VirtualMachineScaleSetOS } } -func VirtualMachineScaleSetAutomatedOSUpgradePolicySchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, +func VirtualMachineScaleSetAutomatedOSUpgradePolicySchema() *pluginsdk.Schema { + return &pluginsdk.Schema{ + Type: pluginsdk.TypeList, Optional: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ // TODO: should these be optional + defaulted? "disable_automatic_rollback": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Required: true, }, "enable_automatic_os_upgrade": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Required: true, }, }, @@ -1288,28 +1287,28 @@ func FlattenVirtualMachineScaleSetAutomaticOSUpgradePolicy(input *compute.Automa } } -func VirtualMachineScaleSetRollingUpgradePolicySchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, +func VirtualMachineScaleSetRollingUpgradePolicySchema() *pluginsdk.Schema { + return &pluginsdk.Schema{ + Type: pluginsdk.TypeList, Optional: true, ForceNew: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "max_batch_instance_percent": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Required: true, }, "max_unhealthy_instance_percent": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Required: true, }, "max_unhealthy_upgraded_instance_percent": { - Type: schema.TypeInt, + Type: 
pluginsdk.TypeInt, Required: true, }, "pause_time_between_batches": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: azValidate.ISO8601Duration, }, @@ -1368,20 +1367,20 @@ func FlattenVirtualMachineScaleSetRollingUpgradePolicy(input *compute.RollingUpg } } -func VirtualMachineScaleSetTerminateNotificationSchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, +func VirtualMachineScaleSetTerminateNotificationSchema() *pluginsdk.Schema { + return &pluginsdk.Schema{ + Type: pluginsdk.TypeList, Optional: true, Computed: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Required: true, }, "timeout": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ValidateFunc: azValidate.ISO8601Duration, Default: "PT5M", @@ -1430,20 +1429,20 @@ func FlattenVirtualMachineScaleSetScheduledEventsProfile(input *compute.Schedule } } -func VirtualMachineScaleSetAutomaticRepairsPolicySchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, +func VirtualMachineScaleSetAutomaticRepairsPolicySchema() *pluginsdk.Schema { + return &pluginsdk.Schema{ + Type: pluginsdk.TypeList, Optional: true, Computed: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Required: true, }, "grace_period": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Default: "PT30M", // this field actually has a range from 30m to 90m, is there a function that can do this validation? 
@@ -1489,68 +1488,68 @@ func FlattenVirtualMachineScaleSetAutomaticRepairsPolicy(input *compute.Automati } } -func VirtualMachineScaleSetExtensionsSchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeSet, +func VirtualMachineScaleSetExtensionsSchema() *pluginsdk.Schema { + return &pluginsdk.Schema{ + Type: pluginsdk.TypeSet, Optional: true, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringIsNotEmpty, }, "publisher": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringIsNotEmpty, }, "type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringIsNotEmpty, }, "type_handler_version": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringIsNotEmpty, }, "auto_upgrade_minor_version": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: true, }, "force_update_tag": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, }, "protected_settings": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Sensitive: true, ValidateFunc: validation.StringIsJSON, }, "provision_after_extensions": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, "settings": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ValidateFunc: validation.StringIsJSON, - DiffSuppressFunc: structure.SuppressJsonDiff, + DiffSuppressFunc: pluginsdk.SuppressJsonDiff, }, }, }, @@ -1588,7 +1587,7 @@ func expandVirtualMachineScaleSetExtensions(input []interface{}) (extensionProfi } if val, ok := 
extensionRaw["settings"]; ok && val.(string) != "" { - settings, err := structure.ExpandJsonFromString(val.(string)) + settings, err := pluginsdk.ExpandJsonFromString(val.(string)) if err != nil { return nil, false, fmt.Errorf("failed to parse JSON from `settings`: %+v", err) } @@ -1596,7 +1595,7 @@ func expandVirtualMachineScaleSetExtensions(input []interface{}) (extensionProfi } if val, ok := extensionRaw["protected_settings"]; ok && val.(string) != "" { - protectedSettings, err := structure.ExpandJsonFromString(val.(string)) + protectedSettings, err := pluginsdk.ExpandJsonFromString(val.(string)) if err != nil { return nil, false, fmt.Errorf("failed to parse JSON from `protected_settings`: %+v", err) } @@ -1611,7 +1610,7 @@ func expandVirtualMachineScaleSetExtensions(input []interface{}) (extensionProfi return extensionProfile, hasHealthExtension, nil } -func flattenVirtualMachineScaleSetExtensions(input *compute.VirtualMachineScaleSetExtensionProfile, d *schema.ResourceData) ([]map[string]interface{}, error) { +func flattenVirtualMachineScaleSetExtensions(input *compute.VirtualMachineScaleSetExtensionProfile, d *pluginsdk.ResourceData) ([]map[string]interface{}, error) { result := make([]map[string]interface{}, 0) if input == nil || input.Extensions == nil { return result, nil @@ -1658,7 +1657,7 @@ func flattenVirtualMachineScaleSetExtensions(input *compute.VirtualMachineScaleS } if props.Settings != nil { - extSettingsRaw, err := structure.FlattenJsonToString(props.Settings.(map[string]interface{})) + extSettingsRaw, err := pluginsdk.FlattenJsonToString(props.Settings.(map[string]interface{})) if err != nil { return nil, err } diff --git a/azurerm/internal/services/compute/virtual_machine_scale_set_data_source.go b/azurerm/internal/services/compute/virtual_machine_scale_set_data_source.go index 69e75f340427..1e93f10762da 100644 --- a/azurerm/internal/services/compute/virtual_machine_scale_set_data_source.go +++ 
b/azurerm/internal/services/compute/virtual_machine_scale_set_data_source.go @@ -4,25 +4,25 @@ import ( "fmt" "time" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func dataSourceVirtualMachineScaleSet() *schema.Resource { - return &schema.Resource{ +func dataSourceVirtualMachineScaleSet() *pluginsdk.Resource { + return &pluginsdk.Resource{ Read: dataSourceVirtualMachineScaleSetRead, - Timeouts: &schema.ResourceTimeout{ - Read: schema.DefaultTimeout(5 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Read: pluginsdk.DefaultTimeout(5 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.NoZeroValues, }, @@ -34,25 +34,25 @@ func dataSourceVirtualMachineScaleSet() *schema.Resource { "network_interface": VirtualMachineScaleSetNetworkInterfaceSchemaForDataSource(), "identity": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "identity_ids": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: 
pluginsdk.TypeString, }, }, "principal_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, }, @@ -62,7 +62,7 @@ func dataSourceVirtualMachineScaleSet() *schema.Resource { } } -func dataSourceVirtualMachineScaleSetRead(d *schema.ResourceData, meta interface{}) error { +func dataSourceVirtualMachineScaleSetRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.VMScaleSetClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() diff --git a/azurerm/internal/services/compute/virtual_machine_scale_set_data_source_test.go b/azurerm/internal/services/compute/virtual_machine_scale_set_data_source_test.go index 4e18f422f2c9..971d7f226b8e 100644 --- a/azurerm/internal/services/compute/virtual_machine_scale_set_data_source_test.go +++ b/azurerm/internal/services/compute/virtual_machine_scale_set_data_source_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -16,10 +15,10 @@ func TestAccDataSourceVirtualMachineScaleSet_basicLinux(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_virtual_machine_scale_set", "test") r := VirtualMachineScaleSetDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.basicLinux(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("identity.#").HasValue("1"), check.That(data.ResourceName).Key("identity.0.type").HasValue("SystemAssigned"), check.That(data.ResourceName).Key("identity.0.principal_id").Exists(), @@ -32,10 +31,10 @@ func TestAccDataSourceVirtualMachineScaleSet_basicWindows(t *testing.T) { data := acceptance.BuildTestData(t, 
"data.azurerm_virtual_machine_scale_set", "test") r := VirtualMachineScaleSetDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.basicWindows(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("identity.#").HasValue("1"), check.That(data.ResourceName).Key("identity.0.type").HasValue("SystemAssigned"), check.That(data.ResourceName).Key("identity.0.principal_id").Exists(), @@ -48,10 +47,10 @@ func TestAccDataSourceVirtualMachineScaleSet_orchestrated(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_virtual_machine_scale_set", "test") r := VirtualMachineScaleSetDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.orchestrated(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("id").Exists(), ), }, diff --git a/azurerm/internal/services/compute/virtual_machine_scale_set_extension_resource.go b/azurerm/internal/services/compute/virtual_machine_scale_set_extension_resource.go index d785dbcda3b8..2b5396efc2a2 100644 --- a/azurerm/internal/services/compute/virtual_machine_scale_set_extension_resource.go +++ b/azurerm/internal/services/compute/virtual_machine_scale_set_extension_resource.go @@ -7,22 +7,20 @@ import ( "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" "github.com/hashicorp/go-azure-helpers/response" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/structure" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/parse" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) // NOTE (also in the docs): this is not intended to be used with the `azurerm_virtual_machine_scale_set` resource -func resourceVirtualMachineScaleSetExtension() *schema.Resource { - return &schema.Resource{ +func resourceVirtualMachineScaleSetExtension() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceVirtualMachineScaleSetExtensionCreate, Read: resourceVirtualMachineScaleSetExtensionRead, Update: resourceVirtualMachineScaleSetExtensionUpdate, @@ -33,86 +31,86 @@ func resourceVirtualMachineScaleSetExtension() *schema.Resource { return err }), - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(30 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validation.StringIsNotEmpty, }, "virtual_machine_scale_set_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.VirtualMachineScaleSetID, }, "publisher": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, 
ForceNew: true, ValidateFunc: validation.StringIsNotEmpty, }, "type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validation.StringIsNotEmpty, }, "type_handler_version": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringIsNotEmpty, }, "auto_upgrade_minor_version": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: true, }, "force_update_tag": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, }, "protected_settings": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Sensitive: true, ValidateFunc: validation.StringIsJSON, - DiffSuppressFunc: structure.SuppressJsonDiff, + DiffSuppressFunc: pluginsdk.SuppressJsonDiff, }, "provision_after_extensions": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, "settings": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ValidateFunc: validation.StringIsJSON, - DiffSuppressFunc: structure.SuppressJsonDiff, + DiffSuppressFunc: pluginsdk.SuppressJsonDiff, }, }, } } -func resourceVirtualMachineScaleSetExtensionCreate(d *schema.ResourceData, meta interface{}) error { +func resourceVirtualMachineScaleSetExtensionCreate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.VMScaleSetExtensionsClient ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -138,7 +136,7 @@ func resourceVirtualMachineScaleSetExtensionCreate(d *schema.ResourceData, meta settings := map[string]interface{}{} if settingsString := d.Get("settings").(string); settingsString != "" { - s, err := structure.ExpandJsonFromString(settingsString) + s, err := pluginsdk.ExpandJsonFromString(settingsString) if err != nil { return fmt.Errorf("unable 
to parse `settings`: %s", err) } @@ -150,7 +148,7 @@ func resourceVirtualMachineScaleSetExtensionCreate(d *schema.ResourceData, meta protectedSettings := map[string]interface{}{} if protectedSettingsString := d.Get("protected_settings").(string); protectedSettingsString != "" { - ps, err := structure.ExpandJsonFromString(protectedSettingsString) + ps, err := pluginsdk.ExpandJsonFromString(protectedSettingsString) if err != nil { return fmt.Errorf("unable to parse `protected_settings`: %s", err) } @@ -191,7 +189,7 @@ func resourceVirtualMachineScaleSetExtensionCreate(d *schema.ResourceData, meta return resourceVirtualMachineScaleSetExtensionRead(d, meta) } -func resourceVirtualMachineScaleSetExtensionUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceVirtualMachineScaleSetExtensionUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.VMScaleSetExtensionsClient ctx, cancel := timeouts.ForUpdate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -213,7 +211,7 @@ func resourceVirtualMachineScaleSetExtensionUpdate(d *schema.ResourceData, meta if d.HasChange("protected_settings") { protectedSettings := map[string]interface{}{} if protectedSettingsString := d.Get("protected_settings").(string); protectedSettingsString != "" { - ps, err := structure.ExpandJsonFromString(protectedSettingsString) + ps, err := pluginsdk.ExpandJsonFromString(protectedSettingsString) if err != nil { return fmt.Errorf("unable to parse `protected_settings`: %s", err) } @@ -236,7 +234,7 @@ func resourceVirtualMachineScaleSetExtensionUpdate(d *schema.ResourceData, meta settings := map[string]interface{}{} if settingsString := d.Get("settings").(string); settingsString != "" { - s, err := structure.ExpandJsonFromString(settingsString) + s, err := pluginsdk.ExpandJsonFromString(settingsString) if err != nil { return fmt.Errorf("unable to parse `settings`: %s", err) } @@ -270,7 +268,7 @@ func 
resourceVirtualMachineScaleSetExtensionUpdate(d *schema.ResourceData, meta return resourceVirtualMachineScaleSetExtensionRead(d, meta) } -func resourceVirtualMachineScaleSetExtensionRead(d *schema.ResourceData, meta interface{}) error { +func resourceVirtualMachineScaleSetExtensionRead(d *pluginsdk.ResourceData, meta interface{}) error { vmssClient := meta.(*clients.Client).Compute.VMScaleSetClient client := meta.(*clients.Client).Compute.VMScaleSetExtensionsClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) @@ -318,7 +316,7 @@ func resourceVirtualMachineScaleSetExtensionRead(d *schema.ResourceData, meta in if props.Settings != nil { settingsVal, ok := props.Settings.(map[string]interface{}) if ok { - settingsJson, err := structure.FlattenJsonToString(settingsVal) + settingsJson, err := pluginsdk.FlattenJsonToString(settingsVal) if err != nil { return fmt.Errorf("unable to parse settings from response: %s", err) } @@ -331,7 +329,7 @@ func resourceVirtualMachineScaleSetExtensionRead(d *schema.ResourceData, meta in return nil } -func resourceVirtualMachineScaleSetExtensionDelete(d *schema.ResourceData, meta interface{}) error { +func resourceVirtualMachineScaleSetExtensionDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.VMScaleSetExtensionsClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() diff --git a/azurerm/internal/services/compute/virtual_machine_scale_set_extension_resource_test.go b/azurerm/internal/services/compute/virtual_machine_scale_set_extension_resource_test.go index b9165b09b864..c341c97bcc38 100644 --- a/azurerm/internal/services/compute/virtual_machine_scale_set_extension_resource_test.go +++ b/azurerm/internal/services/compute/virtual_machine_scale_set_extension_resource_test.go @@ -5,12 +5,11 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - 
"github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -20,10 +19,10 @@ type VirtualMachineScaleSetExtensionResource struct { func TestAccVirtualMachineScaleSetExtension_basicLinux(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set_extension", "test") r := VirtualMachineScaleSetExtensionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicLinux(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -34,10 +33,10 @@ func TestAccVirtualMachineScaleSetExtension_basicLinux(t *testing.T) { func TestAccVirtualMachineScaleSetExtension_basicWindows(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set_extension", "test") r := VirtualMachineScaleSetExtensionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicWindows(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -49,10 +48,10 @@ func TestAccVirtualMachineScaleSetExtension_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set_extension", "test") r := VirtualMachineScaleSetExtensionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + 
data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicLinux(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -63,10 +62,10 @@ func TestAccVirtualMachineScaleSetExtension_requiresImport(t *testing.T) { func TestAccVirtualMachineScaleSetExtension_autoUpgradeDisabled(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set_extension", "test") r := VirtualMachineScaleSetExtensionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.autoUpgradeDisabled(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -78,10 +77,10 @@ func TestAccVirtualMachineScaleSetExtension_extensionChaining(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set_extension", "first") r := VirtualMachineScaleSetExtensionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.extensionChaining(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -97,17 +96,17 @@ func TestAccVirtualMachineScaleSetExtension_extensionChaining(t *testing.T) { func TestAccVirtualMachineScaleSetExtension_forceUpdateTag(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set_extension", "test") r := VirtualMachineScaleSetExtensionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.forceUpdateTag(data, "first"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.forceUpdateTag(data, "second"), - Check: resource.ComposeTestCheckFunc( + 
Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -118,10 +117,10 @@ func TestAccVirtualMachineScaleSetExtension_forceUpdateTag(t *testing.T) { func TestAccVirtualMachineScaleSetExtension_protectedSettings(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set_extension", "test") r := VirtualMachineScaleSetExtensionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.protectedSettings(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -132,10 +131,10 @@ func TestAccVirtualMachineScaleSetExtension_protectedSettings(t *testing.T) { func TestAccVirtualMachineScaleSetExtension_protectedSettingsOnly(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set_extension", "test") r := VirtualMachineScaleSetExtensionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.protectedSettingsOnly(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -146,18 +145,18 @@ func TestAccVirtualMachineScaleSetExtension_protectedSettingsOnly(t *testing.T) func TestAccVirtualMachineScaleSetExtension_updateVersion(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set_extension", "test") r := VirtualMachineScaleSetExtensionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // old version Config: r.updateVersion(data, "1.2"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.updateVersion(data, "1.3"), - Check: resource.ComposeTestCheckFunc( + Check: 
acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -165,7 +164,7 @@ func TestAccVirtualMachineScaleSetExtension_updateVersion(t *testing.T) { }) } -func (t VirtualMachineScaleSetExtensionResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (t VirtualMachineScaleSetExtensionResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := parse.VirtualMachineScaleSetExtensionID(state.ID) if err != nil { return nil, err diff --git a/azurerm/internal/services/compute/virtual_machine_scale_set_import.go b/azurerm/internal/services/compute/virtual_machine_scale_set_import.go index 1300581cb8b5..b34f77395e25 100644 --- a/azurerm/internal/services/compute/virtual_machine_scale_set_import.go +++ b/azurerm/internal/services/compute/virtual_machine_scale_set_import.go @@ -4,11 +4,10 @@ import ( "context" "fmt" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" ) func importOrchestratedVirtualMachineScaleSet(ctx context.Context, d *pluginsdk.ResourceData, meta interface{}) (data []*pluginsdk.ResourceData, err error) { diff --git a/azurerm/internal/services/compute/virtual_machine_scale_set_resource.go b/azurerm/internal/services/compute/virtual_machine_scale_set_resource.go index 54a24e44fa08..8fd55453661f 100644 --- a/azurerm/internal/services/compute/virtual_machine_scale_set_resource.go +++ b/azurerm/internal/services/compute/virtual_machine_scale_set_resource.go @@ -8,23 +8,19 @@ import ( "strings" "time" - 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" - - validate2 "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/validate" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/structure" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/migration" + validate2 "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/validate" msiparse "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/msi/parse" msivalidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/msi/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/suppress" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -32,8 +28,8 @@ import ( // NOTE: the `azurerm_virtual_machine_scale_set` resource has been superseded by the // `azurerm_linux_virtual_machine_scale_set` and `azurerm_windows_virtual_machine_scale_set` resources // and as such this 
resource is feature-frozen and new functionality will be added to these new resources instead. -func resourceVirtualMachineScaleSet() *schema.Resource { - return &schema.Resource{ +func resourceVirtualMachineScaleSet() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceVirtualMachineScaleSetCreateUpdate, Read: resourceVirtualMachineScaleSetRead, Update: resourceVirtualMachineScaleSetCreateUpdate, @@ -47,16 +43,16 @@ func resourceVirtualMachineScaleSet() *schema.Resource { // TODO: replace this with an importer which validates the ID during import Importer: pluginsdk.DefaultImporter(), - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(60 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(60 * time.Minute), - Delete: schema.DefaultTimeout(60 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(60 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(60 * time.Minute), + Delete: pluginsdk.DefaultTimeout(60 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validation.StringIsNotEmpty, @@ -69,14 +65,14 @@ func resourceVirtualMachineScaleSet() *schema.Resource { "zones": azure.SchemaZones(), "identity": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, Computed: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, DiffSuppressFunc: suppress.CaseDifference, ValidateFunc: validation.StringInSlice([]string{ @@ -86,15 +82,15 @@ func resourceVirtualMachineScaleSet() *schema.Resource { }, false), }, "identity_ids": { - Type: schema.TypeList, + Type: 
pluginsdk.TypeList, Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: msivalidate.UserAssignedIdentityID, }, }, "principal_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, }, @@ -102,26 +98,26 @@ func resourceVirtualMachineScaleSet() *schema.Resource { }, "sku": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Required: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringIsNotEmpty, }, "tier": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Computed: true, DiffSuppressFunc: suppress.CaseDifference, }, "capacity": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Required: true, ValidateFunc: validation.IntAtLeast(0), }, @@ -130,7 +126,7 @@ func resourceVirtualMachineScaleSet() *schema.Resource { }, "license_type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Computed: true, DiffSuppressFunc: suppress.CaseDifference, @@ -141,7 +137,7 @@ func resourceVirtualMachineScaleSet() *schema.Resource { }, "upgrade_policy_mode": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ string(compute.Automatic), @@ -152,46 +148,46 @@ func resourceVirtualMachineScaleSet() *schema.Resource { }, "health_probe_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ValidateFunc: azure.ValidateResourceID, }, "automatic_os_upgrade": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: false, }, "rolling_upgrade_policy": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: 
&pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "max_batch_instance_percent": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Default: 20, ValidateFunc: validation.IntBetween(5, 100), }, "max_unhealthy_instance_percent": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Default: 20, ValidateFunc: validation.IntBetween(5, 100), }, "max_unhealthy_upgraded_instance_percent": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Default: 20, ValidateFunc: validation.IntBetween(5, 100), }, "pause_time_between_batches": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Default: "PT0S", ValidateFunc: validate.ISO8601Duration, @@ -202,20 +198,20 @@ func resourceVirtualMachineScaleSet() *schema.Resource { }, "overprovision": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: true, }, "single_placement_group": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: true, ForceNew: true, }, "priority": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, ValidateFunc: validation.StringInSlice([]string{ @@ -225,7 +221,7 @@ func resourceVirtualMachineScaleSet() *schema.Resource { }, "eviction_policy": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, ValidateFunc: validation.StringInSlice([]string{ @@ -235,32 +231,32 @@ func resourceVirtualMachineScaleSet() *schema.Resource { }, "os_profile": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Required: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "computer_name_prefix": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, }, "admin_username": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringIsNotEmpty, }, 
"admin_password": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Sensitive: true, ValidateFunc: validation.StringIsNotEmpty, }, "custom_data": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, StateFunc: userDataStateFunc, DiffSuppressFunc: userDataDiffSuppressFunc, @@ -270,27 +266,27 @@ func resourceVirtualMachineScaleSet() *schema.Resource { }, "os_profile_secrets": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "source_vault_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: azure.ValidateResourceID, }, "vault_certificates": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "certificate_url": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "certificate_store": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, }, }, @@ -302,54 +298,54 @@ func resourceVirtualMachineScaleSet() *schema.Resource { // lintignore:S018 "os_profile_windows_config": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "provision_vm_agent": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, }, "enable_automatic_upgrades": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, }, "winrm": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "protocol": { - Type: schema.TypeString, + Type: 
pluginsdk.TypeString, Required: true, }, "certificate_url": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, }, }, }, }, "additional_unattend_config": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "pass": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "component": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "setting_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "content": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, Sensitive: true, }, @@ -363,29 +359,29 @@ func resourceVirtualMachineScaleSet() *schema.Resource { // lintignore:S018 "os_profile_linux_config": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, Computed: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "disable_password_authentication": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: false, ForceNew: true, }, "ssh_keys": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "path": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "key_data": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, }, }, @@ -398,49 +394,49 @@ func resourceVirtualMachineScaleSet() *schema.Resource { // lintignore:S018 "network_profile": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: 
schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringIsNotEmpty, }, "primary": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Required: true, }, "accelerated_networking": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, }, "ip_forwarding": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: false, }, "network_security_group_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ValidateFunc: azure.ValidateResourceID, }, "dns_settings": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "dns_servers": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Required: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: validation.StringIsNotEmpty, }, }, @@ -449,79 +445,79 @@ func resourceVirtualMachineScaleSet() *schema.Resource { }, "ip_configuration": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringIsNotEmpty, }, "subnet_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: azure.ValidateResourceID, }, "application_gateway_backend_address_pool_ids": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, + Elem: &pluginsdk.Schema{Type: pluginsdk.TypeString}, + Set: pluginsdk.HashString, }, "application_security_group_ids": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, - Elem: &schema.Schema{ - 
Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: azure.ValidateResourceID, }, - Set: schema.HashString, + Set: pluginsdk.HashString, MaxItems: 20, }, "load_balancer_backend_address_pool_ids": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, + Elem: &pluginsdk.Schema{Type: pluginsdk.TypeString}, + Set: pluginsdk.HashString, }, "load_balancer_inbound_nat_rules_ids": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, + Elem: &pluginsdk.Schema{Type: pluginsdk.TypeString}, + Set: pluginsdk.HashString, }, "primary": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Required: true, }, "public_ip_address_configuration": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "idle_timeout": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Required: true, ValidateFunc: validation.IntBetween(4, 32), }, "domain_name_label": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, }, @@ -536,19 +532,19 @@ func resourceVirtualMachineScaleSet() *schema.Resource { }, "boot_diagnostics": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: true, }, "storage_uri": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, }, @@ -557,30 +553,30 @@ func resourceVirtualMachineScaleSet() 
*schema.Resource { // lintignore:S018 "storage_profile_os_disk": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Required: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, }, "image": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, }, "vhd_containers": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, + Elem: &pluginsdk.Schema{Type: pluginsdk.TypeString}, + Set: pluginsdk.HashString, }, "managed_disk_type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Computed: true, ValidateFunc: validation.StringInSlice([]string{ @@ -591,18 +587,18 @@ func resourceVirtualMachineScaleSet() *schema.Resource { }, "caching": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Computed: true, }, "os_type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, }, "create_option": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, }, @@ -611,35 +607,35 @@ func resourceVirtualMachineScaleSet() *schema.Resource { }, "storage_profile_data_disk": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "lun": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Required: true, }, "create_option": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "caching": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Computed: true, }, "disk_size_gb": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Computed: true, ValidateFunc: validate2.DiskSizeGB, }, "managed_disk_type": { - Type: 
schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Computed: true, ValidateFunc: validation.StringInSlice([]string{ @@ -654,34 +650,34 @@ func resourceVirtualMachineScaleSet() *schema.Resource { // lintignore:S018 "storage_profile_image_reference": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, Computed: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, }, "publisher": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, }, "offer": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, }, "sku": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, }, "version": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, }, }, @@ -691,23 +687,23 @@ func resourceVirtualMachineScaleSet() *schema.Resource { // lintignore:S018 "plan": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "publisher": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "product": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, }, @@ -716,58 +712,58 @@ func resourceVirtualMachineScaleSet() *schema.Resource { // lintignore:S018 "extension": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "publisher": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, 
Required: true, }, "type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "type_handler_version": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "auto_upgrade_minor_version": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, }, "provision_after_extensions": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: validation.StringIsNotEmpty, }, - Set: schema.HashString, + Set: pluginsdk.HashString, }, "settings": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ValidateFunc: validation.StringIsJSON, - DiffSuppressFunc: structure.SuppressJsonDiff, + DiffSuppressFunc: pluginsdk.SuppressJsonDiff, }, "protected_settings": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Sensitive: true, ValidateFunc: validation.StringIsJSON, - DiffSuppressFunc: structure.SuppressJsonDiff, + DiffSuppressFunc: pluginsdk.SuppressJsonDiff, }, }, }, @@ -775,7 +771,7 @@ func resourceVirtualMachineScaleSet() *schema.Resource { }, "proximity_placement_group_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, @@ -793,7 +789,7 @@ func resourceVirtualMachineScaleSet() *schema.Resource { } } -func resourceVirtualMachineScaleSetCreateUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceVirtualMachineScaleSetCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.VMScaleSetClient ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -941,7 +937,7 @@ func resourceVirtualMachineScaleSetCreateUpdate(d *schema.ResourceData, meta int return resourceVirtualMachineScaleSetRead(d, meta) } -func resourceVirtualMachineScaleSetRead(d *schema.ResourceData, meta interface{}) error { +func 
resourceVirtualMachineScaleSetRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.VMScaleSetClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() @@ -1103,7 +1099,7 @@ func resourceVirtualMachineScaleSetRead(d *schema.ResourceData, meta interface{} return tags.FlattenAndSet(d, resp.Tags) } -func resourceVirtualMachineScaleSetDelete(d *schema.ResourceData, meta interface{}) error { +func resourceVirtualMachineScaleSetDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.VMScaleSetClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() @@ -1115,7 +1111,7 @@ func resourceVirtualMachineScaleSetDelete(d *schema.ResourceData, meta interface resGroup := id.ResourceGroup name := id.Path["virtualMachineScaleSets"] - // @ArcturusZhang (mimicking from virtual_machine_resource.go): sending `nil` here omits this value from being sent + // @ArcturusZhang (mimicking from virtual_machine_pluginsdk.go): sending `nil` here omits this value from being sent // which matches the previous behaviour - we're only splitting this out so it's clear why var forceDeletion *bool = nil future, err := client.Delete(ctx, resGroup, name, forceDeletion) @@ -1342,7 +1338,7 @@ func flattenAzureRmVirtualMachineScaleSetNetworkProfile(profile *compute.Virtual } } } - config["application_gateway_backend_address_pool_ids"] = schema.NewSet(schema.HashString, addressPools) + config["application_gateway_backend_address_pool_ids"] = pluginsdk.NewSet(pluginsdk.HashString, addressPools) applicationSecurityGroups := make([]interface{}, 0) if properties.ApplicationSecurityGroups != nil { @@ -1352,7 +1348,7 @@ func flattenAzureRmVirtualMachineScaleSetNetworkProfile(profile *compute.Virtual } } } - config["application_security_group_ids"] = schema.NewSet(schema.HashString, applicationSecurityGroups) + config["application_security_group_ids"] = 
pluginsdk.NewSet(pluginsdk.HashString, applicationSecurityGroups) if properties.LoadBalancerBackendAddressPools != nil { addressPools := make([]interface{}, 0, len(*properties.LoadBalancerBackendAddressPools)) @@ -1361,7 +1357,7 @@ func flattenAzureRmVirtualMachineScaleSetNetworkProfile(profile *compute.Virtual addressPools = append(addressPools, *v) } } - config["load_balancer_backend_address_pool_ids"] = schema.NewSet(schema.HashString, addressPools) + config["load_balancer_backend_address_pool_ids"] = pluginsdk.NewSet(pluginsdk.HashString, addressPools) } if properties.LoadBalancerInboundNatPools != nil { @@ -1371,7 +1367,7 @@ func flattenAzureRmVirtualMachineScaleSetNetworkProfile(profile *compute.Virtual inboundNatPools = append(inboundNatPools, *v) } } - config["load_balancer_inbound_nat_rules_ids"] = schema.NewSet(schema.HashString, inboundNatPools) + config["load_balancer_inbound_nat_rules_ids"] = pluginsdk.NewSet(pluginsdk.HashString, inboundNatPools) } if properties.Primary != nil { @@ -1409,7 +1405,7 @@ func flattenAzureRmVirtualMachineScaleSetNetworkProfile(profile *compute.Virtual return result } -func flattenAzureRMVirtualMachineScaleSetOsProfile(d *schema.ResourceData, profile *compute.VirtualMachineScaleSetOSProfile) []interface{} { +func flattenAzureRMVirtualMachineScaleSetOsProfile(d *pluginsdk.ResourceData, profile *compute.VirtualMachineScaleSetOSProfile) []interface{} { result := make(map[string]interface{}) result["computer_name_prefix"] = *profile.ComputerNamePrefix @@ -1448,7 +1444,7 @@ func flattenAzureRmVirtualMachineScaleSetStorageProfileOSDisk(profile *compute.V containers = append(containers, container) } } - result["vhd_containers"] = schema.NewSet(schema.HashString, containers) + result["vhd_containers"] = pluginsdk.NewSet(pluginsdk.HashString, containers) if profile.ManagedDisk != nil { result["managed_disk_type"] = string(profile.ManagedDisk.StorageAccountType) @@ -1540,11 +1536,11 @@ func 
flattenAzureRmVirtualMachineScaleSetExtensionProfile(profile *compute.Virtu provisionAfterExtensions = append(provisionAfterExtensions, provisionAfterExtension) } } - e["provision_after_extensions"] = schema.NewSet(schema.HashString, provisionAfterExtensions) + e["provision_after_extensions"] = pluginsdk.NewSet(pluginsdk.HashString, provisionAfterExtensions) if settings := properties.Settings; settings != nil { settingsVal := settings.(map[string]interface{}) - settingsJson, err := structure.FlattenJsonToString(settingsVal) + settingsJson, err := pluginsdk.FlattenJsonToString(settingsVal) if err != nil { return nil, err } @@ -1579,7 +1575,7 @@ func resourceVirtualMachineScaleSetStorageProfileImageReferenceHash(v interface{ } } - return schema.HashString(buf.String()) + return pluginsdk.HashString(buf.String()) } func resourceVirtualMachineScaleSetStorageProfileOsDiskHash(v interface{}) int { @@ -1589,11 +1585,11 @@ func resourceVirtualMachineScaleSetStorageProfileOsDiskHash(v interface{}) int { buf.WriteString(fmt.Sprintf("%s-", m["name"].(string))) if v, ok := m["vhd_containers"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(*schema.Set).List())) + buf.WriteString(fmt.Sprintf("%s-", v.(*pluginsdk.Set).List())) } } - return schema.HashString(buf.String()) + return pluginsdk.HashString(buf.String()) } func resourceVirtualMachineScaleSetNetworkConfigurationHash(v interface{}) int { @@ -1627,16 +1623,16 @@ func resourceVirtualMachineScaleSetNetworkConfigurationHash(v interface{}) int { buf.WriteString(fmt.Sprintf("%s-", subnetid.(string))) } if appPoolId, ok := config["application_gateway_backend_address_pool_ids"]; ok { - buf.WriteString(fmt.Sprintf("%s-", appPoolId.(*schema.Set).List())) + buf.WriteString(fmt.Sprintf("%s-", appPoolId.(*pluginsdk.Set).List())) } if appSecGroup, ok := config["application_security_group_ids"]; ok { - buf.WriteString(fmt.Sprintf("%s-", appSecGroup.(*schema.Set).List())) + buf.WriteString(fmt.Sprintf("%s-", 
appSecGroup.(*pluginsdk.Set).List())) } if lbPoolIds, ok := config["load_balancer_backend_address_pool_ids"]; ok { - buf.WriteString(fmt.Sprintf("%s-", lbPoolIds.(*schema.Set).List())) + buf.WriteString(fmt.Sprintf("%s-", lbPoolIds.(*pluginsdk.Set).List())) } if lbInNatRules, ok := config["load_balancer_inbound_nat_rules_ids"]; ok { - buf.WriteString(fmt.Sprintf("%s-", lbInNatRules.(*schema.Set).List())) + buf.WriteString(fmt.Sprintf("%s-", lbInNatRules.(*pluginsdk.Set).List())) } if primary, ok := config["primary"]; ok { buf.WriteString(fmt.Sprintf("%t-", primary.(bool))) @@ -1659,7 +1655,7 @@ func resourceVirtualMachineScaleSetNetworkConfigurationHash(v interface{}) int { } } - return schema.HashString(buf.String()) + return pluginsdk.HashString(buf.String()) } func resourceVirtualMachineScaleSetOsProfileLinuxConfigHash(v interface{}) int { @@ -1681,7 +1677,7 @@ func resourceVirtualMachineScaleSetOsProfileLinuxConfigHash(v interface{}) int { } } - return schema.HashString(buf.String()) + return pluginsdk.HashString(buf.String()) } func resourceVirtualMachineScaleSetOsProfileWindowsConfigHash(v interface{}) int { @@ -1696,7 +1692,7 @@ func resourceVirtualMachineScaleSetOsProfileWindowsConfigHash(v interface{}) int } } - return schema.HashString(buf.String()) + return pluginsdk.HashString(buf.String()) } func resourceVirtualMachineScaleSetExtensionHash(v interface{}) int { @@ -1713,15 +1709,15 @@ func resourceVirtualMachineScaleSetExtensionHash(v interface{}) int { } if v, ok := m["provision_after_extensions"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(*schema.Set).List())) + buf.WriteString(fmt.Sprintf("%s-", v.(*pluginsdk.Set).List())) } // we need to ensure the whitespace is consistent settings := m["settings"].(string) if settings != "" { - expandedSettings, err := structure.ExpandJsonFromString(settings) + expandedSettings, err := pluginsdk.ExpandJsonFromString(settings) if err == nil { - serializedSettings, err := 
structure.FlattenJsonToString(expandedSettings) + serializedSettings, err := pluginsdk.FlattenJsonToString(expandedSettings) if err == nil { buf.WriteString(fmt.Sprintf("%s-", serializedSettings)) } @@ -1729,10 +1725,10 @@ func resourceVirtualMachineScaleSetExtensionHash(v interface{}) int { } } - return schema.HashString(buf.String()) + return pluginsdk.HashString(buf.String()) } -func expandVirtualMachineScaleSetSku(d *schema.ResourceData) *compute.Sku { +func expandVirtualMachineScaleSetSku(d *pluginsdk.ResourceData) *compute.Sku { skuConfig := d.Get("sku").([]interface{}) config := skuConfig[0].(map[string]interface{}) @@ -1748,7 +1744,7 @@ func expandVirtualMachineScaleSetSku(d *schema.ResourceData) *compute.Sku { return sku } -func expandAzureRmRollingUpgradePolicy(d *schema.ResourceData) *compute.RollingUpgradePolicy { +func expandAzureRmRollingUpgradePolicy(d *pluginsdk.ResourceData) *compute.RollingUpgradePolicy { if config, ok := d.GetOk("rolling_upgrade_policy.0"); ok { policy := config.(map[string]interface{}) return &compute.RollingUpgradePolicy{ @@ -1761,8 +1757,8 @@ func expandAzureRmRollingUpgradePolicy(d *schema.ResourceData) *compute.RollingU return nil } -func expandAzureRmVirtualMachineScaleSetNetworkProfile(d *schema.ResourceData) *compute.VirtualMachineScaleSetNetworkProfile { - scaleSetNetworkProfileConfigs := d.Get("network_profile").(*schema.Set).List() +func expandAzureRmVirtualMachineScaleSetNetworkProfile(d *pluginsdk.ResourceData) *compute.VirtualMachineScaleSetNetworkProfile { + scaleSetNetworkProfileConfigs := d.Get("network_profile").(*pluginsdk.Set).List() networkProfileConfig := make([]compute.VirtualMachineScaleSetNetworkConfiguration, 0, len(scaleSetNetworkProfileConfigs)) for _, npProfileConfig := range scaleSetNetworkProfileConfigs { @@ -1810,7 +1806,7 @@ func expandAzureRmVirtualMachineScaleSetNetworkProfile(d *schema.ResourceData) * ipConfiguration.Primary = &primary if v := 
ipconfig["application_gateway_backend_address_pool_ids"]; v != nil { - pools := v.(*schema.Set).List() + pools := v.(*pluginsdk.Set).List() resources := make([]compute.SubResource, 0, len(pools)) for _, p := range pools { id := p.(string) @@ -1822,7 +1818,7 @@ func expandAzureRmVirtualMachineScaleSetNetworkProfile(d *schema.ResourceData) * } if v := ipconfig["application_security_group_ids"]; v != nil { - asgs := v.(*schema.Set).List() + asgs := v.(*pluginsdk.Set).List() resources := make([]compute.SubResource, 0, len(asgs)) for _, p := range asgs { id := p.(string) @@ -1834,7 +1830,7 @@ func expandAzureRmVirtualMachineScaleSetNetworkProfile(d *schema.ResourceData) * } if v := ipconfig["load_balancer_backend_address_pool_ids"]; v != nil { - pools := v.(*schema.Set).List() + pools := v.(*pluginsdk.Set).List() resources := make([]compute.SubResource, 0, len(pools)) for _, p := range pools { id := p.(string) @@ -1846,7 +1842,7 @@ func expandAzureRmVirtualMachineScaleSetNetworkProfile(d *schema.ResourceData) * } if v := ipconfig["load_balancer_inbound_nat_rules_ids"]; v != nil { - rules := v.(*schema.Set).List() + rules := v.(*pluginsdk.Set).List() rulesResources := make([]compute.SubResource, 0, len(rules)) for _, m := range rules { id := m.(string) @@ -1911,7 +1907,7 @@ func expandAzureRmVirtualMachineScaleSetNetworkProfile(d *schema.ResourceData) * } } -func expandAzureRMVirtualMachineScaleSetsOsProfile(d *schema.ResourceData) *compute.VirtualMachineScaleSetOSProfile { +func expandAzureRMVirtualMachineScaleSetsOsProfile(d *pluginsdk.ResourceData) *compute.VirtualMachineScaleSetOSProfile { osProfileConfigs := d.Get("os_profile").([]interface{}) osProfileConfig := osProfileConfigs[0].(map[string]interface{}) @@ -1955,7 +1951,7 @@ func expandAzureRMVirtualMachineScaleSetsOsProfile(d *schema.ResourceData) *comp return osProfile } -func expandAzureRMVirtualMachineScaleSetsDiagnosticProfile(d *schema.ResourceData) compute.DiagnosticsProfile { +func 
expandAzureRMVirtualMachineScaleSetsDiagnosticProfile(d *pluginsdk.ResourceData) compute.DiagnosticsProfile { bootDiagnosticConfigs := d.Get("boot_diagnostics").([]interface{}) bootDiagnosticConfig := bootDiagnosticConfigs[0].(map[string]interface{}) @@ -1974,7 +1970,7 @@ func expandAzureRMVirtualMachineScaleSetsDiagnosticProfile(d *schema.ResourceDat return diagnosticsProfile } -func expandAzureRmVirtualMachineScaleSetIdentity(d *schema.ResourceData) *compute.VirtualMachineScaleSetIdentity { +func expandAzureRmVirtualMachineScaleSetIdentity(d *pluginsdk.ResourceData) *compute.VirtualMachineScaleSetIdentity { v := d.Get("identity") identities := v.([]interface{}) identity := identities[0].(map[string]interface{}) @@ -1996,13 +1992,13 @@ func expandAzureRmVirtualMachineScaleSetIdentity(d *schema.ResourceData) *comput return &vmssIdentity } -func expandAzureRMVirtualMachineScaleSetsStorageProfileOsDisk(d *schema.ResourceData) (*compute.VirtualMachineScaleSetOSDisk, error) { - osDiskConfigs := d.Get("storage_profile_os_disk").(*schema.Set).List() +func expandAzureRMVirtualMachineScaleSetsStorageProfileOsDisk(d *pluginsdk.ResourceData) (*compute.VirtualMachineScaleSetOSDisk, error) { + osDiskConfigs := d.Get("storage_profile_os_disk").(*pluginsdk.Set).List() osDiskConfig := osDiskConfigs[0].(map[string]interface{}) name := osDiskConfig["name"].(string) image := osDiskConfig["image"].(string) - vhd_containers := osDiskConfig["vhd_containers"].(*schema.Set).List() + vhd_containers := osDiskConfig["vhd_containers"].(*pluginsdk.Set).List() caching := osDiskConfig["caching"].(string) osType := osDiskConfig["os_type"].(string) createOption := osDiskConfig["create_option"].(string) @@ -2059,7 +2055,7 @@ func expandAzureRMVirtualMachineScaleSetsStorageProfileOsDisk(d *schema.Resource return osDisk, nil } -func expandAzureRMVirtualMachineScaleSetsStorageProfileDataDisk(d *schema.ResourceData) *[]compute.VirtualMachineScaleSetDataDisk { +func 
expandAzureRMVirtualMachineScaleSetsStorageProfileDataDisk(d *pluginsdk.ResourceData) *[]compute.VirtualMachineScaleSetDataDisk { disks := d.Get("storage_profile_data_disk").([]interface{}) dataDisks := make([]compute.VirtualMachineScaleSetDataDisk, 0, len(disks)) for _, diskConfig := range disks { @@ -2099,8 +2095,8 @@ func expandAzureRMVirtualMachineScaleSetsStorageProfileDataDisk(d *schema.Resour return &dataDisks } -func expandAzureRmVirtualMachineScaleSetStorageProfileImageReference(d *schema.ResourceData) (*compute.ImageReference, error) { - storageImageRefs := d.Get("storage_profile_image_reference").(*schema.Set).List() +func expandAzureRmVirtualMachineScaleSetStorageProfileImageReference(d *pluginsdk.ResourceData) (*compute.ImageReference, error) { + storageImageRefs := d.Get("storage_profile_image_reference").(*pluginsdk.Set).List() storageImageRef := storageImageRefs[0].(map[string]interface{}) @@ -2129,8 +2125,8 @@ func expandAzureRmVirtualMachineScaleSetStorageProfileImageReference(d *schema.R return &imageReference, nil } -func expandAzureRmVirtualMachineScaleSetOsProfileLinuxConfig(d *schema.ResourceData) *compute.LinuxConfiguration { - osProfilesLinuxConfig := d.Get("os_profile_linux_config").(*schema.Set).List() +func expandAzureRmVirtualMachineScaleSetOsProfileLinuxConfig(d *pluginsdk.ResourceData) *compute.LinuxConfiguration { + osProfilesLinuxConfig := d.Get("os_profile_linux_config").(*pluginsdk.Set).List() linuxConfig := osProfilesLinuxConfig[0].(map[string]interface{}) disablePasswordAuth := linuxConfig["disable_password_authentication"].(bool) @@ -2163,8 +2159,8 @@ func expandAzureRmVirtualMachineScaleSetOsProfileLinuxConfig(d *schema.ResourceD return config } -func expandAzureRmVirtualMachineScaleSetOsProfileWindowsConfig(d *schema.ResourceData) *compute.WindowsConfiguration { - osProfilesWindowsConfig := d.Get("os_profile_windows_config").(*schema.Set).List() +func expandAzureRmVirtualMachineScaleSetOsProfileWindowsConfig(d 
*pluginsdk.ResourceData) *compute.WindowsConfiguration { + osProfilesWindowsConfig := d.Get("os_profile_windows_config").(*pluginsdk.Set).List() osProfileConfig := osProfilesWindowsConfig[0].(map[string]interface{}) config := &compute.WindowsConfiguration{} @@ -2230,8 +2226,8 @@ func expandAzureRmVirtualMachineScaleSetOsProfileWindowsConfig(d *schema.Resourc return config } -func expandAzureRmVirtualMachineScaleSetOsProfileSecrets(d *schema.ResourceData) *[]compute.VaultSecretGroup { - secretsConfig := d.Get("os_profile_secrets").(*schema.Set).List() +func expandAzureRmVirtualMachineScaleSetOsProfileSecrets(d *pluginsdk.ResourceData) *[]compute.VaultSecretGroup { + secretsConfig := d.Get("os_profile_secrets").(*pluginsdk.Set).List() secrets := make([]compute.VaultSecretGroup, 0, len(secretsConfig)) for _, secretConfig := range secretsConfig { @@ -2269,8 +2265,8 @@ func expandAzureRmVirtualMachineScaleSetOsProfileSecrets(d *schema.ResourceData) return &secrets } -func expandAzureRMVirtualMachineScaleSetExtensions(d *schema.ResourceData) (*compute.VirtualMachineScaleSetExtensionProfile, error) { - extensions := d.Get("extension").(*schema.Set).List() +func expandAzureRMVirtualMachineScaleSetExtensions(d *pluginsdk.ResourceData) (*compute.VirtualMachineScaleSetExtensionProfile, error) { + extensions := d.Get("extension").(*pluginsdk.Set).List() resources := make([]compute.VirtualMachineScaleSetExtension, 0, len(extensions)) for _, e := range extensions { config := e.(map[string]interface{}) @@ -2294,7 +2290,7 @@ func expandAzureRMVirtualMachineScaleSetExtensions(d *schema.ResourceData) (*com } if a := config["provision_after_extensions"]; a != nil { - provision_after_extensions := config["provision_after_extensions"].(*schema.Set).List() + provision_after_extensions := config["provision_after_extensions"].(*pluginsdk.Set).List() if len(provision_after_extensions) > 0 { var provisionAfterExtensions []string for _, a := range provision_after_extensions { @@ -2306,7 
+2302,7 @@ func expandAzureRMVirtualMachineScaleSetExtensions(d *schema.ResourceData) (*com } if s := config["settings"].(string); s != "" { - settings, err := structure.ExpandJsonFromString(s) + settings, err := pluginsdk.ExpandJsonFromString(s) if err != nil { return nil, fmt.Errorf("unable to parse settings: %+v", err) } @@ -2314,7 +2310,7 @@ func expandAzureRMVirtualMachineScaleSetExtensions(d *schema.ResourceData) (*com } if s := config["protected_settings"].(string); s != "" { - protectedSettings, err := structure.ExpandJsonFromString(s) + protectedSettings, err := pluginsdk.ExpandJsonFromString(s) if err != nil { return nil, fmt.Errorf("unable to parse protected_settings: %+v", err) } @@ -2329,8 +2325,8 @@ func expandAzureRMVirtualMachineScaleSetExtensions(d *schema.ResourceData) (*com }, nil } -func expandAzureRmVirtualMachineScaleSetPlan(d *schema.ResourceData) *compute.Plan { - planConfigs := d.Get("plan").(*schema.Set).List() +func expandAzureRmVirtualMachineScaleSetPlan(d *pluginsdk.ResourceData) *compute.Plan { + planConfigs := d.Get("plan").(*pluginsdk.Set).List() planConfig := planConfigs[0].(map[string]interface{}) @@ -2356,7 +2352,7 @@ func flattenAzureRmVirtualMachineScaleSetPlan(plan *compute.Plan) []interface{} } // When upgrade_policy_mode is not Rolling, we will just ignore rolling_upgrade_policy (returns true). -func azureRmVirtualMachineScaleSetSuppressRollingUpgradePolicyDiff(k, _, new string, d *schema.ResourceData) bool { +func azureRmVirtualMachineScaleSetSuppressRollingUpgradePolicyDiff(k, _, new string, d *pluginsdk.ResourceData) bool { if k == "rolling_upgrade_policy.#" && new == "0" { return strings.ToLower(d.Get("upgrade_policy_mode").(string)) != "rolling" } @@ -2364,7 +2360,7 @@ func azureRmVirtualMachineScaleSetSuppressRollingUpgradePolicyDiff(k, _, new str } // Make sure rolling_upgrade_policy is default value when upgrade_policy_mode is not Rolling. 
-func azureRmVirtualMachineScaleSetCustomizeDiff(ctx context.Context, d *schema.ResourceDiff, _ interface{}) error { +func azureRmVirtualMachineScaleSetCustomizeDiff(ctx context.Context, d *pluginsdk.ResourceDiff, _ interface{}) error { mode := d.Get("upgrade_policy_mode").(string) if strings.ToLower(mode) != "rolling" { if policyRaw, ok := d.GetOk("rolling_upgrade_policy.0"); ok { diff --git a/azurerm/internal/services/compute/virtual_machine_scale_set_resource_test.go b/azurerm/internal/services/compute/virtual_machine_scale_set_resource_test.go index e9ccab88fc12..b684768f44c0 100644 --- a/azurerm/internal/services/compute/virtual_machine_scale_set_resource_test.go +++ b/azurerm/internal/services/compute/virtual_machine_scale_set_resource_test.go @@ -7,14 +7,13 @@ import ( "testing" "time" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -25,10 +24,10 @@ func TestAccVirtualMachineScaleSet_basic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set", "test") r := VirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: 
acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), // testing default scaleset values check.That(data.ResourceName).Key("single_placement_group").HasValue("true"), @@ -42,10 +41,10 @@ func TestAccVirtualMachineScaleSet_basic(t *testing.T) { func TestAccVirtualMachineScaleSet_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set", "test") r := VirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -60,10 +59,10 @@ func TestAccVirtualMachineScaleSet_evictionPolicyDelete(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set", "test") r := VirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.evictionPolicyDelete(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("eviction_policy").HasValue("Delete"), ), @@ -76,10 +75,10 @@ func TestAccVirtualMachineScaleSet_standardSSD(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set", "test") r := VirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.standardSSD(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -91,10 +90,10 @@ func TestAccVirtualMachineScaleSet_withPPG(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set", "test") r := VirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, 
r, []acceptance.TestStep{ { Config: r.withPPG(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("proximity_placement_group_id").Exists(), ), @@ -106,10 +105,10 @@ func TestAccVirtualMachineScaleSet_basicPublicIP(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set", "test") r := VirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicPublicIP(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -121,17 +120,17 @@ func TestAccVirtualMachineScaleSet_basicPublicIP_simpleUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set", "test") r := VirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicEmptyPublicIP(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep("os_profile.0.admin_password"), { Config: r.basicEmptyPublicIP_updated_tags(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -143,17 +142,17 @@ func TestAccVirtualMachineScaleSet_updateNetworkProfile(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set", "test") r := VirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicEmptyPublicIP(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep("os_profile.0.admin_password"), { Config: 
r.basicEmptyNetworkProfile_true_ipforwarding(data), - Check: resource.ComposeTestCheckFunc(), + Check: acceptance.ComposeTestCheckFunc(), }, data.ImportStep("os_profile.0.admin_password"), }) @@ -163,17 +162,17 @@ func TestAccVirtualMachineScaleSet_updateNetworkProfile_ipconfiguration_dns_name data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set", "test") r := VirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicEmptyPublicIP(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep("os_profile.0.admin_password"), { Config: r.basicEmptyPublicIP_updatedDNS_label(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -186,17 +185,17 @@ func TestAccVirtualMachineScaleSet_verify_key_data_changed(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set", "test") r := VirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.linux(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep("os_profile.0.admin_password"), { Config: r.linuxKeyDataUpdated(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -208,10 +207,10 @@ func TestAccVirtualMachineScaleSet_basicApplicationSecurity(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set", "test") r := VirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicApplicationSecurity(data), - Check: 
resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -223,10 +222,10 @@ func TestAccVirtualMachineScaleSet_basicAcceleratedNetworking(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set", "test") r := VirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicAcceleratedNetworking(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -238,10 +237,10 @@ func TestAccVirtualMachineScaleSet_basicIPForwarding(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set", "test") r := VirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicIPForwarding(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -253,10 +252,10 @@ func TestAccVirtualMachineScaleSet_basicDNSSettings(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set", "test") r := VirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicDNSSettings(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -268,10 +267,10 @@ func TestAccVirtualMachineScaleSet_bootDiagnostic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set", "test") r := VirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.bootDiagnostic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( 
check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("boot_diagnostics.0.enabled").HasValue("true"), ), @@ -283,10 +282,10 @@ func TestAccVirtualMachineScaleSet_networkSecurityGroup(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set", "test") r := VirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkSecurityGroup(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -297,10 +296,10 @@ func TestAccVirtualMachineScaleSet_basicWindows(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set", "test") r := VirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicWindows(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), // single placement group should default to true @@ -314,10 +313,10 @@ func TestAccVirtualMachineScaleSet_singlePlacementGroupFalse(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set", "test") r := VirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.singlePlacementGroupFalse(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("single_placement_group").HasValue("false"), ), @@ -329,16 +328,16 @@ func TestAccVirtualMachineScaleSet_linuxUpdated(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set", "test") r := VirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, 
[]acceptance.TestStep{ { Config: r.linux(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, { Config: r.linuxUpdated(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -349,16 +348,16 @@ func TestAccVirtualMachineScaleSet_customDataUpdated(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set", "test") r := VirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.linux(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, { Config: r.linuxCustomDataUpdated(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -369,10 +368,10 @@ func TestAccVirtualMachineScaleSet_basicLinux_managedDisk(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set", "test") r := VirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicLinux_managedDisk(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -384,10 +383,10 @@ func TestAccVirtualMachineScaleSet_basicWindows_managedDisk(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set", "test") r := VirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicWindows_managedDisk(data, "Standard_D1_v2"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ 
-398,16 +397,16 @@ func TestAccVirtualMachineScaleSet_basicWindows_managedDisk_resize(t *testing.T) data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set", "test") r := VirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicWindows_managedDisk(data, "Standard_D1_v2"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, { Config: r.basicWindows_managedDisk(data, "Standard_D2_v2"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -418,10 +417,10 @@ func TestAccVirtualMachineScaleSet_basicLinux_managedDiskNoName(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set", "test") r := VirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicLinux_managedDiskNoName(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -432,7 +431,7 @@ func TestAccVirtualMachineScaleSet_basicLinux_disappears(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set", "test") r := VirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ data.DisappearsStep(acceptance.DisappearsStepData{ Config: r.basic, TestResource: r, @@ -444,10 +443,10 @@ func TestAccVirtualMachineScaleSet_planManagedDisk(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set", "test") r := VirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.planManagedDisk(data), - Check: resource.ComposeTestCheckFunc( + Check: 
acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -458,10 +457,10 @@ func TestAccVirtualMachineScaleSet_applicationGateway(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set", "test") r := VirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.applicationGatewayTemplate(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), data.CheckWithClient(r.hasApplicationGateway), ), @@ -473,10 +472,10 @@ func TestAccVirtualMachineScaleSet_loadBalancer(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set", "test") r := VirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.loadBalancerTemplate(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), data.CheckWithClient(r.hasLoadBalancer), ), @@ -489,10 +488,10 @@ func TestAccVirtualMachineScaleSet_loadBalancerManagedDataDisks(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set", "test") r := VirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.loadBalancerTemplateManagedDataDisks(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("storage_profile_data_disk.#").HasValue("1"), ), @@ -504,10 +503,10 @@ func TestAccVirtualMachineScaleSet_overprovision(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set", "test") r := VirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, 
[]acceptance.TestStep{ { Config: r.overProvisionTemplate(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("overprovision").HasValue("false"), ), @@ -520,10 +519,10 @@ func TestAccVirtualMachineScaleSet_priority(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set", "test") r := VirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.priorityTemplate(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("priority").HasValue("Low"), check.That(data.ResourceName).Key("eviction_policy").HasValue("Deallocate"), @@ -536,14 +535,14 @@ func TestAccVirtualMachineScaleSet_SystemAssignedMSI(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set", "test") r := VirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.systemAssignedMSI(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("identity.0.type").HasValue("SystemAssigned"), check.That(data.ResourceName).Key("identity.0.identity_ids.#").HasValue("0"), - resource.TestMatchResourceAttr(data.ResourceName, "identity.0.principal_id", validate.UUIDRegExp), + acceptance.TestMatchResourceAttr(data.ResourceName, "identity.0.principal_id", validate.UUIDRegExp), ), }, }) @@ -553,10 +552,10 @@ func TestAccVirtualMachineScaleSet_UserAssignedMSI(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set", "test") r := VirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, 
[]acceptance.TestStep{ { Config: r.userAssignedMSI(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("identity.0.type").HasValue("UserAssigned"), check.That(data.ResourceName).Key("identity.0.identity_ids.#").HasValue("1"), @@ -570,14 +569,14 @@ func TestAccVirtualMachineScaleSet_multipleAssignedMSI(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set", "test") r := VirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.multipleAssignedMSI(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("identity.0.type").HasValue("SystemAssigned, UserAssigned"), check.That(data.ResourceName).Key("identity.0.identity_ids.#").HasValue("1"), - resource.TestMatchResourceAttr(data.ResourceName, "identity.0.principal_id", validate.UUIDRegExp), + acceptance.TestMatchResourceAttr(data.ResourceName, "identity.0.principal_id", validate.UUIDRegExp), ), }, }) @@ -587,10 +586,10 @@ func TestAccVirtualMachineScaleSet_extension(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set", "test") r := VirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.extensionTemplate(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -602,16 +601,16 @@ func TestAccVirtualMachineScaleSet_extensionUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set", "test") r := VirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: 
r.extensionTemplate(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, { Config: r.extensionTemplateUpdated(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -622,10 +621,10 @@ func TestAccVirtualMachineScaleSet_multipleExtensions(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set", "test") r := VirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.multipleExtensionsTemplate(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -637,10 +636,10 @@ func TestAccVirtualMachineScaleSet_multipleExtensions_provision_after_extension( data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set", "test") r := VirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.multipleExtensionsTemplate_provision_after_extension(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -652,7 +651,7 @@ func TestAccVirtualMachineScaleSet_osDiskTypeConflict(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set", "test") r := VirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.osDiskTypeConflict(data), ExpectError: regexp.MustCompile("Conflict between `vhd_containers`"), @@ -664,10 +663,10 @@ func TestAccVirtualMachineScaleSet_NonStandardCasing(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set", "test") r := VirtualMachineScaleSetResource{} - 
data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.nonStandardCasing(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -683,10 +682,10 @@ func TestAccVirtualMachineScaleSet_importLinux(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set", "test") r := VirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.linux(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -701,10 +700,10 @@ func TestAccVirtualMachineScaleSet_multipleNetworkProfiles(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set", "test") r := VirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.multipleNetworkProfiles(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -715,10 +714,10 @@ func TestAccVirtualMachineScaleSet_AutoUpdates(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set", "test") r := VirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.rollingAutoUpdates(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -729,26 +728,26 @@ func TestAccVirtualMachineScaleSet_upgradeModeUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set", "test") r := VirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ 
{ Config: r.upgradeModeUpdate(data, "Manual"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("upgrade_policy_mode").HasValue("Manual"), - resource.TestCheckNoResourceAttr(data.ResourceName, "rolling_upgrade_policy.#"), + acceptance.TestCheckNoResourceAttr(data.ResourceName, "rolling_upgrade_policy.#"), ), }, { Config: r.upgradeModeUpdate(data, "Automatic"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("upgrade_policy_mode").HasValue("Automatic"), - resource.TestCheckNoResourceAttr(data.ResourceName, "rolling_upgrade_policy.#"), + acceptance.TestCheckNoResourceAttr(data.ResourceName, "rolling_upgrade_policy.#"), ), }, { Config: r.upgradeModeUpdate(data, "Rolling"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("upgrade_policy_mode").HasValue("Rolling"), check.That(data.ResourceName).Key("rolling_upgrade_policy.#").HasValue("1"), @@ -760,7 +759,7 @@ func TestAccVirtualMachineScaleSet_upgradeModeUpdate(t *testing.T) { { PreConfig: func() { time.Sleep(1 * time.Minute) }, // VM Scale Set updates are not allowed while there is a Rolling Upgrade in progress. 
Config: r.upgradeModeUpdate(data, "Automatic"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("upgrade_policy_mode").HasValue("Automatic"), check.That(data.ResourceName).Key("rolling_upgrade_policy.#").HasValue("1"), @@ -771,7 +770,7 @@ func TestAccVirtualMachineScaleSet_upgradeModeUpdate(t *testing.T) { }, { Config: r.upgradeModeUpdate(data, "Manual"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("upgrade_policy_mode").HasValue("Manual"), check.That(data.ResourceName).Key("rolling_upgrade_policy.#").HasValue("1"), @@ -787,10 +786,10 @@ func TestAccVirtualMachineScaleSet_importBasic_managedDisk_withZones(t *testing. data := acceptance.BuildTestData(t, "azurerm_virtual_machine_scale_set", "test") r := VirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicLinux_managedDisk_withZones(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -798,7 +797,7 @@ func TestAccVirtualMachineScaleSet_importBasic_managedDisk_withZones(t *testing. 
}) } -func (VirtualMachineScaleSetResource) Destroy(ctx context.Context, client *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (VirtualMachineScaleSetResource) Destroy(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := parse.VirtualMachineScaleSetID(state.ID) if err != nil { return nil, err @@ -818,7 +817,7 @@ func (VirtualMachineScaleSetResource) Destroy(ctx context.Context, client *clien return utils.Bool(true), nil } -func (VirtualMachineScaleSetResource) hasLoadBalancer(ctx context.Context, client *clients.Client, state *terraform.InstanceState) error { +func (VirtualMachineScaleSetResource) hasLoadBalancer(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) error { id, err := parse.VirtualMachineScaleSetID(state.ID) if err != nil { return err @@ -856,7 +855,7 @@ func (VirtualMachineScaleSetResource) hasLoadBalancer(ctx context.Context, clien return fmt.Errorf("load balancer configuration was missing") } -func (VirtualMachineScaleSetResource) hasApplicationGateway(ctx context.Context, client *clients.Client, state *terraform.InstanceState) error { +func (VirtualMachineScaleSetResource) hasApplicationGateway(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) error { id, err := parse.VirtualMachineScaleSetID(state.ID) if err != nil { return err @@ -894,7 +893,7 @@ func (VirtualMachineScaleSetResource) hasApplicationGateway(ctx context.Context, return fmt.Errorf("application gateway configuration was missing") } -func (t VirtualMachineScaleSetResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (t VirtualMachineScaleSetResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := azure.ParseAzureResourceID(state.ID) if err != nil { return nil, err diff --git 
a/azurerm/internal/services/compute/virtual_machine_scale_set_update.go b/azurerm/internal/services/compute/virtual_machine_scale_set_update.go index 6d456c494f77..e046d3c280cf 100644 --- a/azurerm/internal/services/compute/virtual_machine_scale_set_update.go +++ b/azurerm/internal/services/compute/virtual_machine_scale_set_update.go @@ -47,7 +47,9 @@ func (metadata virtualMachineScaleSetUpdateMetaData) performUpdate(ctx context.C upgradeMode := metadata.Existing.VirtualMachineScaleSetProperties.UpgradePolicy.Mode if userWantsToRollInstances { - if upgradeMode == compute.Automatic { + // If the updated image version is not "latest" and upgrade mode is automatic then azure will roll the instances automatically. + // Calling upgradeInstancesForAutomaticUpgradePolicy() in this case will cause an error. + if upgradeMode == compute.Automatic && *update.VirtualMachineProfile.StorageProfile.ImageReference.Version == "latest" { if err := metadata.upgradeInstancesForAutomaticUpgradePolicy(ctx); err != nil { return err } @@ -86,7 +88,7 @@ func (metadata virtualMachineScaleSetUpdateMetaData) updateVmss(ctx context.Cont log.Printf("[DEBUG] Updating %s Virtual Machine Scale Set %q (Resource Group %q)..", metadata.OSType, id.Name, id.ResourceGroup) future, err := client.Update(ctx, id.ResourceGroup, id.Name, update) if err != nil { - return fmt.Errorf("Error updating L%sinux Virtual Machine Scale Set %q (Resource Group %q): %+v", metadata.OSType, id.Name, id.ResourceGroup, err) + return fmt.Errorf("Error updating %s Virtual Machine Scale Set %q (Resource Group %q): %+v", metadata.OSType, id.Name, id.ResourceGroup, err) } log.Printf("[DEBUG] Waiting for update of %s Virtual Machine Scale Set %q (Resource Group %q)..", metadata.OSType, id.Name, id.ResourceGroup) diff --git a/azurerm/internal/services/compute/virtual_machine_unmanaged_disks_resource_test.go b/azurerm/internal/services/compute/virtual_machine_unmanaged_disks_resource_test.go index dfdc5e9ddc35..3af7ac81d33c 100644 
--- a/azurerm/internal/services/compute/virtual_machine_unmanaged_disks_resource_test.go +++ b/azurerm/internal/services/compute/virtual_machine_unmanaged_disks_resource_test.go @@ -5,7 +5,6 @@ import ( "regexp" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -14,10 +13,10 @@ func TestAccVirtualMachine_basicLinuxMachine(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine", "test") r := VirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicLinuxMachine(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -32,11 +31,11 @@ func TestAccVirtualMachine_basicLinuxMachine_storageBlob_attach(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine", "test") r := VirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicLinuxMachine(data), Destroy: false, - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -46,7 +45,7 @@ func TestAccVirtualMachine_basicLinuxMachine_storageBlob_attach(t *testing.T) { }, { Config: r.basicLinuxMachine_storageBlob_attach(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -57,10 +56,10 @@ func TestAccVirtualMachine_basicLinuxMachineSSHOnly(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine", "test") r := VirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: 
r.basicLinuxMachineSSHOnly(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -71,7 +70,7 @@ func TestAccVirtualMachine_basicLinuxMachine_disappears(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine", "test") r := VirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ data.DisappearsStep(acceptance.DisappearsStepData{ Config: r.basicLinuxMachine, TestResource: r, @@ -83,15 +82,15 @@ func TestAccVirtualMachine_basicLinuxMachineUseExistingOsDiskImage(t *testing.T) data := acceptance.BuildTestData(t, "azurerm_virtual_machine", "test") r := VirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicLinuxMachineUseExistingOsDiskImage(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).ExistsInAzure(r), data.CheckWithClientForResource(r.unmanagedDiskExistsInContainer("myosdisk1.vhd", true), "azurerm_storage_container.test"), data.CheckWithClientForResource(r.unmanagedDiskExistsInContainer("mirrorosdisk.vhd", true), "azurerm_storage_container.test"), - resource.TestMatchResourceAttr("azurerm_virtual_machine.mirror", "storage_os_disk.0.image_uri", regexp.MustCompile("myosdisk1.vhd$")), + acceptance.TestMatchResourceAttr("azurerm_virtual_machine.mirror", "storage_os_disk.0.image_uri", regexp.MustCompile("myosdisk1.vhd$")), ), }, }) @@ -101,10 +100,10 @@ func TestAccVirtualMachine_withDataDisk(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine", "test") r := VirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.withDataDisk(data), - Check: resource.ComposeTestCheckFunc( + Check: 
acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -115,10 +114,10 @@ func TestAccVirtualMachine_tags(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine", "test") r := VirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicLinuxMachine(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("tags.%").HasValue("2"), check.That(data.ResourceName).Key("tags.environment").HasValue("Production"), @@ -128,7 +127,7 @@ func TestAccVirtualMachine_tags(t *testing.T) { { Config: r.basicLinuxMachineUpdated(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("tags.%").HasValue("1"), check.That(data.ResourceName).Key("tags.environment").HasValue("Production"), @@ -143,17 +142,17 @@ func TestAccVirtualMachine_updateMachineSize(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine", "test") r := VirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicLinuxMachine(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("vm_size").HasValue("Standard_D1_v2"), ), }, { Config: r.updatedLinuxMachine(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("vm_size").HasValue("Standard_D2_v2"), ), @@ -165,10 +164,10 @@ func TestAccVirtualMachine_basicWindowsMachine(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine", "test") r := VirtualMachineResource{} - 
data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicWindowsMachine(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -179,10 +178,10 @@ func TestAccVirtualMachine_windowsUnattendedConfig(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine", "test") r := VirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.windowsUnattendedConfig(data, "Standard_D1_v2"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -192,16 +191,16 @@ func TestAccVirtualMachine_windowsUnattendedConfig(t *testing.T) { func TestAccVirtualMachine_windowsMachineResize(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine", "test") r := VirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.windowsUnattendedConfig(data, "Standard_D1_v2"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, { Config: r.windowsUnattendedConfig(data, "Standard_D2_v2"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -212,10 +211,10 @@ func TestAccVirtualMachine_diagnosticsProfile(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine", "test") r := VirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.diagnosticsProfile(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -226,10 +225,10 @@ func 
TestAccVirtualMachine_winRMConfig(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine", "test") r := VirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.winRMConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -239,16 +238,16 @@ func TestAccVirtualMachine_winRMConfig(t *testing.T) { func TestAccVirtualMachine_deleteVHDOptOut(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine", "test") r := VirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.withDataDisk(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, { Config: r.basicLinuxMachineDeleteVM(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( data.CheckWithClientForResource(r.unmanagedDiskExistsInContainer("myosdisk1.vhd", true), "azurerm_storage_container.test"), data.CheckWithClientForResource(r.unmanagedDiskExistsInContainer("mydatadisk1.vhd", true), "azurerm_storage_container.test"), ), @@ -260,16 +259,16 @@ func TestAccVirtualMachine_deleteVHDOptIn(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine", "test") r := VirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicLinuxMachineDestroyDisksBefore(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, { Config: r.basicLinuxMachineDestroyDisksAfter(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( data.CheckWithClientForResource(r.unmanagedDiskExistsInContainer("myosdisk1.vhd", false), 
"azurerm_storage_container.test"), data.CheckWithClientForResource(r.unmanagedDiskExistsInContainer("mydatadisk1.vhd", false), "azurerm_storage_container.test"), ), @@ -280,16 +279,16 @@ func TestAccVirtualMachine_deleteVHDOptIn(t *testing.T) { func TestAccVirtualMachine_ChangeComputerName(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine", "test") r := VirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.machineNameBeforeUpdate(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, { Config: r.updateMachineName(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -300,17 +299,17 @@ func TestAccVirtualMachine_ChangeAvailabilitySet(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine", "test") r := VirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.withAvailabilitySet(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, { Config: r.updateAvailabilitySet(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -321,16 +320,16 @@ func TestAccVirtualMachine_changeStorageImageReference(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine", "test") r := VirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicLinuxMachineStorageImageBefore(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, { Config: 
r.basicLinuxMachineStorageImageAfter(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -341,17 +340,17 @@ func TestAccVirtualMachine_changeOSDiskVhdUri(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine", "test") r := VirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicLinuxMachine(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, { Config: r.basicLinuxMachineWithOSDiskVhdUriChanged(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -364,10 +363,10 @@ func TestAccVirtualMachine_plan(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine", "test") r := VirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.plan(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -378,16 +377,16 @@ func TestAccVirtualMachine_changeSSHKey(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine", "test") r := VirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.linuxMachineWithSSH(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, { Config: r.linuxMachineWithSSHRemoved(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -398,24 +397,24 @@ func TestAccVirtualMachine_optionalOSProfile(t *testing.T) { data := 
acceptance.BuildTestData(t, "azurerm_virtual_machine", "test") r := VirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Destroy: false, Config: r.basicLinuxMachine(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, { Destroy: false, Config: r.basicLinuxMachine_destroy(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).DoesNotExistInAzure(r), ), }, { Config: r.basicLinuxMachine_attach_without_osProfile(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -426,10 +425,10 @@ func TestAccVirtualMachine_primaryNetworkInterfaceId(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_virtual_machine", "test") r := VirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.primaryNetworkInterfaceId(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, diff --git a/azurerm/internal/services/compute/windows_virtual_machine_resource.go b/azurerm/internal/services/compute/windows_virtual_machine_resource.go index 7b8e0756a5aa..94d530d01b53 100644 --- a/azurerm/internal/services/compute/windows_virtual_machine_resource.go +++ b/azurerm/internal/services/compute/windows_virtual_machine_resource.go @@ -9,9 +9,6 @@ import ( "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" "github.com/hashicorp/go-azure-helpers/response" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" azValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" @@ -24,14 +21,15 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/base64" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/suppress" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) // TODO: confirm locking as appropriate -func resourceWindowsVirtualMachine() *schema.Resource { - return &schema.Resource{ +func resourceWindowsVirtualMachine() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceWindowsVirtualMachineCreate, Read: resourceWindowsVirtualMachineRead, Update: resourceWindowsVirtualMachineUpdate, @@ -42,16 +40,16 @@ func resourceWindowsVirtualMachine() *schema.Resource { return err }, importVirtualMachine(compute.Windows, "azurerm_windows_virtual_machine")), - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(45 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(45 * time.Minute), - Delete: schema.DefaultTimeout(45 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(45 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(45 * time.Minute), + Delete: pluginsdk.DefaultTimeout(45 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, 
ValidateFunc: computeValidate.VirtualMachineName, @@ -63,7 +61,7 @@ func resourceWindowsVirtualMachine() *schema.Resource { // Required "admin_password": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, Sensitive: true, @@ -71,18 +69,18 @@ func resourceWindowsVirtualMachine() *schema.Resource { }, "admin_username": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validation.StringIsNotEmpty, }, "network_interface_ids": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Required: true, MinItems: 1, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: networkValidate.NetworkInterfaceID, }, }, @@ -90,7 +88,7 @@ func resourceWindowsVirtualMachine() *schema.Resource { "os_disk": virtualMachineOSDiskSchema(), "size": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringIsNotEmpty, }, @@ -101,13 +99,13 @@ func resourceWindowsVirtualMachine() *schema.Resource { "additional_unattend_content": additionalUnattendContentSchema(), "allow_extension_operations": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: true, }, "availability_set_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, ValidateFunc: computeValidate.AvailabilitySetID, @@ -124,7 +122,7 @@ func resourceWindowsVirtualMachine() *schema.Resource { "boot_diagnostics": bootDiagnosticsSchema(), "computer_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, // Computed since we reuse the VM name if one's not specified @@ -137,7 +135,7 @@ func resourceWindowsVirtualMachine() *schema.Resource { "custom_data": base64.OptionalSchema(true), "dedicated_host_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ValidateFunc: computeValidate.DedicatedHostID, // the Compute/VM API 
is broken and returns the Resource Group name in UPPERCASE :shrug: @@ -147,20 +145,20 @@ func resourceWindowsVirtualMachine() *schema.Resource { }, "enable_automatic_updates": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, ForceNew: true, // updating this is not allowed "Changing property 'windowsConfiguration.enableAutomaticUpdates' is not allowed." Target="windowsConfiguration.enableAutomaticUpdates" Default: true, }, "encryption_at_host_enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, }, "eviction_policy": { // only applicable when `priority` is set to `Spot` - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, ValidateFunc: validation.StringInSlice([]string{ @@ -170,23 +168,23 @@ func resourceWindowsVirtualMachine() *schema.Resource { }, "extensions_time_budget": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Default: "PT1H30M", ValidateFunc: azValidate.ISO8601DurationBetween("PT15M", "PT2H"), }, - "identity": virtualMachineIdentitySchema(), + "identity": virtualMachineIdentity{}.Schema(), "license_type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ValidateFunc: validation.StringInSlice([]string{ "None", "Windows_Client", "Windows_Server", }, false), - DiffSuppressFunc: func(_, old, new string, _ *schema.ResourceData) bool { + DiffSuppressFunc: func(_, old, new string, _ *pluginsdk.ResourceData) bool { if old == "None" && new == "" || old == "" && new == "None" { return true } @@ -196,7 +194,7 @@ func resourceWindowsVirtualMachine() *schema.Resource { }, "max_bid_price": { - Type: schema.TypeFloat, + Type: pluginsdk.TypeFloat, Optional: true, Default: -1, ValidateFunc: validation.FloatAtLeast(-1.0), @@ -204,7 +202,7 @@ func resourceWindowsVirtualMachine() *schema.Resource { // This is a preview feature: `az feature register -n InGuestAutoPatchVMPreview --namespace Microsoft.Compute` "patch_mode": { - Type: 
schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Default: string(compute.WindowsVMGuestPatchModeAutomaticByOS), ValidateFunc: validation.StringInSlice([]string{ @@ -217,7 +215,7 @@ func resourceWindowsVirtualMachine() *schema.Resource { "plan": planSchema(), "priority": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, Default: string(compute.Regular), @@ -228,16 +226,15 @@ func resourceWindowsVirtualMachine() *schema.Resource { }, "provision_vm_agent": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: true, ForceNew: true, }, "proximity_placement_group_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, - ForceNew: true, ValidateFunc: computeValidate.ProximityPlacementGroupID, // the Compute/VM API is broken and returns the Resource Group name in UPPERCASE :shrug: DiffSuppressFunc: suppress.CaseDifference, @@ -246,7 +243,7 @@ func resourceWindowsVirtualMachine() *schema.Resource { "secret": windowsSecretSchema(), "source_image_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, ValidateFunc: validation.Any( @@ -261,13 +258,13 @@ func resourceWindowsVirtualMachine() *schema.Resource { "tags": tags.Schema(), "timezone": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ValidateFunc: computeValidate.VirtualMachineTimeZone(), }, "virtual_machine_scale_set_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, ConflictsWith: []string{ @@ -277,7 +274,7 @@ func resourceWindowsVirtualMachine() *schema.Resource { }, "platform_fault_domain": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Default: -1, ForceNew: true, @@ -288,7 +285,7 @@ func resourceWindowsVirtualMachine() *schema.Resource { "winrm_listener": winRmListenerSchema(), "zone": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, // 
this has to be computed because when you are trying to assign this VM to a VMSS in VMO mode with zones, @@ -302,36 +299,36 @@ func resourceWindowsVirtualMachine() *schema.Resource { // Computed "private_ip_address": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "private_ip_addresses": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, "public_ip_address": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "public_ip_addresses": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, "virtual_machine_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, }, } } -func resourceWindowsVirtualMachineCreate(d *schema.ResourceData, meta interface{}) error { +func resourceWindowsVirtualMachineCreate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.VMClient ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -406,7 +403,7 @@ func resourceWindowsVirtualMachineCreate(d *schema.ResourceData, meta interface{ return err } - winRmListenersRaw := d.Get("winrm_listener").(*schema.Set).List() + winRmListenersRaw := d.Get("winrm_listener").(*pluginsdk.Set).List() winRmListeners := expandWinRMListener(winRmListenersRaw) params := compute.VirtualMachine{ @@ -561,7 +558,7 @@ func resourceWindowsVirtualMachineCreate(d *schema.ResourceData, meta interface{ return resourceWindowsVirtualMachineRead(d, meta) } -func resourceWindowsVirtualMachineRead(d *schema.ResourceData, meta interface{}) error { +func resourceWindowsVirtualMachineRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.VMClient disksClient := 
meta.(*clients.Client).Compute.DisksClient networkInterfacesClient := meta.(*clients.Client).Network.InterfacesClient @@ -754,7 +751,7 @@ func resourceWindowsVirtualMachineRead(d *schema.ResourceData, meta interface{}) return tags.FlattenAndSet(d, resp.Tags) } -func resourceWindowsVirtualMachineUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceWindowsVirtualMachineUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.VMClient ctx, cancel := timeouts.ForUpdate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -931,6 +928,22 @@ func resourceWindowsVirtualMachineUpdate(d *schema.ResourceData, meta interface{ } } + if d.HasChange("proximity_placement_group_id") { + shouldUpdate = true + + // Code="OperationNotAllowed" Message="Updating proximity placement group of VM is not allowed while the VM is running. Please stop/deallocate the VM and retry the operation." + shouldShutDown = true + shouldDeallocate = true + + if ppgIDRaw, ok := d.GetOk("proximity_placement_group_id"); ok { + update.VirtualMachineProperties.ProximityPlacementGroup = &compute.SubResource{ + ID: utils.String(ppgIDRaw.(string)), + } + } else { + update.VirtualMachineProperties.ProximityPlacementGroup = &compute.SubResource{} + } + } + if d.HasChange("size") { shouldUpdate = true @@ -1170,7 +1183,7 @@ func resourceWindowsVirtualMachineUpdate(d *schema.ResourceData, meta interface{ return resourceWindowsVirtualMachineRead(d, meta) } -func resourceWindowsVirtualMachineDelete(d *schema.ResourceData, meta interface{}) error { +func resourceWindowsVirtualMachineDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.VMClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() @@ -1193,31 +1206,37 @@ func resourceWindowsVirtualMachineDelete(d *schema.ResourceData, meta interface{ return fmt.Errorf("retrieving Windows Virtual Machine %q (Resource Group 
%q): %+v", id.Name, id.ResourceGroup, err) } - // If the VM was in a Failed state we can skip powering off, since that'll fail - if strings.EqualFold(*existing.ProvisioningState, "failed") { - log.Printf("[DEBUG] Powering Off Windows Virtual Machine was skipped because the VM was in %q state %q (Resource Group %q).", *existing.ProvisioningState, id.Name, id.ResourceGroup) - } else { - //ISSUE: 4920 - // shutting down the Virtual Machine prior to removing it means users are no longer charged for some Azure resources - // thus this can be a large cost-saving when deleting larger instances - // https://docs.microsoft.com/en-us/azure/virtual-machines/states-lifecycle - log.Printf("[DEBUG] Powering Off Windows Virtual Machine %q (Resource Group %q)..", id.Name, id.ResourceGroup) - skipShutdown := !meta.(*clients.Client).Features.VirtualMachine.GracefulShutdown - powerOffFuture, err := client.PowerOff(ctx, id.ResourceGroup, id.Name, utils.Bool(skipShutdown)) - if err != nil { - return fmt.Errorf("powering off Windows Virtual Machine %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err) - } - if err := powerOffFuture.WaitForCompletionRef(ctx, client.Client); err != nil { - return fmt.Errorf("waiting for power off of Windows Virtual Machine %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err) + if !meta.(*clients.Client).Features.VirtualMachine.SkipShutdownAndForceDelete { + // If the VM was in a Failed state we can skip powering off, since that'll fail + if strings.EqualFold(*existing.ProvisioningState, "failed") { + log.Printf("[DEBUG] Powering Off Windows Virtual Machine was skipped because the VM was in %q state %q (Resource Group %q).", *existing.ProvisioningState, id.Name, id.ResourceGroup) + } else { + //ISSUE: 4920 + // shutting down the Virtual Machine prior to removing it means users are no longer charged for some Azure resources + // thus this can be a large cost-saving when deleting larger instances + // 
https://docs.microsoft.com/en-us/azure/virtual-machines/states-lifecycle + log.Printf("[DEBUG] Powering Off Windows Virtual Machine %q (Resource Group %q)..", id.Name, id.ResourceGroup) + skipShutdown := !meta.(*clients.Client).Features.VirtualMachine.GracefulShutdown + powerOffFuture, err := client.PowerOff(ctx, id.ResourceGroup, id.Name, utils.Bool(skipShutdown)) + if err != nil { + return fmt.Errorf("powering off Windows Virtual Machine %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err) + } + if err := powerOffFuture.WaitForCompletionRef(ctx, client.Client); err != nil { + return fmt.Errorf("waiting for power off of Windows Virtual Machine %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err) + } + log.Printf("[DEBUG] Powered Off Windows Virtual Machine %q (Resource Group %q).", id.Name, id.ResourceGroup) } - log.Printf("[DEBUG] Powered Off Windows Virtual Machine %q (Resource Group %q).", id.Name, id.ResourceGroup) } log.Printf("[DEBUG] Deleting Windows Virtual Machine %q (Resource Group %q)..", id.Name, id.ResourceGroup) - // @tombuildsstuff: sending `nil` here omits this value from being sent - which matches - // the previous behaviour - we're only splitting this out so it's clear why - // TODO: support force deletion once it's out of Preview, if applicable + + // Force Delete is in an opt-in Preview and can only be specified (true/false) if the feature is enabled + // as such we default this to `nil` which matches the previous behaviour (where this isn't sent) and + // conditionally set this if required var forceDeletion *bool = nil + if meta.(*clients.Client).Features.VirtualMachine.SkipShutdownAndForceDelete { + forceDeletion = utils.Bool(true) + } deleteFuture, err := client.Delete(ctx, id.ResourceGroup, id.Name, forceDeletion) if err != nil { return fmt.Errorf("deleting Windows Virtual Machine %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err) @@ -1277,11 +1296,11 @@ func resourceWindowsVirtualMachineDelete(d 
*schema.ResourceData, meta interface{ if !utils.ResponseWasNotFound(virtualMachine.Response) { log.Printf("[INFO] Windows Virtual Machine still exists, waiting on Windows Virtual Machine %q to be deleted", id.Name) - deleteWait := &resource.StateChangeConf{ + deleteWait := &pluginsdk.StateChangeConf{ Pending: []string{"200"}, Target: []string{"404"}, MinTimeout: 30 * time.Second, - Timeout: d.Timeout(schema.TimeoutDelete), + Timeout: d.Timeout(pluginsdk.TimeoutDelete), Refresh: func() (interface{}, string, error) { log.Printf("[INFO] checking on state of Windows Virtual Machine %q", id.Name) resp, err := client.Get(ctx, id.ResourceGroup, id.Name, "") @@ -1295,7 +1314,7 @@ func resourceWindowsVirtualMachineDelete(d *schema.ResourceData, meta interface{ }, } - if _, err := deleteWait.WaitForState(); err != nil { + if _, err := deleteWait.WaitForStateContext(ctx); err != nil { return fmt.Errorf("waiting for the deletion of Windows Virtual Machine %q (Resource Group %q): %v", id.Name, id.ResourceGroup, err) } } diff --git a/azurerm/internal/services/compute/windows_virtual_machine_resource_auth_test.go b/azurerm/internal/services/compute/windows_virtual_machine_resource_auth_test.go index 075f19f71e3a..9095425555bb 100644 --- a/azurerm/internal/services/compute/windows_virtual_machine_resource_auth_test.go +++ b/azurerm/internal/services/compute/windows_virtual_machine_resource_auth_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -13,10 +12,10 @@ func TestAccWindowsVirtualMachine_authPassword(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { 
Config: r.authPassword(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, diff --git a/azurerm/internal/services/compute/windows_virtual_machine_resource_disk_os_test.go b/azurerm/internal/services/compute/windows_virtual_machine_resource_disk_os_test.go index 674adba6d5ba..482eca4db3a3 100644 --- a/azurerm/internal/services/compute/windows_virtual_machine_resource_disk_os_test.go +++ b/azurerm/internal/services/compute/windows_virtual_machine_resource_disk_os_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -13,10 +12,10 @@ func TestAccWindowsVirtualMachine_diskOSBasic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.diskOSBasic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -30,10 +29,10 @@ func TestAccWindowsVirtualMachine_diskOSCachingType(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.diskOSCachingType(data, "None"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -42,7 +41,7 @@ func TestAccWindowsVirtualMachine_diskOSCachingType(t *testing.T) { ), { Config: r.diskOSCachingType(data, "ReadOnly"), - Check: resource.ComposeTestCheckFunc( + Check: 
acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -51,7 +50,7 @@ func TestAccWindowsVirtualMachine_diskOSCachingType(t *testing.T) { ), { Config: r.diskOSCachingType(data, "ReadWrite"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -65,10 +64,10 @@ func TestAccWindowsVirtualMachine_diskOSCustomName(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.diskOSCustomName(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -82,10 +81,10 @@ func TestAccWindowsVirtualMachine_diskOSCustomSize(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.diskOSCustomSize(data, 130), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -99,10 +98,10 @@ func TestAccWindowsVirtualMachine_diskOSCustomSizeExpanded(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.diskOSCustomSize(data, 130), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -111,7 +110,7 @@ func TestAccWindowsVirtualMachine_diskOSCustomSizeExpanded(t *testing.T) { ), { Config: r.diskOSCustomSize(data, 140), - Check: resource.ComposeTestCheckFunc( + Check: 
acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -125,10 +124,10 @@ func TestAccWindowsVirtualMachine_diskOSDiskEncryptionSet(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.diskOSDiskDiskEncryptionSetEncrypted(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -140,17 +139,17 @@ func TestAccWindowsVirtualMachine_diskOSDiskEncryptionSetUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.diskOSDiskDiskEncryptionSetUnencrypted(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep("admin_password"), { Config: r.diskOSDiskDiskEncryptionSetEncrypted(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -162,10 +161,10 @@ func TestAccWindowsVirtualMachine_diskOSEphemeral(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.diskOSEphemeral(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -179,10 +178,10 @@ func TestAccWindowsVirtualMachine_diskOSStorageTypeStandardLRS(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := 
WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.diskOSStorageAccountType(data, "Standard_LRS"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -196,10 +195,10 @@ func TestAccWindowsVirtualMachine_diskOSStorageTypeStandardSSDLRS(t *testing.T) data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.diskOSStorageAccountType(data, "StandardSSD_LRS"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -213,10 +212,10 @@ func TestAccWindowsVirtualMachine_diskOSStorageTypePremiumLRS(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.diskOSStorageAccountType(data, "Premium_LRS"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -230,10 +229,10 @@ func TestAccWindowsVirtualMachine_diskOSStorageTypeUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.diskOSStorageAccountType(data, "Standard_LRS"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -242,7 +241,7 @@ func TestAccWindowsVirtualMachine_diskOSStorageTypeUpdate(t *testing.T) { ), { Config: r.diskOSStorageAccountType(data, 
"Premium_LRS"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -251,7 +250,7 @@ func TestAccWindowsVirtualMachine_diskOSStorageTypeUpdate(t *testing.T) { ), { Config: r.diskOSStorageAccountType(data, "StandardSSD_LRS"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -260,7 +259,7 @@ func TestAccWindowsVirtualMachine_diskOSStorageTypeUpdate(t *testing.T) { ), { Config: r.diskOSStorageAccountType(data, "Standard_LRS"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -274,11 +273,11 @@ func TestAccWindowsVirtualMachine_diskOSWriteAcceleratorEnabled(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // Enabled Config: r.diskOSWriteAcceleratorEnabled(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -288,7 +287,7 @@ func TestAccWindowsVirtualMachine_diskOSWriteAcceleratorEnabled(t *testing.T) { { // Disabled Config: r.diskOSWriteAcceleratorEnabled(data, false), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -298,7 +297,7 @@ func TestAccWindowsVirtualMachine_diskOSWriteAcceleratorEnabled(t *testing.T) { { // Enabled Config: r.diskOSWriteAcceleratorEnabled(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, diff --git a/azurerm/internal/services/compute/windows_virtual_machine_resource_identity_test.go 
b/azurerm/internal/services/compute/windows_virtual_machine_resource_identity_test.go index 27fe40174fca..126790994b5f 100644 --- a/azurerm/internal/services/compute/windows_virtual_machine_resource_identity_test.go +++ b/azurerm/internal/services/compute/windows_virtual_machine_resource_identity_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -13,10 +12,10 @@ func TestAccWindowsVirtualMachine_identityNone(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.identityNone(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("identity.%").HasValue("0"), ), @@ -31,10 +30,10 @@ func TestAccWindowsVirtualMachine_identitySystemAssigned(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.identitySystemAssigned(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -48,10 +47,10 @@ func TestAccWindowsVirtualMachine_identitySystemAssignedUserAssigned(t *testing. 
data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.identitySystemAssignedUserAssigned(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -65,10 +64,10 @@ func TestAccWindowsVirtualMachine_identityUserAssigned(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.identityUserAssigned(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -77,7 +76,7 @@ func TestAccWindowsVirtualMachine_identityUserAssigned(t *testing.T) { ), { Config: r.identityUserAssignedUpdated(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -91,10 +90,10 @@ func TestAccWindowsVirtualMachine_identityUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.identityNone(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("identity.%").HasValue("0"), ), @@ -104,7 +103,7 @@ func TestAccWindowsVirtualMachine_identityUpdate(t *testing.T) { ), { Config: r.identitySystemAssigned(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -113,7 +112,7 @@ func 
TestAccWindowsVirtualMachine_identityUpdate(t *testing.T) { ), { Config: r.identityUserAssigned(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -122,7 +121,7 @@ func TestAccWindowsVirtualMachine_identityUpdate(t *testing.T) { ), { Config: r.identitySystemAssignedUserAssigned(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -131,7 +130,7 @@ func TestAccWindowsVirtualMachine_identityUpdate(t *testing.T) { ), { Config: r.identityNone(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("identity.%").HasValue("0"), ), diff --git a/azurerm/internal/services/compute/windows_virtual_machine_resource_images_test.go b/azurerm/internal/services/compute/windows_virtual_machine_resource_images_test.go index 8a722902a91d..787ee9517560 100644 --- a/azurerm/internal/services/compute/windows_virtual_machine_resource_images_test.go +++ b/azurerm/internal/services/compute/windows_virtual_machine_resource_images_test.go @@ -6,12 +6,11 @@ import ( "testing" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ 
-19,18 +18,18 @@ func TestAccWindowsVirtualMachine_imageFromImage(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // create the original VM Config: r.imageFromExistingMachinePrep(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( data.CheckWithClientForResource(r.generalizeVirtualMachine, "azurerm_windows_virtual_machine.source"), ), }, { // then create an image from that VM, and then create a VM from that image Config: r.imageFromImage(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -44,10 +43,10 @@ func TestAccWindowsVirtualMachine_imageFromPlan(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.imageFromPlan(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -61,18 +60,18 @@ func TestAccWindowsVirtualMachine_imageFromSharedImageGallery(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // create the original VM Config: r.imageFromExistingMachinePrep(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( data.CheckWithClientForResource(r.generalizeVirtualMachine, "azurerm_windows_virtual_machine.source"), ), }, { // then create an image from that VM, and then create a VM from that image Config: r.imageFromSharedImageGallery(data), - Check: 
resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -86,10 +85,10 @@ func TestAccWindowsVirtualMachine_imageFromSourceImageReference(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.imageFromSourceImageReference(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -360,7 +359,7 @@ resource "azurerm_windows_virtual_machine" "test" { `, r.template(data)) } -func (WindowsVirtualMachineResource) generalizeVirtualMachine(ctx context.Context, client *clients.Client, state *terraform.InstanceState) error { +func (WindowsVirtualMachineResource) generalizeVirtualMachine(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) error { id, err := parse.VirtualMachineID(state.ID) if err != nil { return err diff --git a/azurerm/internal/services/compute/windows_virtual_machine_resource_network_test.go b/azurerm/internal/services/compute/windows_virtual_machine_resource_network_test.go index d24d54a38303..50d9385fa516 100644 --- a/azurerm/internal/services/compute/windows_virtual_machine_resource_network_test.go +++ b/azurerm/internal/services/compute/windows_virtual_machine_resource_network_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -19,10 +18,10 @@ func TestAccWindowsVirtualMachine_networkIPv6(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, 
[]resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkIPv6(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("private_ip_address").Exists(), check.That(data.ResourceName).Key("public_ip_address").HasValue(""), @@ -38,10 +37,10 @@ func TestAccWindowsVirtualMachine_networkMultiple(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkMultiple(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("private_ip_address").Exists(), check.That(data.ResourceName).Key("private_ip_addresses.#").HasValue("2"), @@ -55,7 +54,7 @@ func TestAccWindowsVirtualMachine_networkMultiple(t *testing.T) { { // update the Primary IP Config: r.networkMultipleUpdated(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("private_ip_address").Exists(), check.That(data.ResourceName).Key("private_ip_addresses.#").HasValue("2"), @@ -69,7 +68,7 @@ func TestAccWindowsVirtualMachine_networkMultiple(t *testing.T) { { // remove the secondary IP Config: r.networkMultipleRemoved(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("private_ip_address").Exists(), check.That(data.ResourceName).Key("private_ip_addresses.#").HasValue("1"), @@ -87,10 +86,10 @@ func TestAccWindowsVirtualMachine_networkMultiplePublic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := 
WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkMultiplePublic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("private_ip_address").Exists(), check.That(data.ResourceName).Key("private_ip_addresses.#").HasValue("2"), @@ -104,7 +103,7 @@ func TestAccWindowsVirtualMachine_networkMultiplePublic(t *testing.T) { { // update the Primary IP Config: r.networkMultiplePublicUpdated(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("private_ip_address").Exists(), check.That(data.ResourceName).Key("private_ip_addresses.#").HasValue("2"), @@ -118,7 +117,7 @@ func TestAccWindowsVirtualMachine_networkMultiplePublic(t *testing.T) { { // remove the secondary IP Config: r.networkMultiplePublicRemoved(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("private_ip_address").Exists(), check.That(data.ResourceName).Key("private_ip_addresses.#").HasValue("1"), @@ -136,10 +135,10 @@ func TestAccWindowsVirtualMachine_networkPrivateDynamicIP(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkPrivateDynamicIP(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("private_ip_address").Exists(), check.That(data.ResourceName).Key("public_ip_address").HasValue(""), @@ -155,10 +154,10 @@ func 
TestAccWindowsVirtualMachine_networkPrivateStaticIP(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkPrivateStaticIP(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("private_ip_address").Exists(), check.That(data.ResourceName).Key("public_ip_address").HasValue(""), @@ -174,10 +173,10 @@ func TestAccWindowsVirtualMachine_networkPrivateUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkPrivateDynamicIP(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("private_ip_address").Exists(), check.That(data.ResourceName).Key("public_ip_address").HasValue(""), @@ -188,7 +187,7 @@ func TestAccWindowsVirtualMachine_networkPrivateUpdate(t *testing.T) { ), { Config: r.networkPrivateStaticIP(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("private_ip_address").Exists(), check.That(data.ResourceName).Key("public_ip_address").HasValue(""), @@ -204,10 +203,10 @@ func TestAccWindowsVirtualMachine_networkPublicDynamicPrivateDynamicIP(t *testin data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkPublicDynamicPrivateDynamicIP(data), - Check: 
resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("private_ip_address").Exists(), check.That(data.ResourceName).Key("public_ip_address").Exists(), @@ -223,10 +222,10 @@ func TestAccWindowsVirtualMachine_networkPublicDynamicPrivateStaticIP(t *testing data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkPublicDynamicPrivateStaticIP(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("private_ip_address").Exists(), check.That(data.ResourceName).Key("public_ip_address").Exists(), @@ -242,10 +241,10 @@ func TestAccWindowsVirtualMachine_networkPublicDynamicUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkPublicDynamicPrivateDynamicIP(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("private_ip_address").Exists(), check.That(data.ResourceName).Key("public_ip_address").Exists(), @@ -256,7 +255,7 @@ func TestAccWindowsVirtualMachine_networkPublicDynamicUpdate(t *testing.T) { ), { Config: r.networkPublicDynamicPrivateStaticIP(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("private_ip_address").Exists(), check.That(data.ResourceName).Key("public_ip_address").Exists(), @@ -272,10 +271,10 @@ func 
TestAccWindowsVirtualMachine_networkPublicStaticPrivateDynamicIP(t *testing data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkPublicStaticPrivateDynamicIP(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("private_ip_address").Exists(), check.That(data.ResourceName).Key("public_ip_address").Exists(), @@ -291,10 +290,10 @@ func TestAccWindowsVirtualMachine_networkPublicStaticPrivateStaticIP(t *testing. data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkPublicStaticPrivateStaticIP(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("private_ip_address").Exists(), check.That(data.ResourceName).Key("public_ip_address").Exists(), @@ -310,10 +309,10 @@ func TestAccWindowsVirtualMachine_networkPublicStaticPrivateUpdate(t *testing.T) data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkPublicStaticPrivateDynamicIP(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("private_ip_address").Exists(), check.That(data.ResourceName).Key("public_ip_address").Exists(), @@ -324,7 +323,7 @@ func TestAccWindowsVirtualMachine_networkPublicStaticPrivateUpdate(t *testing.T) ), { Config: 
r.networkPublicStaticPrivateStaticIP(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("private_ip_address").Exists(), check.That(data.ResourceName).Key("public_ip_address").Exists(), diff --git a/azurerm/internal/services/compute/windows_virtual_machine_resource_orchestrated_test.go b/azurerm/internal/services/compute/windows_virtual_machine_resource_orchestrated_test.go index 2d46184e4545..c09265dcdcdd 100644 --- a/azurerm/internal/services/compute/windows_virtual_machine_resource_orchestrated_test.go +++ b/azurerm/internal/services/compute/windows_virtual_machine_resource_orchestrated_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -13,10 +12,10 @@ func TestAccWindowsVirtualMachine_orchestratedZonal(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.orchestratedZonal(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -28,10 +27,10 @@ func TestAccWindowsVirtualMachine_orchestratedWithPlatformFaultDomain(t *testing data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.orchestratedWithPlatformFaultDomain(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -43,10 
+42,10 @@ func TestAccWindowsVirtualMachine_orchestratedZonalWithProximityPlacementGroup(t data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.orchestratedZonalWithProximityPlacementGroup(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -58,10 +57,10 @@ func TestAccWindowsVirtualMachine_orchestratedNonZonal(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.orchestratedNonZonal(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -73,10 +72,10 @@ func TestAccWindowsVirtualMachine_orchestratedMultipleZonal(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.orchestratedMultipleZonal(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -88,10 +87,10 @@ func TestAccWindowsVirtualMachine_orchestratedMultipleNoneZonal(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.orchestratedMultipleNonZonal(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, diff --git 
a/azurerm/internal/services/compute/windows_virtual_machine_resource_other_test.go b/azurerm/internal/services/compute/windows_virtual_machine_resource_other_test.go index 3bbcfdfad2b9..33a07231b620 100644 --- a/azurerm/internal/services/compute/windows_virtual_machine_resource_other_test.go +++ b/azurerm/internal/services/compute/windows_virtual_machine_resource_other_test.go @@ -5,7 +5,6 @@ import ( "regexp" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -14,10 +13,10 @@ func TestAccWindowsVirtualMachine_otherPatchModeManual(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherPatchModeManual(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -31,10 +30,10 @@ func TestAccWindowsVirtualMachine_otherPatchModeAutomaticByOS(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherPatchModeAutomaticByOS(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -48,10 +47,10 @@ func TestAccWindowsVirtualMachine_otherPatchModeAutomaticByPlatform(t *testing.T data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: 
r.otherPatchModeAutomaticByPlatform(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -65,10 +64,10 @@ func TestAccWindowsVirtualMachine_otherPatchModeUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherPatchModeAutomaticByOS(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -77,7 +76,7 @@ func TestAccWindowsVirtualMachine_otherPatchModeUpdate(t *testing.T) { ), { Config: r.otherPatchModeAutomaticByPlatform(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -86,7 +85,7 @@ func TestAccWindowsVirtualMachine_otherPatchModeUpdate(t *testing.T) { ), { Config: r.otherPatchModeManual(data), // this update requires force replacement actually - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -100,10 +99,10 @@ func TestAccWindowsVirtualMachine_otherAdditionalUnattendContent(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherAdditionalUnattendContent(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -118,10 +117,10 @@ func TestAccWindowsVirtualMachine_otherAllowExtensionOperationsDefault(t *testin data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - 
data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherAllowExtensionOperationsDefault(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("allow_extension_operations").HasValue("true"), ), @@ -136,10 +135,10 @@ func TestAccWindowsVirtualMachine_otherAllowExtensionOperationsDisabled(t *testi data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherAllowExtensionOperationsDisabled(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("allow_extension_operations").HasValue("false"), ), @@ -154,10 +153,10 @@ func TestAccWindowsVirtualMachine_otherAllowExtensionOperationsUpdated(t *testin data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherAllowExtensionOperationsDefault(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("allow_extension_operations").HasValue("true"), ), @@ -167,7 +166,7 @@ func TestAccWindowsVirtualMachine_otherAllowExtensionOperationsUpdated(t *testin ), { Config: r.otherAllowExtensionOperationsDisabled(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("allow_extension_operations").HasValue("false"), ), @@ -182,10 +181,10 @@ func 
TestAccWindowsVirtualMachine_otherAllowExtensionOperationsUpdatedWithoutVmA data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherAllowExtensionOperationsDisabledWithoutVmAgent(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("allow_extension_operations").HasValue("false"), ), @@ -195,7 +194,7 @@ func TestAccWindowsVirtualMachine_otherAllowExtensionOperationsUpdatedWithoutVmA ), { Config: r.otherAllowExtensionOperationsEnabledWithoutVmAgent(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("allow_extension_operations").HasValue("true"), ), @@ -210,10 +209,10 @@ func TestAccWindowsVirtualMachine_otherExtensionsTimeBudget(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherExtensionsTimeBudget(data, "PT30M"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("extensions_time_budget").HasValue("PT30M"), ), @@ -228,10 +227,10 @@ func TestAccWindowsVirtualMachine_otherExtensionsTimeBudgetUpdate(t *testing.T) data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherExtensionsTimeBudget(data, "PT30M"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( 
check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("extensions_time_budget").HasValue("PT30M"), ), @@ -241,7 +240,7 @@ func TestAccWindowsVirtualMachine_otherExtensionsTimeBudgetUpdate(t *testing.T) ), { Config: r.otherExtensionsTimeBudget(data, "PT50M"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("extensions_time_budget").HasValue("PT50M"), ), @@ -251,7 +250,7 @@ func TestAccWindowsVirtualMachine_otherExtensionsTimeBudgetUpdate(t *testing.T) ), { Config: r.otherExtensionsTimeBudget(data, "PT30M"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("extensions_time_budget").HasValue("PT30M"), ), @@ -266,11 +265,11 @@ func TestAccWindowsVirtualMachine_otherBootDiagnostics(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // Enabled Config: r.otherBootDiagnostics(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -280,7 +279,7 @@ func TestAccWindowsVirtualMachine_otherBootDiagnostics(t *testing.T) { { // Disabled Config: r.otherBootDiagnosticsDisabled(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -290,7 +289,7 @@ func TestAccWindowsVirtualMachine_otherBootDiagnostics(t *testing.T) { { // Enabled Config: r.otherBootDiagnostics(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -304,11 +303,11 @@ func 
TestAccWindowsVirtualMachine_otherBootDiagnosticsManaged(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // Enabled Config: r.otherBootDiagnosticsManaged(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -318,7 +317,7 @@ func TestAccWindowsVirtualMachine_otherBootDiagnosticsManaged(t *testing.T) { { // Disabled Config: r.otherBootDiagnosticsDisabled(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -328,7 +327,7 @@ func TestAccWindowsVirtualMachine_otherBootDiagnosticsManaged(t *testing.T) { { // Enabled Config: r.otherBootDiagnosticsManaged(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -342,10 +341,10 @@ func TestAccWindowsVirtualMachine_otherComputerNameDefault(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherComputerNameDefault(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("computer_name").Exists(), ), @@ -360,7 +359,7 @@ func TestAccWindowsVirtualMachine_otherComputerNameDefaultInvalid(t *testing.T) data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherComputerNameDefaultInvalid(data), ExpectError: 
regexp.MustCompile("unable to assume default computer name"), @@ -372,10 +371,10 @@ func TestAccWindowsVirtualMachine_otherComputerNameCustom(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherComputerNameCustom(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("computer_name").HasValue("custom123"), ), @@ -390,10 +389,10 @@ func TestAccWindowsVirtualMachine_otherCustomData(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherCustomData(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -405,10 +404,10 @@ func TestAccWindowsVirtualMachine_otherEnableAutomaticUpdatesDefault(t *testing. 
data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherEnableAutomaticUpdatesDefault(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("enable_automatic_updates").HasValue("true"), ), @@ -423,10 +422,10 @@ func TestAccWindowsVirtualMachine_otherEnableAutomaticUpdatesDisabled(t *testing data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherEnableAutomaticUpdatesDisabled(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("enable_automatic_updates").HasValue("false"), ), @@ -437,14 +436,31 @@ func TestAccWindowsVirtualMachine_otherEnableAutomaticUpdatesDisabled(t *testing }) } +func TestAccWindowsVirtualMachine_otherSkipShutdownAndForceDelete(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") + r := WindowsVirtualMachineResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.otherSkipShutdownAndForceDelete(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep( + "admin_password", + ), + }) +} + func TestAccWindowsVirtualMachine_otherLicenseTypeNone(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherLicenseType(data, "None"), - Check: 
resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -458,10 +474,10 @@ func TestAccWindowsVirtualMachine_otherLicenseTypeWindowsClient(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherLicenseTypeWindowsClient(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -475,10 +491,10 @@ func TestAccWindowsVirtualMachine_otherLicenseTypeWindowsServer(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherLicenseType(data, "Windows_Server"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -492,10 +508,10 @@ func TestAccWindowsVirtualMachine_otherLicenseTypeUpdated(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherLicenseTypeDefault(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -504,7 +520,7 @@ func TestAccWindowsVirtualMachine_otherLicenseTypeUpdated(t *testing.T) { ), { Config: r.otherLicenseTypeWindowsClient(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("license_type").HasValue("Windows_Client"), ), @@ -514,7 +530,7 @@ func 
TestAccWindowsVirtualMachine_otherLicenseTypeUpdated(t *testing.T) { ), { Config: r.otherLicenseTypeDefault(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -528,10 +544,10 @@ func TestAccWindowsVirtualMachine_otherPrioritySpot(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherPrioritySpot(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -545,11 +561,11 @@ func TestAccWindowsVirtualMachine_otherPrioritySpotMaxBidPrice(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // expensive, but guarantees this test will pass Config: r.otherPrioritySpotMaxBidPrice(data, "0.5000"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -559,7 +575,7 @@ func TestAccWindowsVirtualMachine_otherPrioritySpotMaxBidPrice(t *testing.T) { { // no limit Config: r.otherPrioritySpotMaxBidPrice(data, "-1"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -573,10 +589,10 @@ func TestAccWindowsVirtualMachine_otherProvisionVMAgentDefault(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherProvisionVMAgentDefault(data), - Check: resource.ComposeTestCheckFunc( + Check: 
acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("provision_vm_agent").HasValue("true"), ), @@ -591,10 +607,10 @@ func TestAccWindowsVirtualMachine_otherProvisionVMAgentDisabled(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherProvisionVMAgentDisabled(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("provision_vm_agent").HasValue("false"), ), @@ -609,10 +625,10 @@ func TestAccWindowsVirtualMachine_otherRequiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.authPassword(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -624,10 +640,10 @@ func TestAccWindowsVirtualMachine_otherSecret(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherSecret(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -636,7 +652,7 @@ func TestAccWindowsVirtualMachine_otherSecret(t *testing.T) { ), { Config: r.otherSecretUpdated(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -645,7 +661,7 @@ func TestAccWindowsVirtualMachine_otherSecret(t 
*testing.T) { ), { Config: r.otherSecretRemoved(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("secret.#").HasValue("0"), ), @@ -660,10 +676,10 @@ func TestAccWindowsVirtualMachine_otherTags(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherTags(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -672,7 +688,7 @@ func TestAccWindowsVirtualMachine_otherTags(t *testing.T) { ), { Config: r.otherTagsUpdated(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -686,10 +702,10 @@ func TestAccWindowsVirtualMachine_otherTimeZone(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherTimeZone(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -703,10 +719,10 @@ func TestAccWindowsVirtualMachine_otherUltraSsdDefault(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherUltraSsd(data, false), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), 
check.That(data.ResourceName).Key("additional_capabilities.0.ultra_ssd_enabled").HasValue("false"), ), @@ -721,10 +737,10 @@ func TestAccWindowsVirtualMachine_otherUltraSsdEnabled(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherUltraSsd(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("additional_capabilities.0.ultra_ssd_enabled").HasValue("true"), ), @@ -739,10 +755,10 @@ func TestAccWindowsVirtualMachine_otherUltraSsdUpdated(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherUltraSsd(data, false), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("additional_capabilities.0.ultra_ssd_enabled").HasValue("false"), ), @@ -752,7 +768,7 @@ func TestAccWindowsVirtualMachine_otherUltraSsdUpdated(t *testing.T) { ), { Config: r.otherUltraSsd(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("additional_capabilities.0.ultra_ssd_enabled").HasValue("true"), ), @@ -767,10 +783,10 @@ func TestAccWindowsVirtualMachine_otherWinRMHTTP(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherWinRMHTTP(data), - Check: resource.ComposeTestCheckFunc( + 
Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -784,10 +800,10 @@ func TestAccWindowsVirtualMachine_otherWinRMHTTPS(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherWinRMHTTPS(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -801,10 +817,10 @@ func TestAccWindowsVirtualMachine_otherEncryptionAtHostEnabled(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherEncryptionAtHostEnabled(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -818,10 +834,10 @@ func TestAccWindowsVirtualMachine_otherEncryptionAtHostEnabledUpdate(t *testing. data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherEncryptionAtHostEnabled(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -830,7 +846,7 @@ func TestAccWindowsVirtualMachine_otherEncryptionAtHostEnabledUpdate(t *testing. ), { Config: r.otherEncryptionAtHostEnabled(data, false), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -839,7 +855,7 @@ func TestAccWindowsVirtualMachine_otherEncryptionAtHostEnabledUpdate(t *testing. 
), { Config: r.otherEncryptionAtHostEnabled(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -853,10 +869,10 @@ func TestAccWindowsVirtualMachine_otherEncryptionAtHostEnabledWithCMK(t *testing data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherEncryptionAtHostEnabledWithCMK(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -970,10 +986,10 @@ func TestAccWindowsVirtualMachine_otherGracefulShutdownDisabled(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherGracefulShutdown(data, false), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -987,10 +1003,10 @@ func TestAccWindowsVirtualMachine_otherGracefulShutdownEnabled(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherGracefulShutdown(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -1485,6 +1501,44 @@ resource "azurerm_windows_virtual_machine" "test" { `, r.template(data)) } +func (r WindowsVirtualMachineResource) otherSkipShutdownAndForceDelete(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features { + virtual_machine { + 
skip_shutdown_and_force_delete = true + } + } +} + +%s + +resource "azurerm_windows_virtual_machine" "test" { + name = local.vm_name + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + size = "Standard_F2" + admin_username = "adminuser" + admin_password = "P@$$w0rd1234!" + network_interface_ids = [ + azurerm_network_interface.test.id, + ] + + os_disk { + caching = "ReadWrite" + storage_account_type = "Standard_LRS" + } + + source_image_reference { + publisher = "MicrosoftWindowsServer" + offer = "WindowsServer" + sku = "2016-Datacenter" + version = "latest" + } +} +`, r.template(data)) +} + func (r WindowsVirtualMachineResource) otherLicenseTypeDefault(data acceptance.TestData) string { return fmt.Sprintf(` %s diff --git a/azurerm/internal/services/compute/windows_virtual_machine_resource_scaling_test.go b/azurerm/internal/services/compute/windows_virtual_machine_resource_scaling_test.go index ebde5f7d7a2c..517e7515a8eb 100644 --- a/azurerm/internal/services/compute/windows_virtual_machine_resource_scaling_test.go +++ b/azurerm/internal/services/compute/windows_virtual_machine_resource_scaling_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -13,11 +12,11 @@ func TestAccWindowsVirtualMachine_scalingAdditionalCapabilitiesUltraSSD(t *testi data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // NOTE: this requires a large-ish machine to provision Config: r.scalingAdditionalCapabilitiesUltraSSD(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( 
check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -31,10 +30,10 @@ func TestAccWindowsVirtualMachine_scalingAvailabilitySet(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.scalingAvailabilitySet(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -48,10 +47,10 @@ func TestAccWindowsVirtualMachine_scalingDedicatedHost(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.scalingDedicatedHost(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -65,10 +64,10 @@ func TestAccWindowsVirtualMachine_scalingDedicatedHostUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.scalingDedicatedHostInitial(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -77,7 +76,7 @@ func TestAccWindowsVirtualMachine_scalingDedicatedHostUpdate(t *testing.T) { ), { Config: r.scalingDedicatedHost(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -86,7 +85,7 @@ func TestAccWindowsVirtualMachine_scalingDedicatedHostUpdate(t *testing.T) { ), { Config: r.scalingDedicatedHostUpdate(data), - Check: resource.ComposeTestCheckFunc( + Check: 
acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -95,7 +94,7 @@ func TestAccWindowsVirtualMachine_scalingDedicatedHostUpdate(t *testing.T) { ), { Config: r.scalingDedicatedHostRemoved(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -109,10 +108,62 @@ func TestAccWindowsVirtualMachine_scalingProximityPlacementGroup(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.scalingProximityPlacementGroup(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep( + "admin_password", + ), + }) +} + +func TestAccWindowsVirtualMachine_scalingProximityPlacementGroupUpdate(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") + r := WindowsVirtualMachineResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.scalingProximityPlacementGroup(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep( + "admin_password", + ), + { + Config: r.scalingProximityPlacementGroupUpdate(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep( + "admin_password", + ), + }) +} + +func TestAccWindowsVirtualMachine_scalingProximityPlacementGroupRemoved(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") + r := WindowsVirtualMachineResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.scalingProximityPlacementGroup(data), + Check: acceptance.ComposeTestCheckFunc( + 
check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep( + "admin_password", + ), + { + Config: r.scalingProximityPlacementGroupRemoved(data), + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -126,10 +177,10 @@ func TestAccWindowsVirtualMachine_scalingMachineSizeUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.scalingMachineSize(data, "Standard_F2"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -138,7 +189,7 @@ func TestAccWindowsVirtualMachine_scalingMachineSizeUpdate(t *testing.T) { ), { Config: r.scalingMachineSize(data, "Standard_F4"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -147,7 +198,7 @@ func TestAccWindowsVirtualMachine_scalingMachineSizeUpdate(t *testing.T) { ), { Config: r.scalingMachineSize(data, "Standard_F4s_v2"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -161,10 +212,10 @@ func TestAccWindowsVirtualMachine_scalingZones(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine", "test") r := WindowsVirtualMachineResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.scalingZone(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -467,6 +518,85 @@ resource "azurerm_windows_virtual_machine" "test" { `, r.template(data), data.RandomInteger) } +func (r WindowsVirtualMachineResource) scalingProximityPlacementGroupUpdate(data 
acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_proximity_placement_group" "test" { + name = "acctestPPG-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location +} + +resource "azurerm_proximity_placement_group" "second" { + name = "acctestPPG2-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location +} + +resource "azurerm_windows_virtual_machine" "test" { + name = local.vm_name + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + size = "Standard_F2" + admin_username = "adminuser" + admin_password = "P@$$w0rd1234!" + proximity_placement_group_id = azurerm_proximity_placement_group.second.id + network_interface_ids = [ + azurerm_network_interface.test.id, + ] + + os_disk { + caching = "ReadWrite" + storage_account_type = "Standard_LRS" + } + + source_image_reference { + publisher = "MicrosoftWindowsServer" + offer = "WindowsServer" + sku = "2016-Datacenter" + version = "latest" + } +} +`, r.template(data), data.RandomInteger, data.RandomInteger) +} + +func (r WindowsVirtualMachineResource) scalingProximityPlacementGroupRemoved(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_proximity_placement_group" "test" { + name = "acctestPPG-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location +} + +resource "azurerm_windows_virtual_machine" "test" { + name = local.vm_name + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + size = "Standard_F2" + admin_username = "adminuser" + admin_password = "P@$$w0rd1234!" 
+ network_interface_ids = [ + azurerm_network_interface.test.id, + ] + + os_disk { + caching = "ReadWrite" + storage_account_type = "Standard_LRS" + } + + source_image_reference { + publisher = "MicrosoftWindowsServer" + offer = "WindowsServer" + sku = "2016-Datacenter" + version = "latest" + } +} +`, r.template(data), data.RandomInteger) +} + func (r WindowsVirtualMachineResource) scalingMachineSize(data acceptance.TestData, size string) string { return fmt.Sprintf(` %s diff --git a/azurerm/internal/services/compute/windows_virtual_machine_resource_test.go b/azurerm/internal/services/compute/windows_virtual_machine_resource_test.go index e8f3f8a41b58..fccc8d38dbbf 100644 --- a/azurerm/internal/services/compute/windows_virtual_machine_resource_test.go +++ b/azurerm/internal/services/compute/windows_virtual_machine_resource_test.go @@ -4,17 +4,17 @@ import ( "context" "fmt" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) type WindowsVirtualMachineResource struct { } -func (t WindowsVirtualMachineResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (t WindowsVirtualMachineResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := parse.VirtualMachineID(state.ID) if err != nil { return nil, err diff --git a/azurerm/internal/services/compute/windows_virtual_machine_scale_set_auth_resource_test.go b/azurerm/internal/services/compute/windows_virtual_machine_scale_set_auth_resource_test.go index 
ba45aa9200f9..d8a448599e1b 100644 --- a/azurerm/internal/services/compute/windows_virtual_machine_scale_set_auth_resource_test.go +++ b/azurerm/internal/services/compute/windows_virtual_machine_scale_set_auth_resource_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -13,10 +12,10 @@ func TestAccWindowsVirtualMachineScaleSet_authPassword(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.authPassword(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, diff --git a/azurerm/internal/services/compute/windows_virtual_machine_scale_set_disk_data_resource_test.go b/azurerm/internal/services/compute/windows_virtual_machine_scale_set_disk_data_resource_test.go index 38c59efd7408..08c0dc0f9b5a 100644 --- a/azurerm/internal/services/compute/windows_virtual_machine_scale_set_disk_data_resource_test.go +++ b/azurerm/internal/services/compute/windows_virtual_machine_scale_set_disk_data_resource_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -13,10 +12,10 @@ func TestAccWindowsVirtualMachineScaleSet_disksDataDiskBasic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, 
[]resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.disksDataDiskBasic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -30,10 +29,10 @@ func TestAccWindowsVirtualMachineScaleSet_disksDataDiskCaching(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.disksDataDiskCaching(data, "None"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -42,7 +41,7 @@ func TestAccWindowsVirtualMachineScaleSet_disksDataDiskCaching(t *testing.T) { ), { Config: r.disksDataDiskCaching(data, "ReadOnly"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -51,7 +50,7 @@ func TestAccWindowsVirtualMachineScaleSet_disksDataDiskCaching(t *testing.T) { ), { Config: r.disksDataDiskCaching(data, "ReadWrite"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -65,10 +64,10 @@ func TestAccWindowsVirtualMachineScaleSet_disksDataDiskDiskEncryptionSet(t *test data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.disksDataDisk_diskEncryptionSet(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -82,11 +81,11 @@ func TestAccWindowsVirtualMachineScaleSet_disksDataDiskResizing(t *testing.T) { data := acceptance.BuildTestData(t, 
"azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // 30GB Config: r.disksDataDiskResize(data, 30), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -96,7 +95,7 @@ func TestAccWindowsVirtualMachineScaleSet_disksDataDiskResizing(t *testing.T) { { // 60GB Config: r.disksDataDiskResize(data, 60), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -110,10 +109,10 @@ func TestAccWindowsVirtualMachineScaleSet_disksDataDiskMultiple(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.disksDataDiskMultiple(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -127,10 +126,10 @@ func TestAccWindowsVirtualMachineScaleSet_disksDataDiskRemove(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.disksDataDiskBasic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -139,7 +138,7 @@ func TestAccWindowsVirtualMachineScaleSet_disksDataDiskRemove(t *testing.T) { ), { Config: r.authPassword(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -153,11 +152,11 @@ func 
TestAccWindowsVirtualMachineScaleSet_disksDataDiskScaling(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // no disks Config: r.authPassword(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -167,7 +166,7 @@ func TestAccWindowsVirtualMachineScaleSet_disksDataDiskScaling(t *testing.T) { { // one disk Config: r.disksDataDiskBasic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -177,7 +176,7 @@ func TestAccWindowsVirtualMachineScaleSet_disksDataDiskScaling(t *testing.T) { { // two disks Config: r.disksDataDiskMultiple(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -187,7 +186,7 @@ func TestAccWindowsVirtualMachineScaleSet_disksDataDiskScaling(t *testing.T) { { // no disks Config: r.authPassword(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -201,10 +200,10 @@ func TestAccWindowsVirtualMachineScaleSet_disksDataDiskStorageAccountTypeStandar data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.disksDataDiskStorageAccountType(data, "Standard_LRS"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -218,10 +217,10 @@ func TestAccWindowsVirtualMachineScaleSet_disksDataDiskStorageAccountTypeStandar data := 
acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.disksDataDiskStorageAccountType(data, "StandardSSD_LRS"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -235,10 +234,10 @@ func TestAccWindowsVirtualMachineScaleSet_disksDataDiskStorageAccountTypePremium data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.disksDataDiskStorageAccountType(data, "Premium_LRS"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -253,10 +252,10 @@ func TestAccWindowsVirtualMachineScaleSet_disksDataDiskStorageAccountTypeUltraSS r := WindowsVirtualMachineScaleSetResource{} // Are supported in East US 2, SouthEast Asia, and North Europe, in two availability zones per region - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.disksDataDiskStorageAccountTypeUltraSSDLRS(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -271,10 +270,10 @@ func TestAccWindowsVirtualMachineScaleSet_disksDataDiskStorageAccountTypeUltraSS r := WindowsVirtualMachineScaleSetResource{} // Are supported in East US 2, SouthEast Asia, and North Europe, in two availability zones per region - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.disksDataDiskStorageAccountTypeUltraSSDLRSWithIOPS(data), - Check: resource.ComposeTestCheckFunc( + Check: 
acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -289,10 +288,10 @@ func TestAccWindowsVirtualMachineScaleSet_disksDataDiskStorageAccountTypeUltraSS r := WindowsVirtualMachineScaleSetResource{} // Are supported in East US 2, SouthEast Asia, and North Europe, in two availability zones per region - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.disksDataDiskStorageAccountTypeUltraSSDLRSWithMBPS(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -307,10 +306,10 @@ func TestAccWindowsVirtualMachineScaleSet_disksDataDiskStorageAccountTypeUltraSS r := WindowsVirtualMachineScaleSetResource{} // Are supported in East US 2, SouthEast Asia, and North Europe, in two availability zones per region - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.disksDataDiskStorageAccountTypeUltraSSDLRSWithIOPSAndMBPS(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -324,10 +323,10 @@ func TestAccWindowsVirtualMachineScaleSet_disksDataDiskWriteAcceleratorEnabled(t data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.disksDataDiskWriteAcceleratorEnabled(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, diff --git a/azurerm/internal/services/compute/windows_virtual_machine_scale_set_disk_os_resource_test.go b/azurerm/internal/services/compute/windows_virtual_machine_scale_set_disk_os_resource_test.go index d4ec4416a276..b2b7cdd3734b 100644 --- 
a/azurerm/internal/services/compute/windows_virtual_machine_scale_set_disk_os_resource_test.go +++ b/azurerm/internal/services/compute/windows_virtual_machine_scale_set_disk_os_resource_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -13,10 +12,10 @@ func TestAccWindowsVirtualMachineScaleSet_disksOSDiskCaching(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.disksOSDiskCaching(data, "None"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -25,7 +24,7 @@ func TestAccWindowsVirtualMachineScaleSet_disksOSDiskCaching(t *testing.T) { ), { Config: r.disksOSDiskCaching(data, "ReadOnly"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -34,7 +33,7 @@ func TestAccWindowsVirtualMachineScaleSet_disksOSDiskCaching(t *testing.T) { ), { Config: r.disksOSDiskCaching(data, "ReadWrite"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -48,11 +47,11 @@ func TestAccWindowsVirtualMachineScaleSet_disksOSDiskCustomSize(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // unset Config: r.authPassword(data), - Check: resource.ComposeTestCheckFunc( + Check: 
acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -61,7 +60,7 @@ func TestAccWindowsVirtualMachineScaleSet_disksOSDiskCustomSize(t *testing.T) { ), { Config: r.disksOSDiskCustomSize(data, 128), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -71,7 +70,7 @@ func TestAccWindowsVirtualMachineScaleSet_disksOSDiskCustomSize(t *testing.T) { { // resize a second time to confirm https://github.com/Azure/azure-rest-api-specs/issues/1906 Config: r.disksOSDiskCustomSize(data, 256), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -85,10 +84,10 @@ func TestAccWindowsVirtualMachineScaleSet_disksOSDiskEphemeral(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.disksOSDiskEphemeral(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -102,10 +101,10 @@ func TestAccWindowsVirtualMachineScaleSet_disksOSDiskDiskEncryptionSet(t *testin data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.disksOSDisk_diskEncryptionSet(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -119,10 +118,10 @@ func TestAccWindowsVirtualMachineScaleSet_disksOSDiskStorageAccountTypeStandardL data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := 
WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.disksOSDiskStorageAccountType(data, "Standard_LRS"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -136,10 +135,10 @@ func TestAccWindowsVirtualMachineScaleSet_disksOSDiskStorageAccountTypeStandardS data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.disksOSDiskStorageAccountType(data, "StandardSSD_LRS"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -153,10 +152,10 @@ func TestAccWindowsVirtualMachineScaleSet_disksOSDiskStorageAccountTypePremiumLR data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.disksOSDiskStorageAccountType(data, "Premium_LRS"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -170,10 +169,10 @@ func TestAccWindowsVirtualMachineScaleSet_disksOSDiskWriteAcceleratorEnabled(t * data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.disksOSDiskWriteAcceleratorEnabled(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, diff --git 
a/azurerm/internal/services/compute/windows_virtual_machine_scale_set_extensions_test.go b/azurerm/internal/services/compute/windows_virtual_machine_scale_set_extensions_test.go index 9f4cb8d96afa..65884a511614 100644 --- a/azurerm/internal/services/compute/windows_virtual_machine_scale_set_extensions_test.go +++ b/azurerm/internal/services/compute/windows_virtual_machine_scale_set_extensions_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -13,10 +12,10 @@ func TestAccWindowsVirtualMachineScaleSet_extensionDoNotRunOnOverProvisionedMach data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.extensionDoNotRunOnOverProvisionedMachines(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -30,10 +29,10 @@ func TestAccWindowsVirtualMachineScaleSet_extensionsDoNotRunOnOverProvisionedMac data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.extensionDoNotRunOnOverProvisionedMachines(data, false), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -42,7 +41,7 @@ func TestAccWindowsVirtualMachineScaleSet_extensionsDoNotRunOnOverProvisionedMac ), { Config: r.extensionDoNotRunOnOverProvisionedMachines(data, true), - Check: resource.ComposeTestCheckFunc( + Check: 
acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -51,7 +50,7 @@ func TestAccWindowsVirtualMachineScaleSet_extensionsDoNotRunOnOverProvisionedMac ), { Config: r.extensionDoNotRunOnOverProvisionedMachines(data, false), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -65,10 +64,10 @@ func TestAccWindowsVirtualMachineScaleSet_extensionBasic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.extensionBasic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -80,17 +79,17 @@ func TestAccWindowsVirtualMachineScaleSet_extensionForceUpdateTag(t *testing.T) data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.extensionForceUpdateTag(data, "first"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep("admin_password", "extension.0.protected_settings"), { Config: r.extensionForceUpdateTag(data, "second"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -102,10 +101,10 @@ func TestAccWindowsVirtualMachineScaleSet_extensionMultiple(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { 
Config: r.extensionMultiple(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -117,10 +116,10 @@ func TestAccWindowsVirtualMachineScaleSet_extensionOnlySettings(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.extensionOnlySettings(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -132,24 +131,24 @@ func TestAccWindowsVirtualMachineScaleSet_extensionUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.extensionBasic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep("admin_password", "extension.0.protected_settings"), { Config: r.extensionUpdate(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep("admin_password", "extension.0.protected_settings"), { Config: r.extensionBasic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -161,10 +160,10 @@ func TestAccWindowsVirtualMachineScaleSet_extensionsRollingUpgradeWithHealthExte data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: 
r.extensionsRollingUpgradeWithHealthExtension(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -176,10 +175,10 @@ func TestAccWindowsVirtualMachineScaleSet_extensionsAutomaticUpgradeWithHealthEx data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.extensionsAutomaticUpgradeWithHealthExtension(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -195,10 +194,10 @@ func TestAccWindowsVirtualMachineScaleSet_extensionWithTimeBudget(t *testing.T) data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.extensionWithTimeBudget(data, "PT30M"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -210,24 +209,24 @@ func TestAccWindowsVirtualMachineScaleSet_extensionWithTimeBudgetUpdate(t *testi data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.extensionWithTimeBudget(data, "PT30M"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep("admin_password", "extension.0.protected_settings"), { Config: r.extensionWithTimeBudget(data, "PT1H"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( 
check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep("admin_password", "extension.0.protected_settings"), { Config: r.extensionWithTimeBudget(data, "PT30M"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -239,10 +238,10 @@ func TestAccWindowsVirtualMachineScaleSet_extensionTimeBudgetWithoutExtensions(t data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.extensionTimeBudgetWithoutExtensions(data, "PT30M"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -254,24 +253,24 @@ func TestAccWindowsVirtualMachineScaleSet_extensionTimeBudgetWithoutExtensionsUp data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.extensionTimeBudgetWithoutExtensions(data, "PT30M"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep("admin_password", "extension.0.protected_settings"), { Config: r.extensionTimeBudgetWithoutExtensions(data, "PT1H"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep("admin_password", "extension.0.protected_settings"), { Config: r.extensionTimeBudgetWithoutExtensions(data, "PT30M"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -283,10 +282,10 @@ func 
TestAccWindowsVirtualMachineScaleSet_extensionsAutomaticUpgradeWithServiceF data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.extensionsAutomaticUpgradeWithServiceFabricExtension(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -302,17 +301,17 @@ func TestAccWindowsVirtualMachineScaleSet_extensionAutomaticUpgradeUpdate(t *tes data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.extensionsWithHealthExtension(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep("admin_password", "extension.0.protected_settings", "enable_automatic_updates"), { Config: r.extensionsAutomaticUpgradeWithHealthExtension(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, diff --git a/azurerm/internal/services/compute/windows_virtual_machine_scale_set_identity_resource_test.go b/azurerm/internal/services/compute/windows_virtual_machine_scale_set_identity_resource_test.go index b560eb2a64ae..28477034bd95 100644 --- a/azurerm/internal/services/compute/windows_virtual_machine_scale_set_identity_resource_test.go +++ b/azurerm/internal/services/compute/windows_virtual_machine_scale_set_identity_resource_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -13,10 +12,10 @@ func TestAccWindowsVirtualMachineScaleSet_identityNone(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.authPassword(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("identity.%").HasValue("0"), ), @@ -31,10 +30,10 @@ func TestAccWindowsVirtualMachineScaleSet_identitySystemAssigned(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.identitySystemAssigned(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("identity.0.principal_id").Exists(), ), @@ -45,7 +44,7 @@ func TestAccWindowsVirtualMachineScaleSet_identitySystemAssigned(t *testing.T) { { // disable it Config: r.authPassword(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("identity.%").HasValue("0"), ), @@ -55,7 +54,7 @@ func TestAccWindowsVirtualMachineScaleSet_identitySystemAssigned(t *testing.T) { ), { Config: r.identitySystemAssigned(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("identity.0.principal_id").Exists(), ), @@ -70,10 +69,10 @@ func TestAccWindowsVirtualMachineScaleSet_identityUserAssigned(t 
*testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.identityUserAssigned(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -83,7 +82,7 @@ func TestAccWindowsVirtualMachineScaleSet_identityUserAssigned(t *testing.T) { { // disable it Config: r.authPassword(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("identity.%").HasValue("0"), ), @@ -93,7 +92,7 @@ func TestAccWindowsVirtualMachineScaleSet_identityUserAssigned(t *testing.T) { ), { Config: r.identityUserAssigned(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -103,7 +102,7 @@ func TestAccWindowsVirtualMachineScaleSet_identityUserAssigned(t *testing.T) { { // second Config: r.identityUserAssignedUpdated(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -117,10 +116,10 @@ func TestAccWindowsVirtualMachineScaleSet_identitySystemAssignedUserAssigned(t * data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.identitySystemAssignedUserAssigned(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("identity.0.principal_id").Exists(), ), @@ -131,7 +130,7 @@ func 
TestAccWindowsVirtualMachineScaleSet_identitySystemAssignedUserAssigned(t * { // disable it Config: r.authPassword(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("identity.%").HasValue("0"), ), @@ -141,7 +140,7 @@ func TestAccWindowsVirtualMachineScaleSet_identitySystemAssignedUserAssigned(t * ), { Config: r.identitySystemAssignedUserAssigned(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("identity.0.principal_id").Exists(), ), diff --git a/azurerm/internal/services/compute/windows_virtual_machine_scale_set_images_resource_test.go b/azurerm/internal/services/compute/windows_virtual_machine_scale_set_images_resource_test.go index a98c603dd33c..292470773539 100644 --- a/azurerm/internal/services/compute/windows_virtual_machine_scale_set_images_resource_test.go +++ b/azurerm/internal/services/compute/windows_virtual_machine_scale_set_images_resource_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -13,10 +12,10 @@ func TestAccWindowsVirtualMachineScaleSet_imagesAutomaticUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.imagesAutomaticUpdate(data, "2016-Datacenter"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -26,7 +25,7 @@ func 
TestAccWindowsVirtualMachineScaleSet_imagesAutomaticUpdate(t *testing.T) { ), { Config: r.imagesAutomaticUpdate(data, "2019-Datacenter"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -41,10 +40,10 @@ func TestAccWindowsVirtualMachineScaleSet_imagesDisableAutomaticUpdate(t *testin data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.imagesDisableAutomaticUpdate(data, "2016-Datacenter"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -54,7 +53,7 @@ func TestAccWindowsVirtualMachineScaleSet_imagesDisableAutomaticUpdate(t *testin ), { Config: r.imagesDisableAutomaticUpdate(data, "2019-Datacenter"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -69,7 +68,7 @@ func TestAccWindowsVirtualMachineScaleSet_imagesFromCapturedVirtualMachineImage( data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // provision a standard Virtual Machine with an Unmanaged Disk Config: r.imagesFromVirtualMachinePrerequisitesWithVM(data), @@ -85,7 +84,7 @@ func TestAccWindowsVirtualMachineScaleSet_imagesFromCapturedVirtualMachineImage( { // then provision a Virtual Machine Scale Set using this image Config: r.imagesFromVirtualMachine(data, "first"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -95,7 +94,7 @@ func 
TestAccWindowsVirtualMachineScaleSet_imagesFromCapturedVirtualMachineImage( { // then update the image on this Virtual Machine Scale Set Config: r.imagesFromVirtualMachine(data, "second"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), // Ensure the storage account and disk size has not changed check.That(data.ResourceName).Key("os_disk.0.storage_account_type").HasValue("Standard_LRS"), @@ -112,10 +111,10 @@ func TestAccWindowsVirtualMachineScaleSet_imagesManualUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.imagesManualUpdate(data, "2016-Datacenter"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -124,7 +123,7 @@ func TestAccWindowsVirtualMachineScaleSet_imagesManualUpdate(t *testing.T) { ), { Config: r.imagesManualUpdate(data, "2019-Datacenter"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -138,10 +137,10 @@ func TestAccWindowsVirtualMachineScaleSet_imagesManualUpdateExternalRoll(t *test data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.imagesManualUpdateExternalRoll(data, "2016-Datacenter"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -150,7 +149,7 @@ func TestAccWindowsVirtualMachineScaleSet_imagesManualUpdateExternalRoll(t *test ), { Config: r.imagesManualUpdateExternalRoll(data, "2019-Datacenter"), 
- Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -164,10 +163,10 @@ func TestAccWindowsVirtualMachineScaleSet_imagesRollingUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.imagesRollingUpdate(data, "2019-Datacenter"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -176,7 +175,7 @@ func TestAccWindowsVirtualMachineScaleSet_imagesRollingUpdate(t *testing.T) { ), { Config: r.imagesRollingUpdate(data, "2019-Datacenter"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -190,10 +189,10 @@ func TestAccWindowsVirtualMachineScaleSet_imagesPlan(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.imagesPlan(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, diff --git a/azurerm/internal/services/compute/windows_virtual_machine_scale_set_network_resource_test.go b/azurerm/internal/services/compute/windows_virtual_machine_scale_set_network_resource_test.go index e2549c17c464..50b67920ff09 100644 --- a/azurerm/internal/services/compute/windows_virtual_machine_scale_set_network_resource_test.go +++ b/azurerm/internal/services/compute/windows_virtual_machine_scale_set_network_resource_test.go @@ -5,7 +5,6 @@ import ( "regexp" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -14,10 +13,10 @@ func TestAccWindowsVirtualMachineScaleSet_networkAcceleratedNetworking(t *testin data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkAcceleratedNetworking(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -31,10 +30,10 @@ func TestAccWindowsVirtualMachineScaleSet_networkAcceleratedNetworkingUpdated(t data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkAcceleratedNetworking(data, false), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -43,7 +42,7 @@ func TestAccWindowsVirtualMachineScaleSet_networkAcceleratedNetworkingUpdated(t ), { Config: r.networkAcceleratedNetworking(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -52,7 +51,7 @@ func TestAccWindowsVirtualMachineScaleSet_networkAcceleratedNetworkingUpdated(t ), { Config: r.networkAcceleratedNetworking(data, false), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -66,10 +65,10 @@ func TestAccWindowsVirtualMachineScaleSet_networkApplicationGateway(t *testing.T data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") 
r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkApplicationGateway(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -83,10 +82,10 @@ func TestAccWindowsVirtualMachineScaleSet_networkApplicationSecurityGroup(t *tes data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkApplicationSecurityGroup(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -100,11 +99,11 @@ func TestAccWindowsVirtualMachineScaleSet_networkApplicationSecurityGroupUpdate( data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // none Config: r.networkPrivate(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -114,7 +113,7 @@ func TestAccWindowsVirtualMachineScaleSet_networkApplicationSecurityGroupUpdate( { // one Config: r.networkApplicationSecurityGroup(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -124,7 +123,7 @@ func TestAccWindowsVirtualMachineScaleSet_networkApplicationSecurityGroupUpdate( { // another Config: r.networkApplicationSecurityGroupUpdated(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -134,7 +133,7 @@ func 
TestAccWindowsVirtualMachineScaleSet_networkApplicationSecurityGroupUpdate( { // none Config: r.networkPrivate(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -148,10 +147,10 @@ func TestAccWindowsVirtualMachineScaleSet_networkDNSServers(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkDNSServers(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -160,7 +159,7 @@ func TestAccWindowsVirtualMachineScaleSet_networkDNSServers(t *testing.T) { ), { Config: r.networkDNSServersUpdated(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -174,11 +173,11 @@ func TestAccWindowsVirtualMachineScaleSet_networkIPForwarding(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // enabled Config: r.networkIPForwarding(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -188,7 +187,7 @@ func TestAccWindowsVirtualMachineScaleSet_networkIPForwarding(t *testing.T) { { // disabled Config: r.networkPrivate(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -198,7 +197,7 @@ func TestAccWindowsVirtualMachineScaleSet_networkIPForwarding(t *testing.T) { { // enabled Config: r.networkIPForwarding(data), - Check: 
resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -212,10 +211,10 @@ func TestAccWindowsVirtualMachineScaleSet_networkIPv6(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkIPv6(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), ExpectError: regexp.MustCompile("Error expanding `network_interface`: An IPv6 Primary IP Configuration is unsupported - instead add a IPv4 IP Configuration as the Primary and make the IPv6 IP Configuration the secondary"), @@ -227,10 +226,10 @@ func TestAccWindowsVirtualMachineScaleSet_networkLoadBalancer(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkLoadBalancer(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -244,10 +243,10 @@ func TestAccWindowsVirtualMachineScaleSet_networkMultipleIPConfigurations(t *tes data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkMultipleIPConfigurations(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -261,10 +260,10 @@ func TestAccWindowsVirtualMachineScaleSet_networkMultipleIPConfigurationsIPv6(t data := acceptance.BuildTestData(t, 
"azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkMultipleIPConfigurationsIPv6(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -278,10 +277,10 @@ func TestAccWindowsVirtualMachineScaleSet_networkMultipleNICs(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkMultipleNICs(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -295,10 +294,10 @@ func TestAccWindowsVirtualMachineScaleSet_networkMultipleNICsMultipleIPConfigura data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkMultipleNICsMultipleIPConfigurations(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -312,10 +311,10 @@ func TestAccWindowsVirtualMachineScaleSet_networkMultipleNICsMultiplePublicIPs(t data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkMultipleNICsMultiplePublicIPs(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -329,10 +328,10 @@ func 
TestAccWindowsVirtualMachineScaleSet_networkMultipleNICsWithDifferentDNSSer data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkMultipleNICsWithDifferentDNSServers(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -346,10 +345,10 @@ func TestAccWindowsVirtualMachineScaleSet_networkNetworkSecurityGroup(t *testing data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkNetworkSecurityGroup(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -363,11 +362,11 @@ func TestAccWindowsVirtualMachineScaleSet_networkNetworkSecurityGroupUpdate(t *t data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // without Config: r.networkPrivate(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -377,7 +376,7 @@ func TestAccWindowsVirtualMachineScaleSet_networkNetworkSecurityGroupUpdate(t *t { // add one Config: r.networkNetworkSecurityGroup(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -387,7 +386,7 @@ func TestAccWindowsVirtualMachineScaleSet_networkNetworkSecurityGroupUpdate(t *t { // change it Config: r.networkNetworkSecurityGroupUpdated(data), - 
Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -397,7 +396,7 @@ func TestAccWindowsVirtualMachineScaleSet_networkNetworkSecurityGroupUpdate(t *t { // remove it Config: r.networkPrivate(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -411,10 +410,10 @@ func TestAccWindowsVirtualMachineScaleSet_networkPrivate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkPrivate(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -428,10 +427,10 @@ func TestAccWindowsVirtualMachineScaleSet_networkPublicIP(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkPublicIP(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -445,10 +444,10 @@ func TestAccWindowsVirtualMachineScaleSet_networkPublicIPDomainNameLabel(t *test data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkPublicIPDomainNameLabel(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -462,10 +461,10 @@ func 
TestAccWindowsVirtualMachineScaleSet_networkPublicIPFromPrefix(t *testing.T data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkPublicIPFromPrefix(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -479,10 +478,10 @@ func TestAccWindowsVirtualMachineScaleSet_networkPublicIPTags(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkPublicIPTags(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, diff --git a/azurerm/internal/services/compute/windows_virtual_machine_scale_set_other_resource_test.go b/azurerm/internal/services/compute/windows_virtual_machine_scale_set_other_resource_test.go index 6bcd35d55bca..c424c50a48da 100644 --- a/azurerm/internal/services/compute/windows_virtual_machine_scale_set_other_resource_test.go +++ b/azurerm/internal/services/compute/windows_virtual_machine_scale_set_other_resource_test.go @@ -5,7 +5,6 @@ import ( "regexp" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -14,10 +13,10 @@ func TestAccWindowsVirtualMachineScaleSet_otherAdditionalUnattendContent(t *test data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + 
data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherAdditionalUnattendContent(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -32,11 +31,11 @@ func TestAccWindowsVirtualMachineScaleSet_otherBootDiagnostics(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // Enabled Config: r.otherBootDiagnostics(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -46,7 +45,7 @@ func TestAccWindowsVirtualMachineScaleSet_otherBootDiagnostics(t *testing.T) { { // Removed Config: r.otherBootDiagnosticsDisabled(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -56,7 +55,7 @@ func TestAccWindowsVirtualMachineScaleSet_otherBootDiagnostics(t *testing.T) { { // Enabled Config: r.otherBootDiagnostics(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -70,11 +69,11 @@ func TestAccWindowsVirtualMachineScaleSet_otherBootDiagnosticsMananged(t *testin data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // Enabled Config: r.otherBootDiagnosticsManaged(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -84,7 +83,7 @@ func TestAccWindowsVirtualMachineScaleSet_otherBootDiagnosticsMananged(t *testin { // Removed Config: 
r.otherBootDiagnosticsDisabled(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -94,7 +93,7 @@ func TestAccWindowsVirtualMachineScaleSet_otherBootDiagnosticsMananged(t *testin { // Enabled Config: r.otherBootDiagnosticsManaged(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -108,10 +107,10 @@ func TestAccWindowsVirtualMachineScaleSet_otherComputerNamePrefix(t *testing.T) data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherComputerNamePrefix(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -125,7 +124,7 @@ func TestAccWindowsVirtualMachineScaleSet_otherComputerNamePrefixInvalid(t *test data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherComputerNamePrefixInvalid(data), ExpectError: regexp.MustCompile("unable to assume default computer name prefix"), @@ -137,10 +136,10 @@ func TestAccWindowsVirtualMachineScaleSet_otherCustomData(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherCustomData(data, "/bin/bash"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -150,7 +149,7 @@ func 
TestAccWindowsVirtualMachineScaleSet_otherCustomData(t *testing.T) { ), { Config: r.otherCustomData(data, "/bin/zsh"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -161,7 +160,7 @@ func TestAccWindowsVirtualMachineScaleSet_otherCustomData(t *testing.T) { { // removed Config: r.authPassword(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -172,14 +171,31 @@ func TestAccWindowsVirtualMachineScaleSet_otherCustomData(t *testing.T) { }) } +func TestAccWindowsVirtualMachineScaleSet_otherForceDelete(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") + r := WindowsVirtualMachineScaleSetResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.otherForceDelete(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep( + "admin_password", + ), + }) +} + func TestAccWindowsVirtualMachineScaleSet_otherEnableAutomaticUpdatesDisabled(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherEnableAutomaticUpdatesDisabled(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -189,7 +205,7 @@ func TestAccWindowsVirtualMachineScaleSet_otherEnableAutomaticUpdatesDisabled(t { // enabled Config: r.authPassword(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -198,7 +214,7 @@ func TestAccWindowsVirtualMachineScaleSet_otherEnableAutomaticUpdatesDisabled(t ), { Config: 
r.otherEnableAutomaticUpdatesDisabled(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -212,10 +228,10 @@ func TestAccWindowsVirtualMachineScaleSet_otherPrioritySpotDeallocate(t *testing data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherPrioritySpot(data, "Deallocate"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -229,10 +245,10 @@ func TestAccWindowsVirtualMachineScaleSet_otherPrioritySpotDelete(t *testing.T) data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherPrioritySpot(data, "Delete"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -246,11 +262,11 @@ func TestAccWindowsVirtualMachineScaleSet_otherPrioritySpotMaxBidPrice(t *testin data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // expensive, but guarantees this test will pass Config: r.otherPrioritySpotMaxBidPrice(data, "0.5000"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -259,7 +275,7 @@ func TestAccWindowsVirtualMachineScaleSet_otherPrioritySpotMaxBidPrice(t *testin ), { Config: r.otherPrioritySpotMaxBidPrice(data, "-1"), - Check: resource.ComposeTestCheckFunc( + Check: 
acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -273,10 +289,10 @@ func TestAccWindowsVirtualMachineScaleSet_otherPriorityRegular(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherPriorityRegular(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -290,10 +306,10 @@ func TestAccWindowsVirtualMachineScaleSet_otherRequiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.authPassword(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -308,10 +324,10 @@ func TestAccWindowsVirtualMachineScaleSet_otherSecret(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherSecret(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -321,7 +337,7 @@ func TestAccWindowsVirtualMachineScaleSet_otherSecret(t *testing.T) { { // update Config: r.otherSecretUpdated(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -332,7 +348,7 @@ func TestAccWindowsVirtualMachineScaleSet_otherSecret(t *testing.T) { { // removed Config: r.otherSecretRemoved(data), - 
Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -346,10 +362,10 @@ func TestAccWindowsVirtualMachineScaleSet_otherTags(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherTags(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -359,7 +375,7 @@ func TestAccWindowsVirtualMachineScaleSet_otherTags(t *testing.T) { { // add one Config: r.otherTagsUpdated(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -369,7 +385,7 @@ func TestAccWindowsVirtualMachineScaleSet_otherTags(t *testing.T) { { // remove all Config: r.authPassword(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -383,10 +399,10 @@ func TestAccWindowsVirtualMachineScaleSet_otherTimeZone(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherTimeZone(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -400,10 +416,10 @@ func TestAccWindowsVirtualMachineScaleSet_otherVMAgent(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: 
r.otherVMAgent(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -417,10 +433,10 @@ func TestAccWindowsVirtualMachineScaleSet_otherVMAgentDisabled(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherVMAgent(data, false), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -434,10 +450,10 @@ func TestAccWindowsVirtualMachineScaleSet_otherWinRMHTTP(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherWinRMHTTP(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -451,10 +467,10 @@ func TestAccWindowsVirtualMachineScaleSet_otherWinRMHTTPS(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherWinRMHTTPS(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -468,16 +484,16 @@ func TestAccWindowsVirtualMachineScaleSet_updateHealthProbe(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: 
r.updateLoadBalancerHealthProbeSKUBasic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, { Config: r.updateLoadBalancerHealthProbeSKUStandard(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -488,10 +504,10 @@ func TestAccWindowsVirtualMachineScaleSet_otherUpgradeMode(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherUpgradeMode(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -506,10 +522,10 @@ func TestAccWindowsVirtualMachineScaleSet_otherScaleInPolicy(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherScaleInPolicy(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("scale_in_policy").HasValue("Default"), ), @@ -524,11 +540,11 @@ func TestAccWindowsVirtualMachineScaleSet_otherTerminateNotification(t *testing. 
data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ // turn terminate notification on { Config: r.otherTerminateNotification(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("terminate_notification.#").HasValue("1"), check.That(data.ResourceName).Key("terminate_notification.0.enabled").HasValue("true"), @@ -540,7 +556,7 @@ func TestAccWindowsVirtualMachineScaleSet_otherTerminateNotification(t *testing. // turn terminate notification off { Config: r.otherTerminateNotification(data, false), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("terminate_notification.#").HasValue("1"), check.That(data.ResourceName).Key("terminate_notification.0.enabled").HasValue("false"), @@ -552,7 +568,7 @@ func TestAccWindowsVirtualMachineScaleSet_otherTerminateNotification(t *testing. 
// turn terminate notification on again { Config: r.otherTerminateNotification(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("terminate_notification.#").HasValue("1"), check.That(data.ResourceName).Key("terminate_notification.0.enabled").HasValue("true"), @@ -568,11 +584,11 @@ func TestAccWindowsVirtualMachineScaleSet_otherAutomaticRepairsPolicy(t *testing data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ // turn automatic repair on { Config: r.otherAutomaticRepairsPolicy(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -582,7 +598,7 @@ func TestAccWindowsVirtualMachineScaleSet_otherAutomaticRepairsPolicy(t *testing // turn automatic repair off { Config: r.otherAutomaticRepairsPolicy(data, false), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -592,7 +608,7 @@ func TestAccWindowsVirtualMachineScaleSet_otherAutomaticRepairsPolicy(t *testing // turn automatic repair on again { Config: r.otherAutomaticRepairsPolicy(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -606,10 +622,10 @@ func TestAccWindowsVirtualMachineScaleSet_otherEncryptionAtHostEnabled(t *testin data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherEncryptionAtHostEnabled(data, true), - Check: 
resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -621,24 +637,24 @@ func TestAccWindowsVirtualMachineScaleSet_otherEncryptionAtHostEnabledUpdate(t * data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherEncryptionAtHostEnabled(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep("admin_password", "extension.0.protected_settings"), { Config: r.otherEncryptionAtHostEnabled(data, false), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep("admin_password", "extension.0.protected_settings"), { Config: r.otherEncryptionAtHostEnabled(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -650,10 +666,10 @@ func TestAccWindowsVirtualMachineScaleSet_otherEncryptionAtHostEnabledWithCMK(t data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherEncryptionAtHostEnabledWithCMK(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -665,10 +681,10 @@ func TestAccWindowsVirtualMachineScaleSet_otherPlatformFaultDomainCount(t *testi data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + 
data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherPlatformFaultDomainCount(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -680,10 +696,10 @@ func TestAccWindowsVirtualMachineScaleSet_otherRollingUpgradePolicyUpdate(t *tes data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherRollingUpgradePolicyUpdate(data, 40, 40, 40, "PT0S"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -692,7 +708,7 @@ func TestAccWindowsVirtualMachineScaleSet_otherRollingUpgradePolicyUpdate(t *tes ), { Config: r.otherRollingUpgradePolicyUpdate(data, 30, 100, 100, "PT1S"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -706,10 +722,10 @@ func TestAccWindowsVirtualMachineScaleSet_otherHealthProbeUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherHealthProbe(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -719,7 +735,7 @@ func TestAccWindowsVirtualMachineScaleSet_otherHealthProbeUpdate(t *testing.T) { ), { Config: r.otherHealthProbeUpdated(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -734,10 +750,10 @@ func TestAccWindowsVirtualMachineScaleSet_otherLicenseTypeUpdated(t *testing.T) data := 
acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.otherLicenseTypeDefault(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -746,7 +762,7 @@ func TestAccWindowsVirtualMachineScaleSet_otherLicenseTypeUpdated(t *testing.T) ), { Config: r.otherLicenseType(data, "Windows_Client"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("license_type").HasValue("Windows_Client"), ), @@ -756,7 +772,7 @@ func TestAccWindowsVirtualMachineScaleSet_otherLicenseTypeUpdated(t *testing.T) ), { Config: r.otherLicenseTypeDefault(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -1077,6 +1093,53 @@ resource "azurerm_windows_virtual_machine_scale_set" "test" { `, r.template(data), customData) } +func (r WindowsVirtualMachineScaleSetResource) otherForceDelete(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features { + virtual_machine_scale_set { + force_delete = true + } + } +} + +%s + +resource "azurerm_windows_virtual_machine_scale_set" "test" { + name = local.vm_name + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + sku = "Standard_F2" + instances = 1 + admin_username = "adminuser" + admin_password = "P@ssword1234!" 
+ + source_image_reference { + publisher = "MicrosoftWindowsServer" + offer = "WindowsServer" + sku = "2019-Datacenter" + version = "latest" + } + + os_disk { + storage_account_type = "Standard_LRS" + caching = "ReadWrite" + } + + network_interface { + name = "example" + primary = true + + ip_configuration { + name = "internal" + primary = true + subnet_id = azurerm_subnet.test.id + } + } +} +`, r.template(data)) +} + func (r WindowsVirtualMachineScaleSetResource) otherEnableAutomaticUpdatesDisabled(data acceptance.TestData) string { return fmt.Sprintf(` %s diff --git a/azurerm/internal/services/compute/windows_virtual_machine_scale_set_resource.go b/azurerm/internal/services/compute/windows_virtual_machine_scale_set_resource.go index 60656710e2ec..7034f9aaa774 100644 --- a/azurerm/internal/services/compute/windows_virtual_machine_scale_set_resource.go +++ b/azurerm/internal/services/compute/windows_virtual_machine_scale_set_resource.go @@ -6,8 +6,6 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" @@ -18,12 +16,13 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/base64" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/suppress" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func 
resourceWindowsVirtualMachineScaleSet() *schema.Resource { - return &schema.Resource{ +func resourceWindowsVirtualMachineScaleSet() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceWindowsVirtualMachineScaleSetCreate, Read: resourceWindowsVirtualMachineScaleSetRead, Update: resourceWindowsVirtualMachineScaleSetUpdate, @@ -34,19 +33,19 @@ func resourceWindowsVirtualMachineScaleSet() *schema.Resource { return err }, importVirtualMachineScaleSet(compute.Windows, "azurerm_windows_virtual_machine_scale_set")), - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(60 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(60 * time.Minute), - Delete: schema.DefaultTimeout(60 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(60 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(60 * time.Minute), + Delete: pluginsdk.DefaultTimeout(60 * time.Minute), }, // TODO: exposing requireGuestProvisionSignal once it's available // https://github.com/Azure/azure-rest-api-specs/pull/7246 - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: computeValidate.VirtualMachineName, @@ -58,14 +57,14 @@ func resourceWindowsVirtualMachineScaleSet() *schema.Resource { // Required "admin_username": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validation.StringIsNotEmpty, }, "admin_password": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, Sensitive: true, @@ -78,13 +77,13 @@ func resourceWindowsVirtualMachineScaleSet() *schema.Resource { "os_disk": VirtualMachineScaleSetOSDiskSchema(), "instances": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Required: true, ValidateFunc: 
validation.IntAtLeast(0), }, "sku": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringIsNotEmpty, }, @@ -101,7 +100,7 @@ func resourceWindowsVirtualMachineScaleSet() *schema.Resource { "boot_diagnostics": bootDiagnosticsSchema(), "computer_name_prefix": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, // Computed since we reuse the VM name if one's not specified @@ -116,25 +115,25 @@ func resourceWindowsVirtualMachineScaleSet() *schema.Resource { "data_disk": VirtualMachineScaleSetDataDiskSchema(), "do_not_run_extensions_on_overprovisioned_machines": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: false, }, "enable_automatic_updates": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: true, }, "encryption_at_host_enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, }, "eviction_policy": { // only applicable when `priority` is set to `Spot` - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, ValidateFunc: validation.StringInSlice([]string{ @@ -146,14 +145,14 @@ func resourceWindowsVirtualMachineScaleSet() *schema.Resource { "extension": VirtualMachineScaleSetExtensionsSchema(), "extensions_time_budget": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Default: "PT1H30M", ValidateFunc: validate.ISO8601DurationBetween("PT15M", "PT2H"), }, "health_probe_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ValidateFunc: azure.ValidateResourceID, }, @@ -161,14 +160,14 @@ func resourceWindowsVirtualMachineScaleSet() *schema.Resource { "identity": VirtualMachineScaleSetIdentitySchema(), "license_type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ValidateFunc: validation.StringInSlice([]string{ "None", "Windows_Client", "Windows_Server", }, false), - DiffSuppressFunc: func(_, old, new 
string, _ *schema.ResourceData) bool { + DiffSuppressFunc: func(_, old, new string, _ *pluginsdk.ResourceData) bool { if old == "None" && new == "" || old == "" && new == "None" { return true } @@ -178,14 +177,14 @@ func resourceWindowsVirtualMachineScaleSet() *schema.Resource { }, "max_bid_price": { - Type: schema.TypeFloat, + Type: pluginsdk.TypeFloat, Optional: true, Default: -1, ValidateFunc: computeValidate.SpotMaxPrice, }, "overprovision": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: true, }, @@ -193,14 +192,14 @@ func resourceWindowsVirtualMachineScaleSet() *schema.Resource { "plan": planSchema(), "platform_fault_domain_count": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, ForceNew: true, Computed: true, }, "priority": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, Default: string(compute.Regular), @@ -211,14 +210,14 @@ func resourceWindowsVirtualMachineScaleSet() *schema.Resource { }, "provision_vm_agent": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: true, ForceNew: true, }, "proximity_placement_group_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, ValidateFunc: azure.ValidateResourceID, @@ -231,13 +230,13 @@ func resourceWindowsVirtualMachineScaleSet() *schema.Resource { "secret": windowsSecretSchema(), "single_placement_group": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: true, }, "source_image_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ValidateFunc: azure.ValidateResourceID, }, @@ -247,13 +246,13 @@ func resourceWindowsVirtualMachineScaleSet() *schema.Resource { "tags": tags.Schema(), "timezone": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ValidateFunc: computeValidate.VirtualMachineTimeZone(), }, "upgrade_mode": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, 
Optional: true, ForceNew: true, Default: string(compute.Manual), @@ -267,14 +266,14 @@ func resourceWindowsVirtualMachineScaleSet() *schema.Resource { "winrm_listener": winRmListenerSchema(), "zone_balance": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, ForceNew: true, Default: false, }, "scale_in_policy": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Default: string(compute.Default), ValidateFunc: validation.StringInSlice([]string{ @@ -290,14 +289,14 @@ func resourceWindowsVirtualMachineScaleSet() *schema.Resource { // Computed "unique_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, }, } } -func resourceWindowsVirtualMachineScaleSetCreate(d *schema.ResourceData, meta interface{}) error { +func resourceWindowsVirtualMachineScaleSetCreate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.VMScaleSetClient ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -380,7 +379,7 @@ func resourceWindowsVirtualMachineScaleSetCreate(d *schema.ResourceData, meta in return fmt.Errorf("A `rolling_upgrade_policy` block must be specified when `upgrade_mode` is set to %q", string(upgradeMode)) } - winRmListenersRaw := d.Get("winrm_listener").(*schema.Set).List() + winRmListenersRaw := d.Get("winrm_listener").(*pluginsdk.Set).List() winRmListeners := expandWinRMListener(winRmListenersRaw) secretsRaw := d.Get("secret").([]interface{}) @@ -439,7 +438,7 @@ func resourceWindowsVirtualMachineScaleSetCreate(d *schema.ResourceData, meta in hasHealthExtension := false if vmExtensionsRaw, ok := d.GetOk("extension"); ok { - virtualMachineProfile.ExtensionProfile, hasHealthExtension, err = expandVirtualMachineScaleSetExtensions(vmExtensionsRaw.(*schema.Set).List()) + virtualMachineProfile.ExtensionProfile, hasHealthExtension, err = expandVirtualMachineScaleSetExtensions(vmExtensionsRaw.(*pluginsdk.Set).List()) if err != nil { 
return err } @@ -581,7 +580,7 @@ func resourceWindowsVirtualMachineScaleSetCreate(d *schema.ResourceData, meta in return resourceWindowsVirtualMachineScaleSetRead(d, meta) } -func resourceWindowsVirtualMachineScaleSetUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceWindowsVirtualMachineScaleSetUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.VMScaleSetClient ctx, cancel := timeouts.ForUpdate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -860,7 +859,7 @@ func resourceWindowsVirtualMachineScaleSetUpdate(d *schema.ResourceData, meta in if d.HasChanges("extension", "extensions_time_budget") { updateInstances = true - extensionProfile, _, err := expandVirtualMachineScaleSetExtensions(d.Get("extension").(*schema.Set).List()) + extensionProfile, _, err := expandVirtualMachineScaleSetExtensions(d.Get("extension").(*pluginsdk.Set).List()) if err != nil { return err } @@ -891,7 +890,7 @@ func resourceWindowsVirtualMachineScaleSetUpdate(d *schema.ResourceData, meta in return resourceWindowsVirtualMachineScaleSetRead(d, meta) } -func resourceWindowsVirtualMachineScaleSetRead(d *schema.ResourceData, meta interface{}) error { +func resourceWindowsVirtualMachineScaleSetRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.VMScaleSetClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() @@ -1113,7 +1112,7 @@ func resourceWindowsVirtualMachineScaleSetRead(d *schema.ResourceData, meta inte return tags.FlattenAndSet(d, resp.Tags) } -func resourceWindowsVirtualMachineScaleSetDelete(d *schema.ResourceData, meta interface{}) error { +func resourceWindowsVirtualMachineScaleSetDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Compute.VMScaleSetClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() @@ -1161,7 +1160,7 @@ func 
resourceWindowsVirtualMachineScaleSetDelete(d *schema.ResourceData, meta in } log.Printf("[DEBUG] Deleting Windows Virtual Machine Scale Set %q (Resource Group %q)..", id.Name, id.ResourceGroup) - // @ArcturusZhang (mimicking from windows_virtual_machine_resource.go): sending `nil` here omits this value from being sent + // @ArcturusZhang (mimicking from windows_virtual_machine_pluginsdk.go): sending `nil` here omits this value from being sent // which matches the previous behaviour - we're only splitting this out so it's clear why // TODO: support force deletion once it's out of Preview, if applicable var forceDeletion *bool = nil diff --git a/azurerm/internal/services/compute/windows_virtual_machine_scale_set_resource_test.go b/azurerm/internal/services/compute/windows_virtual_machine_scale_set_resource_test.go index 787cf2368670..aed3e6b405d7 100644 --- a/azurerm/internal/services/compute/windows_virtual_machine_scale_set_resource_test.go +++ b/azurerm/internal/services/compute/windows_virtual_machine_scale_set_resource_test.go @@ -4,18 +4,17 @@ import ( "context" "fmt" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/parse" - - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) type WindowsVirtualMachineScaleSetResource struct { } -func (r WindowsVirtualMachineScaleSetResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (r WindowsVirtualMachineScaleSetResource) Exists(ctx context.Context, clients *clients.Client, 
state *pluginsdk.InstanceState) (*bool, error) { id, err := parse.VirtualMachineScaleSetID(state.ID) if err != nil { return nil, err diff --git a/azurerm/internal/services/compute/windows_virtual_machine_scale_set_scaling_resource_test.go b/azurerm/internal/services/compute/windows_virtual_machine_scale_set_scaling_resource_test.go index 41b105f3cfe6..8923eb56cceb 100644 --- a/azurerm/internal/services/compute/windows_virtual_machine_scale_set_scaling_resource_test.go +++ b/azurerm/internal/services/compute/windows_virtual_machine_scale_set_scaling_resource_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -13,10 +12,10 @@ func TestAccWindowsVirtualMachineScaleSet_scalingAutoScale(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.scalingAutoScale(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -30,10 +29,10 @@ func TestAccWindowsVirtualMachineScaleSet_scalingInstanceCount(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.scalingInstanceCount(data, 1), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -42,7 +41,7 @@ func TestAccWindowsVirtualMachineScaleSet_scalingInstanceCount(t *testing.T) { ), { Config: 
r.scalingInstanceCount(data, 3), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -51,7 +50,7 @@ func TestAccWindowsVirtualMachineScaleSet_scalingInstanceCount(t *testing.T) { ), { Config: r.scalingInstanceCount(data, 5), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -61,7 +60,7 @@ func TestAccWindowsVirtualMachineScaleSet_scalingInstanceCount(t *testing.T) { { // update the count but the `sku` should be ignored Config: r.scalingInstanceCountIgnoreUpdatedSku(data, 3), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -80,10 +79,10 @@ func TestAccWindowsVirtualMachineScaleSet_scalingOverProvisionDisabled(t *testin data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.scalingOverProvisionDisabled(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -97,10 +96,10 @@ func TestAccWindowsVirtualMachineScaleSet_scalingProximityPlacementGroup(t *test data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.scalingProximityPlacementGroup(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -114,10 +113,10 @@ func TestAccWindowsVirtualMachineScaleSet_scalingSinglePlacementGroupDisabled(t data := acceptance.BuildTestData(t, 
"azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.scalingSinglePlacementGroupDisabled(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -131,10 +130,10 @@ func TestAccWindowsVirtualMachineScaleSet_scalingSinglePlacementGroupDisabledUpd data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.authPassword(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -143,7 +142,7 @@ func TestAccWindowsVirtualMachineScaleSet_scalingSinglePlacementGroupDisabledUpd ), { Config: r.scalingSinglePlacementGroupDisabled(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -157,10 +156,10 @@ func TestAccWindowsVirtualMachineScaleSet_scalingUpdateSku(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.scalingUpdateSku(data, "Standard_F2"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -169,7 +168,7 @@ func TestAccWindowsVirtualMachineScaleSet_scalingUpdateSku(t *testing.T) { ), { Config: r.scalingUpdateSku(data, "Standard_F4"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -178,7 +177,7 @@ 
func TestAccWindowsVirtualMachineScaleSet_scalingUpdateSku(t *testing.T) { ), { Config: r.scalingUpdateSku(data, "Standard_F2"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -188,7 +187,7 @@ func TestAccWindowsVirtualMachineScaleSet_scalingUpdateSku(t *testing.T) { { // confirms that the `instances` count comes from the API Config: r.scalingUpdateSkuIgnoredUpdatedCount(data, "Standard_F2"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -203,10 +202,10 @@ func TestAccWindowsVirtualMachineScaleSet_scalingZonesSingle(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.scalingZonesSingle(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -220,10 +219,10 @@ func TestAccWindowsVirtualMachineScaleSet_scalingZonesMultiple(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.scalingZonesMultiple(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -237,10 +236,10 @@ func TestAccWindowsVirtualMachineScaleSet_scalingZonesBalance(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_windows_virtual_machine_scale_set", "test") r := WindowsVirtualMachineScaleSetResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: 
r.scalingZonesBalance(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, diff --git a/azurerm/internal/services/containers/container_group_resource.go b/azurerm/internal/services/containers/container_group_resource.go index af4897a36def..19aeacc96c78 100644 --- a/azurerm/internal/services/containers/container_group_resource.go +++ b/azurerm/internal/services/containers/container_group_resource.go @@ -9,10 +9,7 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/services/containerinstance/mgmt/2019-12-01/containerinstance" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" @@ -23,12 +20,13 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/suppress" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func resourceContainerGroup() *schema.Resource { - return &schema.Resource{ +func resourceContainerGroup() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceContainerGroupCreate, Read: resourceContainerGroupRead, Delete: 
resourceContainerGroupDelete, @@ -36,16 +34,16 @@ func resourceContainerGroup() *schema.Resource { // TODO: replace this with an importer which validates the ID during import Importer: pluginsdk.DefaultImporter(), - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(30 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validation.StringIsNotEmpty, @@ -56,7 +54,7 @@ func resourceContainerGroup() *schema.Resource { "resource_group_name": azure.SchemaResourceGroupName(), "ip_address_type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Default: "Public", ForceNew: true, @@ -68,7 +66,7 @@ func resourceContainerGroup() *schema.Resource { }, "network_profile_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, ValidateFunc: validation.StringIsNotEmpty, @@ -81,7 +79,7 @@ func resourceContainerGroup() *schema.Resource { }, "os_type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, DiffSuppressFunc: suppress.CaseDifference, @@ -92,27 +90,27 @@ func resourceContainerGroup() *schema.Resource { }, "image_registry_credential": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "server": { - Type: schema.TypeString, + Type: 
pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validation.StringIsNotEmpty, }, "username": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validation.StringIsNotEmpty, }, "password": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, Sensitive: true, ForceNew: true, @@ -123,14 +121,14 @@ func resourceContainerGroup() *schema.Resource { }, "identity": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, Computed: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ "SystemAssigned", @@ -139,16 +137,16 @@ func resourceContainerGroup() *schema.Resource { }, false), }, "principal_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "identity_ids": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, MinItems: 1, ForceNew: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: msivalidate.UserAssignedIdentityID, }, }, @@ -159,7 +157,7 @@ func resourceContainerGroup() *schema.Resource { "tags": tags.Schema(), "restart_policy": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, Default: string(containerinstance.Always), @@ -172,29 +170,29 @@ func resourceContainerGroup() *schema.Resource { }, "dns_name_label": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, }, "exposed_port": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, // change to 'Required' in 3.0 of the provider ForceNew: true, - Computed: true, // remove in 3.0 of the provider - ConfigMode: schema.SchemaConfigModeAttr, // remove in 3.0 of the 
provider + Computed: true, // remove in 3.0 of the provider + ConfigMode: pluginsdk.SchemaConfigModeAttr, // remove in 3.0 of the provider Set: resourceContainerGroupPortsHash, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "port": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, ForceNew: true, ValidateFunc: validate.PortNumber, }, "protocol": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, Default: string(containerinstance.TCP), @@ -208,47 +206,47 @@ func resourceContainerGroup() *schema.Resource { }, "container": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Required: true, ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validation.StringIsNotEmpty, }, "image": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validation.StringIsNotEmpty, }, "cpu": { - Type: schema.TypeFloat, + Type: pluginsdk.TypeFloat, Required: true, ForceNew: true, }, "memory": { - Type: schema.TypeFloat, + Type: pluginsdk.TypeFloat, Required: true, ForceNew: true, }, //lintignore:XS003 "gpu": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, MaxItems: 1, ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "count": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, ForceNew: true, ValidateFunc: validation.IntInSlice([]int{ @@ -259,7 +257,7 @@ func resourceContainerGroup() *schema.Resource { }, "sku": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, ValidateFunc: validation.StringInSlice([]string{ @@ 
-273,21 +271,21 @@ func resourceContainerGroup() *schema.Resource { }, "ports": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, ForceNew: true, Set: resourceContainerGroupPortsHash, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "port": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, ForceNew: true, ValidateFunc: validate.PortNumber, }, "protocol": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, Default: string(containerinstance.TCP), @@ -301,78 +299,78 @@ func resourceContainerGroup() *schema.Resource { }, "environment_variables": { - Type: schema.TypeMap, + Type: pluginsdk.TypeMap, ForceNew: true, Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, "secure_environment_variables": { - Type: schema.TypeMap, + Type: pluginsdk.TypeMap, Optional: true, ForceNew: true, Sensitive: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, "commands": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, Computed: true, ForceNew: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: validation.StringIsNotEmpty, }, }, "volume": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validation.StringIsNotEmpty, }, "mount_path": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validation.StringIsNotEmpty, }, "read_only": { - Type: schema.TypeBool, + Type: 
pluginsdk.TypeBool, Optional: true, ForceNew: true, Default: false, }, "share_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, ValidateFunc: validation.StringIsNotEmpty, }, "storage_account_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, ValidateFunc: validation.StringIsNotEmpty, }, "storage_account_key": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Sensitive: true, ForceNew: true, @@ -380,33 +378,33 @@ func resourceContainerGroup() *schema.Resource { }, "empty_dir": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, ForceNew: true, Default: false, }, "git_repo": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, ForceNew: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "url": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, }, "directory": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, }, "revision": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, }, @@ -415,12 +413,12 @@ func resourceContainerGroup() *schema.Resource { }, "secret": { - Type: schema.TypeMap, + Type: pluginsdk.TypeMap, ForceNew: true, Optional: true, Sensitive: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, }, @@ -435,28 +433,28 @@ func resourceContainerGroup() *schema.Resource { }, "diagnostics": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, ForceNew: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "log_analytics": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Required: true, ForceNew: true, 
MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "workspace_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validation.IsUUID, }, "workspace_key": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, Sensitive: true, ForceNew: true, @@ -464,7 +462,7 @@ func resourceContainerGroup() *schema.Resource { }, "log_type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, ValidateFunc: validation.StringInSlice([]string{ @@ -474,11 +472,11 @@ func resourceContainerGroup() *schema.Resource { }, "metadata": { - Type: schema.TypeMap, + Type: pluginsdk.TypeMap, Optional: true, ForceNew: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, }, @@ -489,12 +487,12 @@ func resourceContainerGroup() *schema.Resource { }, "ip_address": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "fqdn": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, @@ -502,32 +500,32 @@ func resourceContainerGroup() *schema.Resource { Optional: true, MaxItems: 1, ForceNew: true, - Type: schema.TypeList, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Type: pluginsdk.TypeList, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "nameservers": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Required: true, ForceNew: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, "search_domains": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Required: true, ForceNew: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: validation.StringIsNotEmpty, }, }, "options": { - Type: schema.TypeSet, + Type: 
pluginsdk.TypeSet, Required: true, ForceNew: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: validation.StringIsNotEmpty, }, }, @@ -538,7 +536,7 @@ func resourceContainerGroup() *schema.Resource { } } -func resourceContainerGroupCreate(d *schema.ResourceData, meta interface{}) error { +func resourceContainerGroupCreate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Containers.GroupsClient ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -629,7 +627,7 @@ func resourceContainerGroupCreate(d *schema.ResourceData, meta interface{}) erro return resourceContainerGroupRead(d, meta) } -func resourceContainerGroupUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceContainerGroupUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Containers.GroupsClient ctx, cancel := timeouts.ForUpdate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -652,7 +650,7 @@ func resourceContainerGroupUpdate(d *schema.ResourceData, meta interface{}) erro return resourceContainerGroupRead(d, meta) } -func resourceContainerGroupRead(d *schema.ResourceData, meta interface{}) error { +func resourceContainerGroupRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Containers.GroupsClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() @@ -723,7 +721,7 @@ func resourceContainerGroupRead(d *schema.ResourceData, meta interface{}) error return tags.FlattenAndSet(d, resp.Tags) } -func flattenPorts(ports []interface{}) *schema.Set { +func flattenPorts(ports []interface{}) *pluginsdk.Set { if len(ports) > 0 { flatPorts := make([]interface{}, 0) for _, p := range ports { @@ -742,12 +740,12 @@ func flattenPorts(ports []interface{}) *schema.Set { } flatPorts = append(flatPorts, port) } - return 
schema.NewSet(resourceContainerGroupPortsHash, flatPorts) + return pluginsdk.NewSet(resourceContainerGroupPortsHash, flatPorts) } - return schema.NewSet(resourceContainerGroupPortsHash, make([]interface{}, 0)) + return pluginsdk.NewSet(resourceContainerGroupPortsHash, make([]interface{}, 0)) } -func resourceContainerGroupDelete(d *schema.ResourceData, meta interface{}) error { +func resourceContainerGroupDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Containers.GroupsClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() @@ -799,16 +797,16 @@ func resourceContainerGroupDelete(d *schema.ResourceData, meta interface{}) erro // TODO: remove when https://github.com/Azure/azure-sdk-for-go/issues/5082 has been fixed log.Printf("[DEBUG] Waiting for Container Group %q (Resource Group %q) to be finish deleting", name, resourceGroup) - stateConf := &resource.StateChangeConf{ + stateConf := &pluginsdk.StateChangeConf{ Pending: []string{"Attached"}, Target: []string{"Detached"}, Refresh: containerGroupEnsureDetachedFromNetworkProfileRefreshFunc(ctx, networkProfileClient, networkProfileResourceGroup, networkProfileName, resourceGroup, name), MinTimeout: 15 * time.Second, ContinuousTargetOccurence: 5, - Timeout: d.Timeout(schema.TimeoutDelete), + Timeout: d.Timeout(pluginsdk.TimeoutDelete), } - if _, err := stateConf.WaitForState(); err != nil { + if _, err := stateConf.WaitForStateContext(ctx); err != nil { return fmt.Errorf("Error waiting for Container Group %q (Resource Group %q) to finish deleting: %s", name, resourceGroup, err) } } @@ -819,7 +817,7 @@ func resourceContainerGroupDelete(d *schema.ResourceData, meta interface{}) erro func containerGroupEnsureDetachedFromNetworkProfileRefreshFunc(ctx context.Context, client *network.ProfilesClient, networkProfileResourceGroup, networkProfileName, - containerResourceGroupName, containerName string) resource.StateRefreshFunc { + 
containerResourceGroupName, containerName string) pluginsdk.StateRefreshFunc { return func() (interface{}, string, error) { profile, err := client.Get(ctx, networkProfileResourceGroup, networkProfileName, "") if err != nil { @@ -863,7 +861,7 @@ func containerGroupEnsureDetachedFromNetworkProfileRefreshFunc(ctx context.Conte } } -func expandContainerGroupContainers(d *schema.ResourceData) (*[]containerinstance.Container, *[]containerinstance.Port, *[]containerinstance.Volume, error) { +func expandContainerGroupContainers(d *pluginsdk.ResourceData) (*[]containerinstance.Container, *[]containerinstance.Port, *[]containerinstance.Volume, error) { containersConfig := d.Get("container").([]interface{}) containers := make([]containerinstance.Container, 0) containerInstancePorts := make([]containerinstance.Port, 0) @@ -909,7 +907,7 @@ func expandContainerGroupContainers(d *schema.ResourceData) (*[]containerinstanc } } - if v, ok := data["ports"].(*schema.Set); ok && len(v.List()) > 0 { + if v, ok := data["ports"].(*pluginsdk.Set); ok && len(v.List()) > 0 { var ports []containerinstance.ContainerPort for _, v := range v.List() { portObj := v.(map[string]interface{}) @@ -983,7 +981,7 @@ func expandContainerGroupContainers(d *schema.ResourceData) (*[]containerinstanc // Determine ports to be exposed on the group level, based on exposed_ports // and on what ports have been exposed on individual containers. 
- if v, ok := d.Get("exposed_port").(*schema.Set); ok && len(v.List()) > 0 { + if v, ok := d.Get("exposed_port").(*pluginsdk.Set); ok && len(v.List()) > 0 { cgpMap := make(map[int32]map[containerinstance.ContainerGroupNetworkProtocol]bool) for _, p := range containerInstancePorts { if val, ok := cgpMap[*p.Port]; ok { @@ -1043,7 +1041,7 @@ func expandContainerEnvironmentVariables(input interface{}, secure bool) *[]cont return &output } -func expandContainerGroupIdentity(d *schema.ResourceData) *containerinstance.ContainerGroupIdentity { +func expandContainerGroupIdentity(d *pluginsdk.ResourceData) *containerinstance.ContainerGroupIdentity { v := d.Get("identity") identities := v.([]interface{}) if len(identities) == 0 { @@ -1068,7 +1066,7 @@ func expandContainerGroupIdentity(d *schema.ResourceData) *containerinstance.Con return &cgIdentity } -func expandContainerImageRegistryCredentials(d *schema.ResourceData) *[]containerinstance.ImageRegistryCredential { +func expandContainerImageRegistryCredentials(d *pluginsdk.ResourceData) *[]containerinstance.ImageRegistryCredential { credsRaw := d.Get("image_registry_credential").([]interface{}) if len(credsRaw) == 0 { return nil @@ -1291,7 +1289,7 @@ func flattenContainerGroupIdentity(identity *containerinstance.ContainerGroupIde return []interface{}{result}, nil } -func flattenContainerImageRegistryCredentials(d *schema.ResourceData, input *[]containerinstance.ImageRegistryCredential) []interface{} { +func flattenContainerImageRegistryCredentials(d *pluginsdk.ResourceData, input *[]containerinstance.ImageRegistryCredential) []interface{} { if input == nil { return nil } @@ -1322,7 +1320,7 @@ func flattenContainerImageRegistryCredentials(d *schema.ResourceData, input *[]c return output } -func flattenContainerGroupContainers(d *schema.ResourceData, containers *[]containerinstance.Container, containerGroupVolumes *[]containerinstance.Volume) []interface{} { +func flattenContainerGroupContainers(d *pluginsdk.ResourceData, 
containers *[]containerinstance.Container, containerGroupVolumes *[]containerinstance.Volume) []interface{} { // map old container names to index so we can look up things up nameIndexMap := map[string]int{} for i, c := range d.Get("container").([]interface{}) { @@ -1419,7 +1417,7 @@ func flattenContainerGroupContainers(d *schema.ResourceData, containers *[]conta return containerCfg } -func flattenContainerEnvironmentVariables(input *[]containerinstance.EnvironmentVariable, isSecure bool, d *schema.ResourceData, oldContainerIndex int) map[string]interface{} { +func flattenContainerEnvironmentVariables(input *[]containerinstance.EnvironmentVariable, isSecure bool, d *pluginsdk.ResourceData, oldContainerIndex int) map[string]interface{} { output := make(map[string]interface{}) if input == nil { @@ -1625,7 +1623,7 @@ func expandContainerGroupDiagnostics(input []interface{}) *containerinstance.Con return &containerinstance.ContainerGroupDiagnostics{LogAnalytics: &logAnalytics} } -func flattenContainerGroupDiagnostics(d *schema.ResourceData, input *containerinstance.ContainerGroupDiagnostics) []interface{} { +func flattenContainerGroupDiagnostics(d *pluginsdk.ResourceData, input *containerinstance.ContainerGroupDiagnostics) []interface{} { if input == nil { return []interface{}{} } @@ -1678,7 +1676,7 @@ func resourceContainerGroupPortsHash(v interface{}) int { buf.WriteString(fmt.Sprintf("%s-", m["protocol"].(string))) } - return schema.HashString(buf.String()) + return pluginsdk.HashString(buf.String()) } func flattenContainerGroupDnsConfig(input *containerinstance.DNSConfiguration) []interface{} { @@ -1722,11 +1720,11 @@ func expandContainerGroupDnsConfig(input interface{}) *containerinstance.DNSConf nameservers = append(nameservers, v.(string)) } options := []string{} - for _, v := range config["options"].(*schema.Set).List() { + for _, v := range config["options"].(*pluginsdk.Set).List() { options = append(options, v.(string)) } searchDomains := []string{} - for _, v 
:= range config["search_domains"].(*schema.Set).List() { + for _, v := range config["search_domains"].(*pluginsdk.Set).List() { searchDomains = append(searchDomains, v.(string)) } diff --git a/azurerm/internal/services/containers/container_group_resource_test.go b/azurerm/internal/services/containers/container_group_resource_test.go index 50ad1d576525..0bff2c56e004 100644 --- a/azurerm/internal/services/containers/container_group_resource_test.go +++ b/azurerm/internal/services/containers/container_group_resource_test.go @@ -5,13 +5,12 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -22,14 +21,14 @@ func TestAccContainerGroup_SystemAssignedIdentity(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_container_group", "test") r := ContainerGroupResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.SystemAssignedIdentity(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("identity.0.type").HasValue("SystemAssigned"), check.That(data.ResourceName).Key("identity.0.identity_ids.#").HasValue("0"), - resource.TestMatchResourceAttr(data.ResourceName, "identity.0.principal_id", 
validate.UUIDRegExp), + acceptance.TestMatchResourceAttr(data.ResourceName, "identity.0.principal_id", validate.UUIDRegExp), ), }, data.ImportStep("identity.0.principal_id"), @@ -40,10 +39,10 @@ func TestAccContainerGroup_UserAssignedIdentity(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_container_group", "test") r := ContainerGroupResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.UserAssignedIdentity(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("identity.0.type").HasValue("UserAssigned"), check.That(data.ResourceName).Key("identity.0.identity_ids.#").HasValue("1"), @@ -58,14 +57,14 @@ func TestAccContainerGroup_multipleAssignedIdentities(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_container_group", "test") r := ContainerGroupResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.MultipleAssignedIdentities(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("identity.0.type").HasValue("SystemAssigned, UserAssigned"), check.That(data.ResourceName).Key("identity.0.identity_ids.#").HasValue("1"), - resource.TestMatchResourceAttr(data.ResourceName, "identity.0.principal_id", validate.UUIDRegExp), + acceptance.TestMatchResourceAttr(data.ResourceName, "identity.0.principal_id", validate.UUIDRegExp), ), }, data.ImportStep(), @@ -76,10 +75,10 @@ func TestAccContainerGroup_imageRegistryCredentials(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_container_group", "test") r := ContainerGroupResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.imageRegistryCredentials(data), - Check: 
resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("image_registry_credential.#").HasValue("2"), check.That(data.ResourceName).Key("image_registry_credential.0.server").HasValue("hub.docker.com"), @@ -101,10 +100,10 @@ func TestAccContainerGroup_imageRegistryCredentialsUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_container_group", "test") r := ContainerGroupResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.imageRegistryCredentials(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("image_registry_credential.#").HasValue("2"), check.That(data.ResourceName).Key("image_registry_credential.0.server").HasValue("hub.docker.com"), @@ -118,7 +117,7 @@ func TestAccContainerGroup_imageRegistryCredentialsUpdate(t *testing.T) { }, { Config: r.imageRegistryCredentialsUpdated(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("image_registry_credential.#").HasValue("1"), check.That(data.ResourceName).Key("image_registry_credential.0.server").HasValue("hub.docker.com"), @@ -134,10 +133,10 @@ func TestAccContainerGroup_logTypeUnset(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_container_group", "test") r := ContainerGroupResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.logTypeUnset(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("diagnostics.0.log_analytics.#").HasValue("1"), 
check.That(data.ResourceName).Key("diagnostics.0.log_analytics.0.log_type").HasValue(""), @@ -154,10 +153,10 @@ func TestAccContainerGroup_linuxBasic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_container_group", "test") r := ContainerGroupResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.linuxBasic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("container.#").HasValue("1"), check.That(data.ResourceName).Key("os_type").HasValue("Linux"), @@ -175,10 +174,10 @@ func TestAccContainerGroup_exposedPort(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_container_group", "test") r := ContainerGroupResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.exposedPort(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("container.#").HasValue("1"), check.That(data.ResourceName).Key("os_type").HasValue("Linux"), @@ -196,10 +195,10 @@ func TestAccContainerGroup_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_container_group", "test") r := ContainerGroupResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.linuxBasic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -214,17 +213,17 @@ func TestAccContainerGroup_linuxBasicUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_container_group", "test") r := ContainerGroupResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.linuxBasic(data), - Check: 
resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("container.#").HasValue("1"), ), }, { Config: r.linuxBasicUpdated(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("container.#").HasValue("2"), check.That(data.ResourceName).Key("container.0.ports.#").HasValue("2"), @@ -237,17 +236,17 @@ func TestAccContainerGroup_exposedPortUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_container_group", "test") r := ContainerGroupResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.exposedPort(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("exposed_port.#").HasValue("1"), ), }, { Config: r.exposedPortUpdated(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("container.0.ports.#").HasValue("2"), check.That(data.ResourceName).Key("exposed_port.#").HasValue("2"), @@ -260,17 +259,17 @@ func TestAccContainerGroup_linuxBasicTagsUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_container_group", "test") r := ContainerGroupResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.linuxBasic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("container.#").HasValue("1"), ), }, { Config: r.linuxBasicTagsUpdated(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), 
check.That(data.ResourceName).Key("container.#").HasValue("1"), check.That(data.ResourceName).Key("tags.OS").HasValue("Linux"), @@ -283,10 +282,10 @@ func TestAccContainerGroup_linuxComplete(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_container_group", "test") r := ContainerGroupResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.linuxComplete(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("container.#").HasValue("1"), check.That(data.ResourceName).Key("container.0.ports.#").HasValue("1"), @@ -350,13 +349,13 @@ func TestAccContainerGroup_virtualNetwork(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_container_group", "test") r := ContainerGroupResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.virtualNetwork(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), - resource.TestCheckNoResourceAttr(data.ResourceName, "dns_label_name"), - resource.TestCheckNoResourceAttr(data.ResourceName, "identity"), + acceptance.TestCheckNoResourceAttr(data.ResourceName, "dns_label_name"), + acceptance.TestCheckNoResourceAttr(data.ResourceName, "identity"), check.That(data.ResourceName).Key("container.#").HasValue("1"), check.That(data.ResourceName).Key("os_type").HasValue("Linux"), check.That(data.ResourceName).Key("container.0.ports.#").HasValue("1"), @@ -372,10 +371,10 @@ func TestAccContainerGroup_windowsBasic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_container_group", "test") r := ContainerGroupResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.windowsBasic(data), - Check: resource.ComposeTestCheckFunc( + Check: 
acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("container.#").HasValue("1"), check.That(data.ResourceName).Key("os_type").HasValue("Windows"), @@ -390,10 +389,10 @@ func TestAccContainerGroup_windowsComplete(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_container_group", "test") r := ContainerGroupResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.windowsComplete(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("container.#").HasValue("1"), check.That(data.ResourceName).Key("container.0.ports.#").HasValue("1"), @@ -449,10 +448,10 @@ func TestAccContainerGroup_withPrivateEmpty(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_container_group", "test") r := ContainerGroupResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.withPrivateEmpty(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -466,10 +465,10 @@ func TestAccContainerGroup_gitRepoVolume(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_container_group", "test") r := ContainerGroupResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.gitRepoVolume(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -481,10 +480,10 @@ func TestAccContainerGroup_emptyDirVolume(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_container_group", "test") r := ContainerGroupResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.emptyDirVolume(data), - 
Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -496,10 +495,10 @@ func TestAccContainerGroup_secretVolume(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_container_group", "test") r := ContainerGroupResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.secretVolume(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -1600,7 +1599,7 @@ resource "azurerm_container_group" "test" { `, data.RandomInteger, data.Locations.Primary, data.RandomInteger) } -func (t ContainerGroupResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (t ContainerGroupResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := azure.ParseAzureResourceID(state.ID) if err != nil { return nil, err diff --git a/azurerm/internal/services/containers/container_registry_data_source.go b/azurerm/internal/services/containers/container_registry_data_source.go index bef833f4cf3b..f87cb7ffa1b8 100644 --- a/azurerm/internal/services/containers/container_registry_data_source.go +++ b/azurerm/internal/services/containers/container_registry_data_source.go @@ -4,27 +4,26 @@ import ( "fmt" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/containers/validate" - - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/containers/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func dataSourceContainerRegistry() *schema.Resource { - return &schema.Resource{ +func dataSourceContainerRegistry() *pluginsdk.Resource { + return &pluginsdk.Resource{ Read: dataSourceContainerRegistryRead, - Timeouts: &schema.ResourceTimeout{ - Read: schema.DefaultTimeout(5 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Read: pluginsdk.DefaultTimeout(5 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validate.ContainerRegistryName, }, @@ -34,32 +33,32 @@ func dataSourceContainerRegistry() *schema.Resource { "location": azure.SchemaLocationForDataSource(), "admin_enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Computed: true, }, "admin_password": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "admin_username": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "login_server": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "sku": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "storage_account_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, @@ -68,7 +67,7 @@ func dataSourceContainerRegistry() *schema.Resource { } } -func dataSourceContainerRegistryRead(d *schema.ResourceData, meta interface{}) error { +func dataSourceContainerRegistryRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Containers.RegistriesClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() diff --git 
a/azurerm/internal/services/containers/container_registry_data_source_test.go b/azurerm/internal/services/containers/container_registry_data_source_test.go index 6efe1dfae70d..d70d7a7124af 100644 --- a/azurerm/internal/services/containers/container_registry_data_source_test.go +++ b/azurerm/internal/services/containers/container_registry_data_source_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -16,10 +15,10 @@ func TestAccDataSourceAzureRMContainerRegistry_basic(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_container_registry", "test") r := ContainerRegistryDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("name").Exists(), check.That(data.ResourceName).Key("resource_group_name").Exists(), check.That(data.ResourceName).Key("location").Exists(), diff --git a/azurerm/internal/services/containers/container_registry_resource.go b/azurerm/internal/services/containers/container_registry_resource.go index a31901eb0129..1ac5b6aa5f0f 100644 --- a/azurerm/internal/services/containers/container_registry_resource.go +++ b/azurerm/internal/services/containers/container_registry_resource.go @@ -7,12 +7,8 @@ import ( "strings" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" - "github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry" "github.com/hashicorp/go-azure-helpers/response" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" @@ -24,13 +20,15 @@ import ( identityParse "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/msi/parse" identityValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/msi/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/suppress" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func resourceContainerRegistry() *schema.Resource { - return &schema.Resource{ +func resourceContainerRegistry() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceContainerRegistryCreate, Read: resourceContainerRegistryRead, Update: resourceContainerRegistryUpdate, @@ -45,16 +43,16 @@ func resourceContainerRegistry() *schema.Resource { // TODO: replace this with an importer which validates the ID during import Importer: pluginsdk.DefaultImporter(), - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(30 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), }, - Schema: 
map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate2.ContainerRegistryName, @@ -65,7 +63,7 @@ func resourceContainerRegistry() *schema.Resource { "location": azure.SchemaLocation(), "sku": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Default: string(containerregistry.Classic), DiffSuppressFunc: suppress.CaseDifference, @@ -78,82 +76,90 @@ func resourceContainerRegistry() *schema.Resource { }, "admin_enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: false, }, // TODO 3.0 - Remove below property "georeplication_locations": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, Deprecated: "Deprecated in favour of `georeplications`", Computed: true, ConflictsWith: []string{"georeplications"}, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: validation.StringIsNotEmpty, }, Set: location.HashCode, }, "georeplications": { - Type: schema.TypeSet, + // Don't make this a TypeSet since TypeSet has bugs when there is a nested property using `StateFunc`. 
+ // See: https://github.com/hashicorp/terraform-plugin-sdk/issues/160 + Type: pluginsdk.TypeList, Optional: true, Computed: true, // TODO -- remove this when deprecation resolves ConflictsWith: []string{"georeplication_locations"}, - ConfigMode: schema.SchemaConfigModeAttr, // TODO -- remove in 3.0, because this property is optional and computed, it has to be declared as empty array to remove existed values - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + ConfigMode: pluginsdk.SchemaConfigModeAttr, // TODO -- remove in 3.0, because this property is optional and computed, it has to be declared as empty array to remove existed values + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "location": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: location.EnhancedValidate, StateFunc: location.StateFunc, DiffSuppressFunc: location.DiffSuppressFunc, }, + "zone_redundancy_enabled": { + Type: pluginsdk.TypeBool, + Optional: true, + Default: false, + }, + "tags": tags.Schema(), }, }, }, "public_network_access_enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: true, }, "storage_account_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, }, "login_server": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "admin_username": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "admin_password": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, }, "identity": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, Computed: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, DiffSuppressFunc: suppress.CaseDifference, ValidateFunc: 
validation.StringInSlice([]string{ @@ -163,15 +169,19 @@ func resourceContainerRegistry() *schema.Resource { }, false), }, "principal_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, + Computed: true, + }, + "tenant_id": { + Type: pluginsdk.TypeString, Computed: true, }, "identity_ids": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, MinItems: 1, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: identityValidate.UserAssignedIdentityID, }, }, @@ -180,42 +190,42 @@ func resourceContainerRegistry() *schema.Resource { }, "encryption": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, Computed: true, MaxItems: 1, - ConfigMode: schema.SchemaConfigModeAttr, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + ConfigMode: pluginsdk.SchemaConfigModeAttr, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: false, }, "identity_client_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.IsUUID, }, "key_vault_key_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, - ValidateFunc: keyVaultValidate.NestedItemId, + ValidateFunc: keyVaultValidate.NestedItemIdWithOptionalVersion, }, }, }, }, "network_rule_set": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, Computed: true, MaxItems: 1, - ConfigMode: schema.SchemaConfigModeAttr, // make sure we can set this to an empty array for Premium -> Basic - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + ConfigMode: pluginsdk.SchemaConfigModeAttr, // make sure we can set this to an empty array for Premium -> Basic + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "default_action": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, 
Default: containerregistry.DefaultActionAllow, ValidateFunc: validation.StringInSlice([]string{ @@ -225,20 +235,20 @@ func resourceContainerRegistry() *schema.Resource { }, "ip_rule": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, - ConfigMode: schema.SchemaConfigModeAttr, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + ConfigMode: pluginsdk.SchemaConfigModeAttr, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "action": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ string(containerregistry.Allow), }, false), }, "ip_range": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validate.CIDR, }, @@ -247,20 +257,20 @@ func resourceContainerRegistry() *schema.Resource { }, "virtual_network": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, - ConfigMode: schema.SchemaConfigModeAttr, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + ConfigMode: pluginsdk.SchemaConfigModeAttr, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "action": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ string(containerregistry.Allow), }, false), }, "subnet_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: azure.ValidateResourceID, }, @@ -272,26 +282,26 @@ func resourceContainerRegistry() *schema.Resource { }, "quarantine_policy_enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, }, "retention_policy": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, MaxItems: 1, Optional: true, Computed: true, - ConfigMode: schema.SchemaConfigModeAttr, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + ConfigMode: pluginsdk.SchemaConfigModeAttr, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ 
"days": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Default: 7, }, "enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: false, }, @@ -300,15 +310,15 @@ func resourceContainerRegistry() *schema.Resource { }, "trust_policy": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, MaxItems: 1, Optional: true, Computed: true, - ConfigMode: schema.SchemaConfigModeAttr, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + ConfigMode: pluginsdk.SchemaConfigModeAttr, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: false, }, @@ -316,14 +326,21 @@ func resourceContainerRegistry() *schema.Resource { }, }, + "zone_redundancy_enabled": { + Type: pluginsdk.TypeBool, + ForceNew: true, + Optional: true, + Default: false, + }, + "tags": tags.Schema(), }, CustomizeDiff: pluginsdk.CustomizeDiffShim(func(ctx context.Context, d *pluginsdk.ResourceDiff, v interface{}) error { sku := d.Get("sku").(string) - geoReplicationLocations := d.Get("georeplication_locations").(*schema.Set) - geoReplications := d.Get("georeplications").(*schema.Set) - hasGeoReplicationsApplied := geoReplicationLocations.Len() > 0 || geoReplications.Len() > 0 + geoReplicationLocations := d.Get("georeplication_locations").(*pluginsdk.Set) + geoReplications := d.Get("georeplications").([]interface{}) + hasGeoReplicationsApplied := geoReplicationLocations.Len() > 0 || len(geoReplications) > 0 // if locations have been specified for geo-replication then, the SKU has to be Premium if hasGeoReplicationsApplied && !strings.EqualFold(sku, string(containerregistry.Premium)) { return fmt.Errorf("ACR geo-replication can only be applied when using the Premium Sku.") @@ -348,12 +365,26 @@ func resourceContainerRegistry() *schema.Resource { if ok && encryptionEnabled.(bool) && !strings.EqualFold(sku, string(containerregistry.Premium)) { return 
fmt.Errorf("ACR encryption can only be applied when using the Premium Sku.") } + + // zone redundancy is only available for Premium Sku. + zoneRedundancyEnabled, ok := d.GetOk("zone_redundancy_enabled") + if ok && zoneRedundancyEnabled.(bool) && !strings.EqualFold(sku, string(containerregistry.Premium)) { + return fmt.Errorf("ACR zone redundancy can only be applied when using the Premium Sku") + } + for _, loc := range geoReplications { + loc := loc.(map[string]interface{}) + zoneRedundancyEnabled, ok := loc["zone_redundancy_enabled"] + if ok && zoneRedundancyEnabled.(bool) && !strings.EqualFold(sku, string(containerregistry.Premium)) { + return fmt.Errorf("ACR zone redundancy can only be applied when using the Premium Sku") + } + } + return nil }), } } -func resourceContainerRegistryCreate(d *schema.ResourceData, meta interface{}) error { +func resourceContainerRegistryCreate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Containers.RegistriesClient ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -392,8 +423,8 @@ func resourceContainerRegistryCreate(d *schema.ResourceData, meta interface{}) e sku := d.Get("sku").(string) adminUserEnabled := d.Get("admin_enabled").(bool) t := d.Get("tags").(map[string]interface{}) - geoReplicationLocations := d.Get("georeplication_locations").(*schema.Set) - geoReplications := d.Get("georeplications").(*schema.Set) + geoReplicationLocations := d.Get("georeplication_locations").(*pluginsdk.Set) + geoReplications := d.Get("georeplications").([]interface{}) networkRuleSet := expandNetworkRuleSet(d.Get("network_rule_set").([]interface{})) if networkRuleSet != nil && !strings.EqualFold(sku, string(containerregistry.Premium)) { @@ -418,6 +449,12 @@ func resourceContainerRegistryCreate(d *schema.ResourceData, meta interface{}) e if !d.Get("public_network_access_enabled").(bool) { publicNetworkAccess = containerregistry.PublicNetworkAccessDisabled } + + 
zoneRedundancy := containerregistry.ZoneRedundancyDisabled + if d.Get("zone_redundancy_enabled").(bool) { + zoneRedundancy = containerregistry.ZoneRedundancyEnabled + } + parameters := containerregistry.Registry{ Location: &location, Sku: &containerregistry.Sku{ @@ -427,6 +464,7 @@ func resourceContainerRegistryCreate(d *schema.ResourceData, meta interface{}) e Identity: identity, RegistryProperties: &containerregistry.RegistryProperties{ AdminUserEnabled: utils.Bool(adminUserEnabled), + Encryption: encryption, NetworkRuleSet: networkRuleSet, Policies: &containerregistry.Policies{ QuarantinePolicy: quarantinePolicy, @@ -434,7 +472,7 @@ func resourceContainerRegistryCreate(d *schema.ResourceData, meta interface{}) e TrustPolicy: trustPolicy, }, PublicNetworkAccess: publicNetworkAccess, - Encryption: encryption, + ZoneRedundancy: zoneRedundancy, }, Tags: tags.Expand(t), @@ -462,11 +500,11 @@ func resourceContainerRegistryCreate(d *schema.ResourceData, meta interface{}) e } // the ACR is being created so no previous geo-replication locations - var oldGeoReplicationLocations, newGeoReplicationLocations []*containerregistry.Replication + var oldGeoReplicationLocations, newGeoReplicationLocations []containerregistry.Replication if geoReplicationLocations != nil && geoReplicationLocations.Len() > 0 { newGeoReplicationLocations = expandReplicationsFromLocations(geoReplicationLocations.List()) } else { - newGeoReplicationLocations = expandReplications(geoReplications.List()) + newGeoReplicationLocations = expandReplications(geoReplications) } // geo replications have been specified if len(newGeoReplicationLocations) > 0 { @@ -490,7 +528,7 @@ func resourceContainerRegistryCreate(d *schema.ResourceData, meta interface{}) e return resourceContainerRegistryRead(d, meta) } -func resourceContainerRegistryUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceContainerRegistryUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := 
meta.(*clients.Client).Containers.RegistriesClient ctx, cancel := timeouts.ForUpdate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -510,13 +548,13 @@ func resourceContainerRegistryUpdate(d *schema.ResourceData, meta interface{}) e old, new := d.GetChange("georeplication_locations") hasGeoReplicationLocationsChanges := d.HasChange("georeplication_locations") - oldGeoReplicationLocations := old.(*schema.Set) - newGeoReplicationLocations := new.(*schema.Set) + oldGeoReplicationLocations := old.(*pluginsdk.Set) + newGeoReplicationLocations := new.(*pluginsdk.Set) oldReplicationsRaw, newReplicationsRaw := d.GetChange("georeplications") hasGeoReplicationsChanges := d.HasChange("georeplications") - oldReplications := oldReplicationsRaw.(*schema.Set) - newReplications := newReplicationsRaw.(*schema.Set) + oldReplications := oldReplicationsRaw.([]interface{}) + newReplications := newReplicationsRaw.([]interface{}) // handle upgrade to Premium SKU first if skuChange && isPremiumSku { @@ -542,6 +580,9 @@ func resourceContainerRegistryUpdate(d *schema.ResourceData, meta interface{}) e identityRaw := d.Get("identity").([]interface{}) identity := expandIdentityProperties(identityRaw) + encryptionRaw := d.Get("encryption").([]interface{}) + encryption := expandEncryption(encryptionRaw) + parameters := containerregistry.RegistryUpdateParameters{ RegistryPropertiesUpdateParameters: &containerregistry.RegistryPropertiesUpdateParameters{ AdminUserEnabled: utils.Bool(adminUserEnabled), @@ -552,19 +593,20 @@ func resourceContainerRegistryUpdate(d *schema.ResourceData, meta interface{}) e TrustPolicy: trustPolicy, }, PublicNetworkAccess: publicNetworkAccess, + Encryption: encryption, }, Identity: identity, Tags: tags.Expand(t), } // geo replication is only supported by Premium Sku - hasGeoReplicationsApplied := newGeoReplicationLocations.Len() > 0 || newReplications.Len() > 0 + hasGeoReplicationsApplied := newGeoReplicationLocations.Len() > 0 || len(newReplications) > 0 if 
hasGeoReplicationsApplied && !strings.EqualFold(sku, string(containerregistry.Premium)) { return fmt.Errorf("ACR geo-replication can only be applied when using the Premium Sku.") } if hasGeoReplicationsChanges { - err := applyGeoReplicationLocations(d, meta, resourceGroup, name, expandReplications(oldReplications.List()), expandReplications(newReplications.List())) + err := applyGeoReplicationLocations(d, meta, resourceGroup, name, expandReplications(oldReplications), expandReplications(newReplications)) if err != nil { return fmt.Errorf("Error applying geo replications for Container Registry %q (Resource Group %q): %+v", name, resourceGroup, err) } @@ -605,7 +647,7 @@ func resourceContainerRegistryUpdate(d *schema.ResourceData, meta interface{}) e return resourceContainerRegistryRead(d, meta) } -func applyContainerRegistrySku(d *schema.ResourceData, meta interface{}, sku string, resourceGroup string, name string) error { +func applyContainerRegistrySku(d *pluginsdk.ResourceData, meta interface{}, sku string, resourceGroup string, name string) error { client := meta.(*clients.Client).Containers.RegistriesClient ctx, cancel := timeouts.ForUpdate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -629,7 +671,7 @@ func applyContainerRegistrySku(d *schema.ResourceData, meta interface{}, sku str return nil } -func applyGeoReplicationLocations(d *schema.ResourceData, meta interface{}, resourceGroup string, name string, oldGeoReplications []*containerregistry.Replication, newGeoReplications []*containerregistry.Replication) error { +func applyGeoReplicationLocations(d *pluginsdk.ResourceData, meta interface{}, resourceGroup string, name string, oldGeoReplications []containerregistry.Replication, newGeoReplications []containerregistry.Replication) error { replicationClient := meta.(*clients.Client).Containers.ReplicationsClient ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -637,7 +679,7 @@ func 
applyGeoReplicationLocations(d *schema.ResourceData, meta interface{}, reso // delete previously deployed locations for _, replication := range oldGeoReplications { - if replication == nil || len(*replication.Location) == 0 { + if replication.Location == nil { continue } oldLocation := azure.NormalizeLocation(*replication.Location) @@ -653,11 +695,11 @@ func applyGeoReplicationLocations(d *schema.ResourceData, meta interface{}, reso // create new geo-replication locations for _, replication := range newGeoReplications { - if replication == nil || len(*replication.Location) == 0 { + if replication.Location == nil { continue } locationToCreate := azure.NormalizeLocation(*replication.Location) - future, err := replicationClient.Create(ctx, resourceGroup, name, locationToCreate, *replication) + future, err := replicationClient.Create(ctx, resourceGroup, name, locationToCreate, replication) if err != nil { return fmt.Errorf("Error creating Container Registry Replication %q (Resource Group %q, Location %q): %+v", name, resourceGroup, locationToCreate, err) } @@ -670,7 +712,7 @@ func applyGeoReplicationLocations(d *schema.ResourceData, meta interface{}, reso return nil } -func resourceContainerRegistryRead(d *schema.ResourceData, meta interface{}) error { +func resourceContainerRegistryRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Containers.RegistriesClient replicationClient := meta.(*clients.Client).Containers.ReplicationsClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) @@ -728,6 +770,7 @@ func resourceContainerRegistryRead(d *schema.ResourceData, meta interface{}) err if err := d.Set("encryption", flattenEncryption(properties.Encryption)); err != nil { return fmt.Errorf("Error setting `encryption`: %+v", err) } + d.Set("zone_redundancy_enabled", properties.ZoneRedundancy == containerregistry.ZoneRedundancyEnabled) } if sku := resp.Sku; sku != nil { @@ -769,6 +812,7 @@ func 
resourceContainerRegistryRead(d *schema.ResourceData, meta interface{}) err replication := make(map[string]interface{}) replication["location"] = valueLocation replication["tags"] = tags.Flatten(value.Tags) + replication["zone_redundancy_enabled"] = value.ZoneRedundancy == containerregistry.ZoneRedundancyEnabled geoReplications = append(geoReplications, replication) } } @@ -779,7 +823,7 @@ func resourceContainerRegistryRead(d *schema.ResourceData, meta interface{}) err return tags.FlattenAndSet(d, resp.Tags) } -func resourceContainerRegistryDelete(d *schema.ResourceData, meta interface{}) error { +func resourceContainerRegistryDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Containers.RegistriesClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() @@ -816,7 +860,7 @@ func expandNetworkRuleSet(profiles []interface{}) *containerregistry.NetworkRule profile := profiles[0].(map[string]interface{}) - ipRuleConfigs := profile["ip_rule"].(*schema.Set).List() + ipRuleConfigs := profile["ip_rule"].(*pluginsdk.Set).List() ipRules := make([]containerregistry.IPRule, 0) for _, ipRuleInterface := range ipRuleConfigs { config := ipRuleInterface.(map[string]interface{}) @@ -827,7 +871,7 @@ func expandNetworkRuleSet(profiles []interface{}) *containerregistry.NetworkRule ipRules = append(ipRules, newIpRule) } - networkRuleConfigs := profile["virtual_network"].(*schema.Set).List() + networkRuleConfigs := profile["virtual_network"].(*pluginsdk.Set).List() virtualNetworkRules := make([]containerregistry.VirtualNetworkRule, 0) for _, networkRuleInterface := range networkRuleConfigs { config := networkRuleInterface.(map[string]interface{}) @@ -893,11 +937,11 @@ func expandTrustPolicy(p []interface{}) *containerregistry.TrustPolicy { return &trustPolicy } -func expandReplicationsFromLocations(p []interface{}) []*containerregistry.Replication { - replications := make([]*containerregistry.Replication, 0) 
+func expandReplicationsFromLocations(p []interface{}) []containerregistry.Replication { + replications := make([]containerregistry.Replication, 0) for _, value := range p { location := azure.NormalizeLocation(value) - replications = append(replications, &containerregistry.Replication{ + replications = append(replications, containerregistry.Replication{ Location: &location, Name: &location, }) @@ -905,8 +949,8 @@ func expandReplicationsFromLocations(p []interface{}) []*containerregistry.Repli return replications } -func expandReplications(p []interface{}) []*containerregistry.Replication { - replications := make([]*containerregistry.Replication, 0) +func expandReplications(p []interface{}) []containerregistry.Replication { + replications := make([]containerregistry.Replication, 0) if p == nil { return replications } @@ -914,10 +958,17 @@ func expandReplications(p []interface{}) []*containerregistry.Replication { value := v.(map[string]interface{}) location := azure.NormalizeLocation(value["location"]) tags := tags.Expand(value["tags"].(map[string]interface{})) - replications = append(replications, &containerregistry.Replication{ + zoneRedundancy := containerregistry.ZoneRedundancyDisabled + if value["zone_redundancy_enabled"].(bool) { + zoneRedundancy = containerregistry.ZoneRedundancyEnabled + } + replications = append(replications, containerregistry.Replication{ Location: &location, Name: &location, Tags: tags, + ReplicationProperties: &containerregistry.ReplicationProperties{ + ZoneRedundancy: zoneRedundancy, + }, }) } return replications @@ -993,6 +1044,12 @@ func flattenIdentityProperties(identityProperties *containerregistry.IdentityPro } identity["identity_ids"] = identityIds } + if identityProperties.PrincipalID != nil { + identity["principal_id"] = *identityProperties.PrincipalID + } + if identityProperties.TenantID != nil { + identity["tenant_id"] = *identityProperties.TenantID + } return []interface{}{identity}, nil } diff --git 
a/azurerm/internal/services/containers/container_registry_resource_test.go b/azurerm/internal/services/containers/container_registry_resource_test.go index 17642a91fda0..98e57af14b08 100644 --- a/azurerm/internal/services/containers/container_registry_resource_test.go +++ b/azurerm/internal/services/containers/container_registry_resource_test.go @@ -6,15 +6,14 @@ import ( "strings" "testing" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/containers/validate" - - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + validateHelper "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/location" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/containers/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -82,10 +81,10 @@ func TestAccContainerRegistry_basic_basic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_container_registry", "test") r := ContainerRegistryResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -97,10 +96,10 @@ func TestAccContainerRegistry_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, 
"azurerm_container_registry", "test") r := ContainerRegistryResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicManaged(data, "Basic"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -115,10 +114,10 @@ func TestAccContainerRegistry_basic_standard(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_container_registry", "test") r := ContainerRegistryResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicManaged(data, "Standard"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -130,10 +129,10 @@ func TestAccContainerRegistry_basic_premium(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_container_registry", "test") r := ContainerRegistryResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicManaged(data, "Premium"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -145,24 +144,24 @@ func TestAccContainerRegistry_basic_basic2Premium2basic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_container_registry", "test") r := ContainerRegistryResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("sku").HasValue("Basic"), ), }, { Config: r.basicManaged(data, "Premium"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), 
check.That(data.ResourceName).Key("sku").HasValue("Premium"), ), }, { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("sku").HasValue("Basic"), ), @@ -174,10 +173,10 @@ func TestAccContainerRegistry_complete(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_container_registry", "test") r := ContainerRegistryResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.complete(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -189,16 +188,16 @@ func TestAccContainerRegistry_update(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_container_registry", "test") r := ContainerRegistryResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.complete(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, { Config: r.completeUpdated(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -215,11 +214,11 @@ func TestAccContainerRegistry_geoReplicationLocation(t *testing.T) { secondaryLocation := location.Normalize(data.Locations.Secondary) ternaryLocation := location.Normalize(data.Locations.Ternary) - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ // first config creates an ACR with locations { Config: r.geoReplicationLocation(data, []string{secondaryLocation}), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("sku").HasValue(skuPremium), 
check.That(data.ResourceName).Key("georeplication_locations.#").HasValue("1"), @@ -229,7 +228,7 @@ func TestAccContainerRegistry_geoReplicationLocation(t *testing.T) { // second config updates the ACR with updated locations { Config: r.geoReplicationLocation(data, []string{ternaryLocation}), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("sku").HasValue(skuPremium), check.That(data.ResourceName).Key("georeplication_locations.#").HasValue("1"), @@ -239,7 +238,7 @@ func TestAccContainerRegistry_geoReplicationLocation(t *testing.T) { // third config updates the ACR with updated locations { Config: r.geoReplicationLocation(data, []string{secondaryLocation, ternaryLocation}), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("sku").HasValue(skuPremium), check.That(data.ResourceName).Key("georeplication_locations.#").HasValue("2"), @@ -250,7 +249,7 @@ func TestAccContainerRegistry_geoReplicationLocation(t *testing.T) { // fourth config updates the ACR with no location. { Config: r.geoReplicationUpdateWithNoLocation(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("sku").HasValue(skuPremium), check.That(data.ResourceName).Key("georeplication_locations.#").HasValue("0"), @@ -259,7 +258,7 @@ func TestAccContainerRegistry_geoReplicationLocation(t *testing.T) { // fifth config updates the SKU to basic. 
{ Config: r.geoReplicationUpdateWithNoLocation_basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("sku").HasValue(skuBasic), check.That(data.ResourceName).Key("georeplication_locations.#").HasValue("0"), @@ -278,11 +277,11 @@ func TestAccContainerRegistry_geoReplication(t *testing.T) { secondaryLocation := location.Normalize(data.Locations.Secondary) ternaryLocation := location.Normalize(data.Locations.Ternary) - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ // first config creates an ACR with locations { Config: r.geoReplication(data, secondaryLocation), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("sku").HasValue(skuPremium), check.That(data.ResourceName).Key("georeplications.#").HasValue("1"), @@ -295,7 +294,7 @@ func TestAccContainerRegistry_geoReplication(t *testing.T) { // second config updates the ACR with updated locations { Config: r.geoReplication(data, ternaryLocation), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("sku").HasValue(skuPremium), check.That(data.ResourceName).Key("georeplications.#").HasValue("1"), @@ -308,7 +307,7 @@ func TestAccContainerRegistry_geoReplication(t *testing.T) { // third config updates the ACR with updated locations { Config: r.geoReplicationMultipleLocations(data, secondaryLocation, ternaryLocation), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("sku").HasValue(skuPremium), check.That(data.ResourceName).Key("georeplications.#").HasValue("2"), @@ -321,7 +320,7 @@ func 
TestAccContainerRegistry_geoReplication(t *testing.T) { // fourth config updates the ACR with no location { Config: r.geoReplicationUpdateWithNoReplication(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("sku").HasValue(skuPremium), check.That(data.ResourceName).Key("georeplications.#").HasValue("0"), @@ -331,7 +330,7 @@ func TestAccContainerRegistry_geoReplication(t *testing.T) { // fifth config updates the SKU to basic. { Config: r.geoReplicationUpdateWithNoReplication_basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("sku").HasValue(skuBasic), check.That(data.ResourceName).Key("georeplications.#").HasValue("0"), @@ -349,11 +348,11 @@ func TestAccContainerRegistry_geoReplicationSwitch(t *testing.T) { secondaryLocation := location.Normalize(data.Locations.Secondary) ternaryLocation := location.Normalize(data.Locations.Ternary) - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ // first config creates an ACR using georeplication_locations { Config: r.geoReplicationLocation(data, []string{secondaryLocation, ternaryLocation}), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("sku").HasValue(skuPremium), check.That(data.ResourceName).Key("georeplication_locations.#").HasValue("2"), @@ -363,7 +362,7 @@ func TestAccContainerRegistry_geoReplicationSwitch(t *testing.T) { // second config updates the ACR using georeplications { Config: r.geoReplicationMultipleLocations(data, secondaryLocation, ternaryLocation), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), 
check.That(data.ResourceName).Key("sku").HasValue(skuPremium), check.That(data.ResourceName).Key("georeplications.#").HasValue("2"), @@ -377,10 +376,10 @@ func TestAccContainerRegistry_networkAccessProfileIp(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_container_registry", "test") r := ContainerRegistryResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkAccessProfile_ip(data, "Premium"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("network_rule_set.0.default_action").HasValue("Allow"), check.That(data.ResourceName).Key("network_rule_set.0.ip_rule.#").HasValue("2"), @@ -394,16 +393,16 @@ func TestAccContainerRegistry_networkAccessProfile_update(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_container_registry", "test") r := ContainerRegistryResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicManaged(data, "Premium"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, { Config: r.networkAccessProfile_ip(data, "Premium"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("network_rule_set.0.default_action").HasValue("Allow"), check.That(data.ResourceName).Key("network_rule_set.0.ip_rule.#").HasValue("2"), @@ -412,7 +411,7 @@ func TestAccContainerRegistry_networkAccessProfile_update(t *testing.T) { data.ImportStep(), { Config: r.networkAccessProfile_vnet(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("network_rule_set.0.default_action").HasValue("Deny"), 
check.That(data.ResourceName).Key("network_rule_set.0.virtual_network.#").HasValue("1"), @@ -421,7 +420,7 @@ func TestAccContainerRegistry_networkAccessProfile_update(t *testing.T) { data.ImportStep(), { Config: r.networkAccessProfile_both(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("network_rule_set.0.default_action").HasValue("Deny"), check.That(data.ResourceName).Key("network_rule_set.0.ip_rule.#").HasValue("1"), @@ -435,10 +434,10 @@ func TestAccContainerRegistry_networkAccessProfileVnet(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_container_registry", "test") r := ContainerRegistryResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkAccessProfile_vnet(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("network_rule_set.0.default_action").HasValue("Deny"), check.That(data.ResourceName).Key("network_rule_set.0.virtual_network.#").HasValue("1"), @@ -452,10 +451,10 @@ func TestAccContainerRegistry_policies(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_container_registry", "test") r := ContainerRegistryResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.policies(data, 10), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("network_rule_set.0.default_action").HasValue("Allow"), check.That(data.ResourceName).Key("network_rule_set.0.virtual_network.#").HasValue("0"), @@ -467,7 +466,7 @@ func TestAccContainerRegistry_policies(t *testing.T) { }, { Config: r.policies(data, 20), - Check: resource.ComposeTestCheckFunc( + Check: 
acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("network_rule_set.0.default_action").HasValue("Allow"), check.That(data.ResourceName).Key("network_rule_set.0.virtual_network.#").HasValue("0"), @@ -479,7 +478,7 @@ func TestAccContainerRegistry_policies(t *testing.T) { }, { Config: r.policies_downgradeUpdate(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("network_rule_set.#").HasValue("0"), check.That(data.ResourceName).Key("quarantine_policy_enabled").HasValue("false"), @@ -496,21 +495,72 @@ func TestAccContainerRegistry_identity(t *testing.T) { r := ContainerRegistryResource{} skuPremium := "Premium" userAssigned := "userAssigned" - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ // creates an ACR with encryption { Config: r.identity(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("sku").HasValue(skuPremium), + check.That(data.ResourceName).Key("identity.0.type").HasValue(userAssigned), + ), + }, + data.ImportStep(), + }) +} + +func TestAccContainerRegistry_identitySystemAssigned(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_container_registry", "test") + r := ContainerRegistryResource{} + skuPremium := "Premium" + userAssigned := "systemAssigned" + data.ResourceTest(t, r, []acceptance.TestStep{ + // creates an ACR with encryption + { + Config: r.identitySystemAssigned(data), + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("sku").HasValue(skuPremium), check.That(data.ResourceName).Key("identity.0.type").HasValue(userAssigned), + acceptance.TestMatchResourceAttr(data.ResourceName, "identity.0.principal_id", 
validateHelper.UUIDRegExp), + acceptance.TestMatchResourceAttr(data.ResourceName, "identity.0.tenant_id", validateHelper.UUIDRegExp), ), }, data.ImportStep(), }) } -func (t ContainerRegistryResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { +func TestAccContainerRegistry_zoneRedundancy(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_container_registry", "test") + r := ContainerRegistryResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.zoneRedundancy(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccContainerRegistry_geoReplicationZoneRedundancy(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_container_registry", "test") + r := ContainerRegistryResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.geoReplicationZoneRedundancy(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func (t ContainerRegistryResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := azure.ParseAzureResourceID(state.ID) if err != nil { return nil, err @@ -1037,3 +1087,69 @@ resource "azurerm_user_assigned_identity" "test" { } `, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger) } + +func (ContainerRegistryResource) identitySystemAssigned(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-acr-%d" + location = "%s" +} + +resource "azurerm_container_registry" "test" { + name = "testacccr%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + sku = "Premium" + identity { + type = "SystemAssigned" + } +} + 
+ +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger) +} + +func (ContainerRegistryResource) zoneRedundancy(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} +resource "azurerm_resource_group" "test" { + name = "acctestRG-acr-%d" + location = "%s" +} +resource "azurerm_container_registry" "test" { + name = "testacccr%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + sku = "Premium" + zone_redundancy_enabled = true +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger) +} + +func (ContainerRegistryResource) geoReplicationZoneRedundancy(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} +resource "azurerm_resource_group" "test" { + name = "acctestRG-acr-%d" + location = "%s" +} +resource "azurerm_container_registry" "test" { + name = "testacccr%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + sku = "Premium" + georeplications { + location = "%s" + zone_redundancy_enabled = true + } +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.Locations.Secondary) +} diff --git a/azurerm/internal/services/containers/container_registry_scope_map_data_source.go b/azurerm/internal/services/containers/container_registry_scope_map_data_source.go index bbca746ce54f..8b4fd07c4557 100644 --- a/azurerm/internal/services/containers/container_registry_scope_map_data_source.go +++ b/azurerm/internal/services/containers/container_registry_scope_map_data_source.go @@ -4,49 +4,49 @@ import ( "fmt" "time" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/containers/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func dataSourceContainerRegistryScopeMap() *schema.Resource { - return &schema.Resource{ +func dataSourceContainerRegistryScopeMap() *pluginsdk.Resource { + return &pluginsdk.Resource{ Read: dataSourceContainerRegistryScopeMapRead, - Timeouts: &schema.ResourceTimeout{ - Read: schema.DefaultTimeout(5 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Read: pluginsdk.DefaultTimeout(5 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "container_registry_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validate.ContainerRegistryName, }, "resource_group_name": azure.SchemaResourceGroupNameForDataSource(), "description": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "actions": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, }, } } -func dataSourceContainerRegistryScopeMapRead(d *schema.ResourceData, meta interface{}) error { +func dataSourceContainerRegistryScopeMapRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Containers.ScopeMapsClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() diff --git a/azurerm/internal/services/containers/container_registry_scope_map_data_source_test.go b/azurerm/internal/services/containers/container_registry_scope_map_data_source_test.go index 
2a6e0353daf6..1fde7c573d51 100644 --- a/azurerm/internal/services/containers/container_registry_scope_map_data_source_test.go +++ b/azurerm/internal/services/containers/container_registry_scope_map_data_source_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -16,10 +15,10 @@ func TestAccDataSourceContainerRegistryScopeMap_complete(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_container_registry_scope_map", "test") r := ContainerRegistryScopeMapDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.complete(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("name").Exists(), check.That(data.ResourceName).Key("resource_group_name").Exists(), check.That(data.ResourceName).Key("container_registry_name").Exists(), diff --git a/azurerm/internal/services/containers/container_registry_scope_map_resource.go b/azurerm/internal/services/containers/container_registry_scope_map_resource.go index 9442aa4590f7..1558525b7c5b 100644 --- a/azurerm/internal/services/containers/container_registry_scope_map_resource.go +++ b/azurerm/internal/services/containers/container_registry_scope_map_resource.go @@ -7,44 +7,42 @@ import ( "github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry" "github.com/hashicorp/go-azure-helpers/response" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/containers/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/containers/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func resourceContainerRegistryScopeMap() *schema.Resource { - return &schema.Resource{ - Create: resourceContainerRegistryScopeMapCreate, - Read: resourceContainerRegistryScopeMapRead, - Update: resourceContainerRegistryScopeMapUpdate, - Delete: resourceContainerRegistryScopeMapDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, +func resourceContainerRegistryScopeMap() *pluginsdk.Resource { + return &pluginsdk.Resource{ + Create: resourceContainerRegistryScopeMapCreate, + Read: resourceContainerRegistryScopeMapRead, + Update: resourceContainerRegistryScopeMapUpdate, + Delete: resourceContainerRegistryScopeMapDelete, + Importer: pluginsdk.DefaultImporter(), + + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), }, - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(30 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: 
true, ForceNew: true, ValidateFunc: validate.ContainerRegistryScopeMapName, }, "description": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ValidateFunc: validation.StringLenBetween(1, 256), }, @@ -52,18 +50,18 @@ func resourceContainerRegistryScopeMap() *schema.Resource { "resource_group_name": azure.SchemaResourceGroupName(), "container_registry_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.ContainerRegistryName, }, "actions": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Required: true, MinItems: 1, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: validation.StringIsNotEmpty, }, }, @@ -71,7 +69,7 @@ func resourceContainerRegistryScopeMap() *schema.Resource { } } -func resourceContainerRegistryScopeMapCreate(d *schema.ResourceData, meta interface{}) error { +func resourceContainerRegistryScopeMapCreate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Containers.ScopeMapsClient ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -127,7 +125,7 @@ func resourceContainerRegistryScopeMapCreate(d *schema.ResourceData, meta interf return resourceContainerRegistryScopeMapRead(d, meta) } -func resourceContainerRegistryScopeMapUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceContainerRegistryScopeMapUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Containers.ScopeMapsClient ctx, cancel := timeouts.ForUpdate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -169,7 +167,7 @@ func resourceContainerRegistryScopeMapUpdate(d *schema.ResourceData, meta interf return resourceContainerRegistryScopeMapRead(d, meta) } -func resourceContainerRegistryScopeMapRead(d *schema.ResourceData, meta interface{}) error { +func resourceContainerRegistryScopeMapRead(d 
*pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Containers.ScopeMapsClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() @@ -200,7 +198,7 @@ func resourceContainerRegistryScopeMapRead(d *schema.ResourceData, meta interfac return nil } -func resourceContainerRegistryScopeMapDelete(d *schema.ResourceData, meta interface{}) error { +func resourceContainerRegistryScopeMapDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Containers.ScopeMapsClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() diff --git a/azurerm/internal/services/containers/container_registry_scope_map_resource_test.go b/azurerm/internal/services/containers/container_registry_scope_map_resource_test.go index 268569bc8d95..1af8d8c83f4c 100644 --- a/azurerm/internal/services/containers/container_registry_scope_map_resource_test.go +++ b/azurerm/internal/services/containers/container_registry_scope_map_resource_test.go @@ -5,12 +5,11 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/containers/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -21,10 +20,10 @@ func TestAccContainerRegistryScopeMap_basic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_container_registry_scope_map", "test") r := ContainerRegistryScopeMapResource{} - data.ResourceTest(t, r, 
[]resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -36,10 +35,10 @@ func TestAccContainerRegistryScopeMap_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_container_registry_scope_map", "test") r := ContainerRegistryScopeMapResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("actions.#").HasValue("1"), check.That(data.ResourceName).Key("actions.0").HasValue("repositories/testrepo/content/read"), @@ -56,10 +55,10 @@ func TestAccContainerRegistryScopeMap_complete(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_container_registry_scope_map", "test") r := ContainerRegistryScopeMapResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.complete(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("actions.#").HasValue("1"), check.That(data.ResourceName).Key("actions.0").HasValue("repositories/testrepo/content/read"), @@ -73,10 +72,10 @@ func TestAccontainerRegistryScopeMap_update(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_container_registry_scope_map", "test") r := ContainerRegistryScopeMapResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.complete(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("actions.#").HasValue("1"), 
check.That(data.ResourceName).Key("actions.0").HasValue("repositories/testrepo/content/read"), @@ -85,7 +84,7 @@ func TestAccontainerRegistryScopeMap_update(t *testing.T) { data.ImportStep(), { Config: r.completeUpdated(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("actions.#").HasValue("2"), check.That(data.ResourceName).Key("actions.0").HasValue("repositories/testrepo/content/read"), @@ -96,7 +95,7 @@ func TestAccontainerRegistryScopeMap_update(t *testing.T) { }) } -func (ContainerRegistryScopeMapResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (ContainerRegistryScopeMapResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := parse.ContainerRegistryScopeMapID(state.ID) if err != nil { diff --git a/azurerm/internal/services/containers/container_registry_token_data_source.go b/azurerm/internal/services/containers/container_registry_token_data_source.go index 19ae0e546d99..42d68d487f35 100644 --- a/azurerm/internal/services/containers/container_registry_token_data_source.go +++ b/azurerm/internal/services/containers/container_registry_token_data_source.go @@ -5,31 +5,31 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/containers/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func dataSourceContainerRegistryToken() *schema.Resource { - return &schema.Resource{ +func dataSourceContainerRegistryToken() *pluginsdk.Resource { + return &pluginsdk.Resource{ Read: dataSourceContainerRegistryTokenRead, - Timeouts: &schema.ResourceTimeout{ - Read: schema.DefaultTimeout(5 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Read: pluginsdk.DefaultTimeout(5 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validate.ContainerRegistryTokenName, }, "container_registry_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validate.ContainerRegistryName, }, @@ -37,19 +37,19 @@ func dataSourceContainerRegistryToken() *schema.Resource { "resource_group_name": azure.SchemaResourceGroupNameForDataSource(), "scope_map_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Computed: true, }, }, } } -func dataSourceContainerRegistryTokenRead(d *schema.ResourceData, meta interface{}) error { +func dataSourceContainerRegistryTokenRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Containers.TokensClient ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() diff --git a/azurerm/internal/services/containers/container_registry_token_data_source_test.go b/azurerm/internal/services/containers/container_registry_token_data_source_test.go index 4831715fb42d..a58b70356940 100644 --- a/azurerm/internal/services/containers/container_registry_token_data_source_test.go +++ b/azurerm/internal/services/containers/container_registry_token_data_source_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - 
"github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -16,10 +15,10 @@ func TestAccDataSourceContainerRegistryToken_basic(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_container_registry_token", "test") r := ContainerRegistryTokenDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("name").Exists(), check.That(data.ResourceName).Key("resource_group_name").Exists(), check.That(data.ResourceName).Key("container_registry_name").Exists(), diff --git a/azurerm/internal/services/containers/container_registry_token_resource.go b/azurerm/internal/services/containers/container_registry_token_resource.go index ac833714ee64..36b160856cb9 100644 --- a/azurerm/internal/services/containers/container_registry_token_resource.go +++ b/azurerm/internal/services/containers/container_registry_token_resource.go @@ -7,36 +7,34 @@ import ( "github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry" "github.com/hashicorp/go-azure-helpers/response" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/containers/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/containers/validate" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func resourceContainerRegistryToken() *schema.Resource { - return &schema.Resource{ - Create: resourceContainerRegistryTokenCreate, - Read: resourceContainerRegistryTokenRead, - Update: resourceContainerRegistryTokenUpdate, - Delete: resourceContainerRegistryTokenDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, +func resourceContainerRegistryToken() *pluginsdk.Resource { + return &pluginsdk.Resource{ + Create: resourceContainerRegistryTokenCreate, + Read: resourceContainerRegistryTokenRead, + Update: resourceContainerRegistryTokenUpdate, + Delete: resourceContainerRegistryTokenDelete, + Importer: pluginsdk.DefaultImporter(), + + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), }, - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(30 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.ContainerRegistryTokenName, @@ -45,20 +43,20 @@ func resourceContainerRegistryToken() *schema.Resource { "resource_group_name": azure.SchemaResourceGroupName(), "container_registry_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.ContainerRegistryName, }, "scope_map_id": { - Type: schema.TypeString, + 
Type: pluginsdk.TypeString, Required: true, ValidateFunc: validate.ContainerRegistryScopeMapID, }, "enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: true, }, @@ -66,7 +64,7 @@ func resourceContainerRegistryToken() *schema.Resource { } } -func resourceContainerRegistryTokenCreate(d *schema.ResourceData, meta interface{}) error { +func resourceContainerRegistryTokenCreate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Containers.TokensClient ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) @@ -127,7 +125,7 @@ func resourceContainerRegistryTokenCreate(d *schema.ResourceData, meta interface return resourceContainerRegistryTokenRead(d, meta) } -func resourceContainerRegistryTokenUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceContainerRegistryTokenUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Containers.TokensClient ctx, cancel := timeouts.ForUpdate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -174,7 +172,7 @@ func resourceContainerRegistryTokenUpdate(d *schema.ResourceData, meta interface return resourceContainerRegistryTokenRead(d, meta) } -func resourceContainerRegistryTokenRead(d *schema.ResourceData, meta interface{}) error { +func resourceContainerRegistryTokenRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Containers.TokensClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() @@ -210,7 +208,7 @@ func resourceContainerRegistryTokenRead(d *schema.ResourceData, meta interface{} return nil } -func resourceContainerRegistryTokenDelete(d *schema.ResourceData, meta interface{}) error { +func resourceContainerRegistryTokenDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Containers.TokensClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) 
defer cancel() diff --git a/azurerm/internal/services/containers/container_registry_token_resource_test.go b/azurerm/internal/services/containers/container_registry_token_resource_test.go index 3a1b51fe5f60..ba27405d9a37 100644 --- a/azurerm/internal/services/containers/container_registry_token_resource_test.go +++ b/azurerm/internal/services/containers/container_registry_token_resource_test.go @@ -5,12 +5,11 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/containers/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -20,10 +19,10 @@ func TestAccContainerRegistryToken_basic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_container_registry_token", "test") r := ContainerRegistryTokenResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -35,10 +34,10 @@ func TestAccContainerRegistryToken_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_container_registry_token", "test") r := ContainerRegistryTokenResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( 
check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -53,10 +52,10 @@ func TestAccContainerRegistryToken_complete(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_container_registry_token", "test") r := ContainerRegistryTokenResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.complete(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("enabled").HasValue("true"), ), @@ -69,10 +68,10 @@ func TestAccContainerRegistryToken_update(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_container_registry_token", "test") r := ContainerRegistryTokenResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.complete(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("enabled").HasValue("true"), ), @@ -80,7 +79,7 @@ func TestAccContainerRegistryToken_update(t *testing.T) { data.ImportStep(), { Config: r.complete(data, false), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("enabled").HasValue("false"), ), @@ -89,7 +88,7 @@ func TestAccContainerRegistryToken_update(t *testing.T) { }) } -func (t ContainerRegistryTokenResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (t ContainerRegistryTokenResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := parse.ContainerRegistryTokenID(state.ID) if err != nil { diff --git a/azurerm/internal/services/containers/container_registry_webhook_resource.go 
b/azurerm/internal/services/containers/container_registry_webhook_resource.go index df454013727b..fe1f4e893fef 100644 --- a/azurerm/internal/services/containers/container_registry_webhook_resource.go +++ b/azurerm/internal/services/containers/container_registry_webhook_resource.go @@ -5,23 +5,21 @@ import ( "log" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/containers/validate" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" - "github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2020-11-01-preview/containerregistry" "github.com/hashicorp/go-azure-helpers/response" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/containers/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func resourceContainerRegistryWebhook() *schema.Resource { - return &schema.Resource{ +func resourceContainerRegistryWebhook() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceContainerRegistryWebhookCreate, Read: resourceContainerRegistryWebhookRead, Update: resourceContainerRegistryWebhookUpdate, @@ -30,16 +28,16 @@ func resourceContainerRegistryWebhook() *schema.Resource { // 
TODO: replace this with an importer which validates the ID during import Importer: pluginsdk.DefaultImporter(), - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(30 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.ContainerRegistryWebhookName, @@ -48,28 +46,28 @@ func resourceContainerRegistryWebhook() *schema.Resource { "resource_group_name": azure.SchemaResourceGroupName(), "registry_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.ContainerRegistryName, }, "service_uri": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validate.ContainerRegistryWebhookServiceUri, }, "custom_headers": { - Type: schema.TypeMap, + Type: pluginsdk.TypeMap, Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, "status": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Default: containerregistry.WebhookStatusEnabled, ValidateFunc: validation.StringInSlice([]string{ @@ -79,17 +77,17 @@ func resourceContainerRegistryWebhook() *schema.Resource { }, "scope": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Default: "", }, "actions": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Required: true, MinItems: 1, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: 
&pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: validation.StringInSlice([]string{ string(containerregistry.ChartDelete), string(containerregistry.ChartPush), @@ -107,7 +105,7 @@ func resourceContainerRegistryWebhook() *schema.Resource { } } -func resourceContainerRegistryWebhookCreate(d *schema.ResourceData, meta interface{}) error { +func resourceContainerRegistryWebhookCreate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Containers.WebhooksClient ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -162,7 +160,7 @@ func resourceContainerRegistryWebhookCreate(d *schema.ResourceData, meta interfa return resourceContainerRegistryWebhookRead(d, meta) } -func resourceContainerRegistryWebhookUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceContainerRegistryWebhookUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Containers.WebhooksClient ctx, cancel := timeouts.ForUpdate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -197,7 +195,7 @@ func resourceContainerRegistryWebhookUpdate(d *schema.ResourceData, meta interfa return resourceContainerRegistryWebhookRead(d, meta) } -func resourceContainerRegistryWebhookRead(d *schema.ResourceData, meta interface{}) error { +func resourceContainerRegistryWebhookRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Containers.WebhooksClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() @@ -263,7 +261,7 @@ func resourceContainerRegistryWebhookRead(d *schema.ResourceData, meta interface return tags.FlattenAndSet(d, resp.Tags) } -func resourceContainerRegistryWebhookDelete(d *schema.ResourceData, meta interface{}) error { +func resourceContainerRegistryWebhookDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Containers.WebhooksClient ctx, cancel := 
timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() @@ -294,7 +292,7 @@ func resourceContainerRegistryWebhookDelete(d *schema.ResourceData, meta interfa return nil } -func expandWebhookPropertiesCreateParameters(d *schema.ResourceData) *containerregistry.WebhookPropertiesCreateParameters { +func expandWebhookPropertiesCreateParameters(d *pluginsdk.ResourceData) *containerregistry.WebhookPropertiesCreateParameters { serviceUri := d.Get("service_uri").(string) scope := d.Get("scope").(string) @@ -317,7 +315,7 @@ func expandWebhookPropertiesCreateParameters(d *schema.ResourceData) *containerr return &webhookProperties } -func expandWebhookPropertiesUpdateParameters(d *schema.ResourceData) *containerregistry.WebhookPropertiesUpdateParameters { +func expandWebhookPropertiesUpdateParameters(d *pluginsdk.ResourceData) *containerregistry.WebhookPropertiesUpdateParameters { serviceUri := d.Get("service_uri").(string) scope := d.Get("scope").(string) @@ -337,9 +335,9 @@ func expandWebhookPropertiesUpdateParameters(d *schema.ResourceData) *containerr return &webhookProperties } -func expandWebhookActions(d *schema.ResourceData) *[]containerregistry.WebhookAction { +func expandWebhookActions(d *pluginsdk.ResourceData) *[]containerregistry.WebhookAction { actions := make([]containerregistry.WebhookAction, 0) - for _, action := range d.Get("actions").(*schema.Set).List() { + for _, action := range d.Get("actions").(*pluginsdk.Set).List() { actions = append(actions, containerregistry.WebhookAction(action.(string))) } diff --git a/azurerm/internal/services/containers/container_registry_webhook_resource_test.go b/azurerm/internal/services/containers/container_registry_webhook_resource_test.go index 04df8bd42a05..7e9621d91815 100644 --- a/azurerm/internal/services/containers/container_registry_webhook_resource_test.go +++ b/azurerm/internal/services/containers/container_registry_webhook_resource_test.go @@ -5,12 +5,11 @@ import ( "fmt" "testing" - 
"github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -21,10 +20,10 @@ func TestAccContainerRegistryWebhook_basic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_container_registry_webhook", "test") r := ContainerRegistryWebhookResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -36,10 +35,10 @@ func TestAccContainerRegistryWebhook_withTags(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_container_registry_webhook", "test") r := ContainerRegistryWebhookResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.withTags(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("tags.%").HasValue("1"), check.That(data.ResourceName).Key("tags.label").HasValue("test"), @@ -47,7 +46,7 @@ func TestAccContainerRegistryWebhook_withTags(t *testing.T) { }, { Config: r.withTagsUpdate(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("tags.%").HasValue("2"), 
check.That(data.ResourceName).Key("tags.label").HasValue("test1"), @@ -61,7 +60,7 @@ func TestAccContainerRegistryWebhook_actions(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_container_registry_webhook", "test") r := ContainerRegistryWebhookResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.actions(data), }, @@ -77,17 +76,17 @@ func TestAccContainerRegistryWebhook_status(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_container_registry_webhook", "test") r := ContainerRegistryWebhookResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.status(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("status").HasValue("enabled"), ), }, { Config: r.statusUpdate(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("status").HasValue("disabled"), ), @@ -99,17 +98,17 @@ func TestAccContainerRegistryWebhook_serviceUri(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_container_registry_webhook", "test") r := ContainerRegistryWebhookResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.serviceUri(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("service_uri").HasValue("https://mywebhookreceiver.example/mytag"), ), }, { Config: r.serviceUriUpdate(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("service_uri").HasValue("https://my.webhookreceiver.example/mytag/2"), ), @@ 
-121,17 +120,17 @@ func TestAccContainerRegistryWebhook_scope(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_container_registry_webhook", "test") r := ContainerRegistryWebhookResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.scope(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("scope").HasValue("mytag:*"), ), }, { Config: r.scopeUpdate(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("scope").HasValue("mytag:4"), ), @@ -143,10 +142,10 @@ func TestAccContainerRegistryWebhook_customHeaders(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_container_registry_webhook", "test") r := ContainerRegistryWebhookResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.customHeaders(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("custom_headers.%").HasValue("1"), check.That(data.ResourceName).Key("custom_headers.Content-Type").HasValue("application/json"), @@ -154,7 +153,7 @@ func TestAccContainerRegistryWebhook_customHeaders(t *testing.T) { }, { Config: r.customHeadersUpdate(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("custom_headers.%").HasValue("2"), check.That(data.ResourceName).Key("custom_headers.Content-Type").HasValue("application/xml"), @@ -622,7 +621,7 @@ resource "azurerm_container_registry_webhook" "test" { `, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.Locations.Primary, data.RandomInteger, 
data.Locations.Primary) } -func (t ContainerRegistryWebhookResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (t ContainerRegistryWebhookResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := azure.ParseAzureResourceID(state.ID) if err != nil { return nil, err diff --git a/azurerm/internal/services/containers/kubernetes_addons.go b/azurerm/internal/services/containers/kubernetes_addons.go index e438fac431ae..6042c92e75d1 100644 --- a/azurerm/internal/services/containers/kubernetes_addons.go +++ b/azurerm/internal/services/containers/kubernetes_addons.go @@ -6,15 +6,14 @@ import ( "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2021-03-01/containerservice" "github.com/Azure/go-autorest/autorest/azure" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" commonValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" + laparse "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/loganalytics/parse" logAnalyticsValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/loganalytics/validate" applicationGatewayValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/validate" subnetValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" - - laparse "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/loganalytics/parse" ) const ( @@ -35,7 +34,6 @@ const ( var 
unsupportedAddonsForEnvironment = map[string][]string{ azure.ChinaCloud.Name: { aciConnectorKey, // https://github.com/terraform-providers/terraform-provider-azurerm/issues/5510 - azurePolicyKey, // https://github.com/terraform-providers/terraform-provider-azurerm/issues/6462 httpApplicationRoutingKey, // https://github.com/terraform-providers/terraform-provider-azurerm/issues/5960 kubernetesDashboardKey, // https://github.com/terraform-providers/terraform-provider-azurerm/issues/7487 }, @@ -46,28 +44,28 @@ var unsupportedAddonsForEnvironment = map[string][]string{ }, } -func schemaKubernetesAddOnProfiles() *schema.Schema { +func schemaKubernetesAddOnProfiles() *pluginsdk.Schema { //lintignore:XS003 - return &schema.Schema{ - Type: schema.TypeList, + return &pluginsdk.Schema{ + Type: pluginsdk.TypeList, MaxItems: 1, Optional: true, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "aci_connector_linux": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, MaxItems: 1, Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Required: true, }, "subnet_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ValidateFunc: validation.StringIsNotEmpty, }, @@ -76,13 +74,13 @@ func schemaKubernetesAddOnProfiles() *schema.Schema { }, "azure_policy": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, MaxItems: 1, Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Required: true, }, }, @@ -90,13 +88,13 @@ func schemaKubernetesAddOnProfiles() *schema.Schema { }, "kube_dashboard": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, 
MaxItems: 1, Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Required: true, }, }, @@ -104,17 +102,17 @@ func schemaKubernetesAddOnProfiles() *schema.Schema { }, "http_application_routing": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, MaxItems: 1, Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Required: true, }, "http_application_routing_zone_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, }, @@ -122,35 +120,35 @@ func schemaKubernetesAddOnProfiles() *schema.Schema { }, "oms_agent": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, MaxItems: 1, Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Required: true, }, "log_analytics_workspace_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ValidateFunc: logAnalyticsValidate.LogAnalyticsWorkspaceID, }, "oms_agent_identity": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "client_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "object_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "user_assigned_identity_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, }, @@ -161,52 +159,57 @@ func schemaKubernetesAddOnProfiles() *schema.Schema { }, "ingress_application_gateway": { - Type: schema.TypeList, + Type: 
pluginsdk.TypeList, MaxItems: 1, Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Required: true, }, "gateway_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ConflictsWith: []string{"addon_profile.0.ingress_application_gateway.0.subnet_cidr", "addon_profile.0.ingress_application_gateway.0.subnet_id"}, ValidateFunc: applicationGatewayValidate.ApplicationGatewayID, }, + "gateway_name": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringIsNotEmpty, + }, "subnet_cidr": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ConflictsWith: []string{"addon_profile.0.ingress_application_gateway.0.gateway_id", "addon_profile.0.ingress_application_gateway.0.subnet_id"}, ValidateFunc: commonValidate.CIDR, }, "subnet_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ConflictsWith: []string{"addon_profile.0.ingress_application_gateway.0.gateway_id", "addon_profile.0.ingress_application_gateway.0.subnet_cidr"}, ValidateFunc: subnetValidate.SubnetID, }, "effective_gateway_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "ingress_application_gateway_identity": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "client_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "object_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "user_assigned_identity_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, }, @@ -320,6 +323,10 @@ func expandKubernetesAddOnProfiles(input []interface{}, env azure.Environment) ( 
config["applicationGatewayId"] = utils.String(gatewayId.(string)) } + if gatewayName, ok := value["gateway_name"]; ok && gatewayName != "" { + config["applicationGatewayName"] = utils.String(gatewayName.(string)) + } + if subnetCIDR, ok := value["subnet_cidr"]; ok && subnetCIDR != "" { config["subnetCIDR"] = utils.String(subnetCIDR.(string)) } @@ -462,6 +469,11 @@ func flattenKubernetesAddOnProfiles(profile map[string]*containerservice.Managed gatewayId = *v } + gatewayName := "" + if v := kubernetesAddonProfilelocateInConfig(ingressApplicationGateway.Config, "applicationGatewayName"); v != nil { + gatewayName = *v + } + effectiveGatewayId := "" if v := kubernetesAddonProfilelocateInConfig(ingressApplicationGateway.Config, "effectiveApplicationGatewayId"); v != nil { effectiveGatewayId = *v @@ -482,6 +494,7 @@ func flattenKubernetesAddOnProfiles(profile map[string]*containerservice.Managed ingressApplicationGateways = append(ingressApplicationGateways, map[string]interface{}{ "enabled": enabled, "gateway_id": gatewayId, + "gateway_name": gatewayName, "effective_gateway_id": effectiveGatewayId, "subnet_cidr": subnetCIDR, "subnet_id": subnetId, diff --git a/azurerm/internal/services/containers/kubernetes_cluster_addons_resource_test.go b/azurerm/internal/services/containers/kubernetes_cluster_addons_resource_test.go index e12f71402f81..56d279c1d59d 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_addons_resource_test.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_addons_resource_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -33,10 +32,10 @@ func testAccKubernetesCluster_addonProfileAciConnectorLinux(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", 
"test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.addonProfileAciConnectorLinuxConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("addon_profile.0.http_application_routing.#").HasValue("0"), check.That(data.ResourceName).Key("addon_profile.0.aci_connector_linux.#").HasValue("1"), @@ -57,10 +56,10 @@ func testAccKubernetesCluster_addonProfileAciConnectorLinuxDisabled(t *testing.T data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.addonProfileAciConnectorLinuxDisabledConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("addon_profile.0.http_application_routing.#").HasValue("0"), check.That(data.ResourceName).Key("default_node_pool.0.node_count").HasValue("1"), @@ -82,11 +81,11 @@ func testAccKubernetesCluster_addonProfileAzurePolicy(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // Enable with V2 Config: r.addonProfileAzurePolicyConfig(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("addon_profile.0.azure_policy.#").HasValue("1"), check.That(data.ResourceName).Key("addon_profile.0.azure_policy.0.enabled").HasValue("true"), @@ -96,7 +95,7 @@ func testAccKubernetesCluster_addonProfileAzurePolicy(t *testing.T) { { // Disable it Config: 
r.addonProfileAzurePolicyConfig(data, false), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("addon_profile.0.azure_policy.#").HasValue("1"), check.That(data.ResourceName).Key("addon_profile.0.azure_policy.0.enabled").HasValue("false"), @@ -106,7 +105,7 @@ func testAccKubernetesCluster_addonProfileAzurePolicy(t *testing.T) { { // Enable with V2 Config: r.addonProfileAzurePolicyConfig(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("addon_profile.0.azure_policy.#").HasValue("1"), check.That(data.ResourceName).Key("addon_profile.0.azure_policy.0.enabled").HasValue("true"), @@ -125,10 +124,10 @@ func testAccKubernetesCluster_addonProfileKubeDashboard(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.addonProfileKubeDashboardConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("addon_profile.0.kube_dashboard.#").HasValue("1"), check.That(data.ResourceName).Key("addon_profile.0.kube_dashboard.0.enabled").HasValue("false"), @@ -147,10 +146,10 @@ func testAccKubernetesCluster_addonProfileOMS(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.addonProfileOMSConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), 
check.That(data.ResourceName).Key("addon_profile.0.http_application_routing.#").HasValue("0"), check.That(data.ResourceName).Key("addon_profile.0.oms_agent.#").HasValue("1"), @@ -174,10 +173,10 @@ func testAccKubernetesCluster_addonProfileOMSToggle(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.addonProfileOMSConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("addon_profile.0.http_application_routing.#").HasValue("0"), check.That(data.ResourceName).Key("default_node_pool.0.node_count").HasValue("1"), @@ -189,7 +188,7 @@ func testAccKubernetesCluster_addonProfileOMSToggle(t *testing.T) { data.ImportStep(), { Config: r.addonProfileOMSDisabledConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("addon_profile.0.http_application_routing.#").HasValue("0"), check.That(data.ResourceName).Key("default_node_pool.0.node_count").HasValue("1"), @@ -201,7 +200,7 @@ func testAccKubernetesCluster_addonProfileOMSToggle(t *testing.T) { data.ImportStep(), { Config: r.addonProfileOMSScaleWithoutBlockConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("addon_profile.0.http_application_routing.#").HasValue("0"), check.That(data.ResourceName).Key("default_node_pool.0.node_count").HasValue("2"), @@ -213,7 +212,7 @@ func testAccKubernetesCluster_addonProfileOMSToggle(t *testing.T) { data.ImportStep(), { Config: r.addonProfileOMSConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( 
check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("addon_profile.0.http_application_routing.#").HasValue("0"), check.That(data.ResourceName).Key("default_node_pool.0.node_count").HasValue("1"), @@ -235,10 +234,10 @@ func testAccKubernetesCluster_addonProfileRoutingToggle(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.addonProfileRoutingConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("addon_profile.0.http_application_routing.#").HasValue("1"), check.That(data.ResourceName).Key("addon_profile.0.http_application_routing.0.enabled").HasValue("true"), @@ -249,7 +248,7 @@ func testAccKubernetesCluster_addonProfileRoutingToggle(t *testing.T) { data.ImportStep(), { Config: r.addonProfileRoutingConfigDisabled(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("addon_profile.0.http_application_routing.#").HasValue("1"), check.That(data.ResourceName).Key("addon_profile.0.http_application_routing.0.enabled").HasValue("false"), @@ -270,10 +269,10 @@ func testAccKubernetesCluster_addonProfileIngressApplicationGateway_appGatewayId data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.addonProfileIngressApplicationGatewayAppGatewayConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), 
check.That(data.ResourceName).Key("addon_profile.0.ingress_application_gateway.0.effective_gateway_id").Exists(), check.That(data.ResourceName).Key("addon_profile.0.ingress_application_gateway.0.effective_gateway_id").MatchesOtherKey( @@ -297,11 +296,12 @@ func testAccKubernetesCluster_addonProfileIngressApplicationGateway_subnetCIDR(t data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.addonProfileIngressApplicationGatewaySubnetCIDRConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("addon_profile.0.ingress_application_gateway.0.gateway_name").Exists(), check.That(data.ResourceName).Key("addon_profile.0.ingress_application_gateway.0.effective_gateway_id").Exists(), check.That(data.ResourceName).Key("addon_profile.0.ingress_application_gateway.0.subnet_cidr").HasValue(addOnAppGatewaySubnetCIDR), ), @@ -309,7 +309,7 @@ func testAccKubernetesCluster_addonProfileIngressApplicationGateway_subnetCIDR(t data.ImportStep(), { Config: r.addonProfileIngressApplicationGatewayDisabledConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("addon_profile.0.ingress_application_gateway.#").HasValue("1"), check.That(data.ResourceName).Key("addon_profile.0.ingress_application_gateway.0.enabled").HasValue("false"), @@ -328,11 +328,12 @@ func testAccKubernetesCluster_addonProfileIngressApplicationGateway_subnetId(t * data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: 
r.addonProfileIngressApplicationGatewaySubnetIdConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("addon_profile.0.ingress_application_gateway.0.gateway_name").Exists(), check.That(data.ResourceName).Key("addon_profile.0.ingress_application_gateway.0.effective_gateway_id").Exists(), ), }, @@ -969,8 +970,9 @@ resource "azurerm_kubernetes_cluster" "test" { addon_profile { ingress_application_gateway { - enabled = true - subnet_cidr = "%s" + enabled = true + gateway_name = "acctestgwn%d" + subnet_cidr = "%s" } kube_dashboard { enabled = false @@ -981,7 +983,7 @@ resource "azurerm_kubernetes_cluster" "test" { type = "SystemAssigned" } } -`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger, addOnAppGatewaySubnetCIDR) +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, addOnAppGatewaySubnetCIDR) } func (KubernetesClusterResource) addonProfileIngressApplicationGatewayDisabledConfig(data acceptance.TestData) string { @@ -1078,8 +1080,9 @@ resource "azurerm_kubernetes_cluster" "test" { addon_profile { ingress_application_gateway { - enabled = true - subnet_id = azurerm_subnet.test.id + enabled = true + gateway_name = "acctestgwn%d" + subnet_id = azurerm_subnet.test.id } kube_dashboard { enabled = false @@ -1090,5 +1093,5 @@ resource "azurerm_kubernetes_cluster" "test" { type = "SystemAssigned" } } -`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger) +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger) } diff --git a/azurerm/internal/services/containers/kubernetes_cluster_auth_resource_test.go 
b/azurerm/internal/services/containers/kubernetes_cluster_auth_resource_test.go index c871192026fc..607505d4ac8f 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_auth_resource_test.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_auth_resource_test.go @@ -4,23 +4,24 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) var kubernetesAuthTests = map[string]func(t *testing.T){ - "apiServerAuthorizedIPRanges": testAccKubernetesCluster_apiServerAuthorizedIPRanges, - "managedClusterIdentity": testAccKubernetesCluster_managedClusterIdentity, - "userAssignedIdentity": testAccKubernetesCluster_userAssignedIdentity, - "updateWithUserAssignedIdentity": testAccKubernetesCluster_updateWithUserAssignedIdentity, - "roleBasedAccessControl": testAccKubernetesCluster_roleBasedAccessControl, - "AAD": testAccKubernetesCluster_roleBasedAccessControlAAD, - "AADUpdateToManaged": testAccKubernetesCluster_roleBasedAccessControlAADUpdateToManaged, - "AADManaged": testAccKubernetesCluster_roleBasedAccessControlAADManaged, - "AADManagedChange": testAccKubernetesCluster_roleBasedAccessControlAADManagedChange, - "roleBasedAccessControlAzure": testAccKubernetesCluster_roleBasedAccessControlAzure, - "servicePrincipal": testAccKubernetesCluster_servicePrincipal, + "apiServerAuthorizedIPRanges": testAccKubernetesCluster_apiServerAuthorizedIPRanges, + "managedClusterIdentity": testAccKubernetesCluster_managedClusterIdentity, + "userAssignedIdentity": testAccKubernetesCluster_userAssignedIdentity, + "updateWithUserAssignedIdentity": testAccKubernetesCluster_updateWithUserAssignedIdentity, + "roleBasedAccessControl": testAccKubernetesCluster_roleBasedAccessControl, + "AAD": testAccKubernetesCluster_roleBasedAccessControlAAD, + "AADUpdateToManaged": 
testAccKubernetesCluster_roleBasedAccessControlAADUpdateToManaged, + "AADManaged": testAccKubernetesCluster_roleBasedAccessControlAADManaged, + "AADManagedChange": testAccKubernetesCluster_roleBasedAccessControlAADManagedChange, + "roleBasedAccessControlAzure": testAccKubernetesCluster_roleBasedAccessControlAzure, + "servicePrincipal": testAccKubernetesCluster_servicePrincipal, + "servicePrincipalToSystemAssigned": testAccKubernetesCluster_servicePrincipalToSystemAssignedIdentity, + "servicePrincipalToUserAssigned": testAccKubernetesCluster_servicePrincipalToUserAssignedIdentity, } func TestAccKubernetesCluster_apiServerAuthorizedIPRanges(t *testing.T) { @@ -32,10 +33,10 @@ func testAccKubernetesCluster_apiServerAuthorizedIPRanges(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.apiServerAuthorizedIPRangesConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("role_based_access_control.#").HasValue("1"), check.That(data.ResourceName).Key("role_based_access_control.0.enabled").HasValue("false"), @@ -61,19 +62,14 @@ func TestAccKubernetesCluster_managedClusterIdentity(t *testing.T) { testAccKubernetesCluster_managedClusterIdentity(t) } -func TestAccKubernetesCluster_userAssignedIdentity(t *testing.T) { - checkIfShouldRunTestsIndividually(t) - testAccKubernetesCluster_userAssignedIdentity(t) -} - func testAccKubernetesCluster_managedClusterIdentity(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.managedClusterIdentityConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: 
acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("identity.0.type").HasValue("SystemAssigned"), check.That(data.ResourceName).Key("kubelet_identity.0.client_id").Exists(), @@ -86,14 +82,19 @@ func testAccKubernetesCluster_managedClusterIdentity(t *testing.T) { }) } +func TestAccKubernetesCluster_userAssignedIdentity(t *testing.T) { + checkIfShouldRunTestsIndividually(t) + testAccKubernetesCluster_userAssignedIdentity(t) +} + func testAccKubernetesCluster_userAssignedIdentity(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.userAssignedIdentityConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("identity.0.type").HasValue("UserAssigned"), check.That(data.ResourceName).Key("identity.0.user_assigned_identity_id").Exists(), @@ -112,18 +113,41 @@ func testAccKubernetesCluster_updateWithUserAssignedIdentity(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.userAssignedIdentityConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.updateWithUserAssignedIdentity(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccKubernetesCluster_userAssignedKubeletIdentity(t *testing.T) { + checkIfShouldRunTestsIndividually(t) + 
testAccKubernetesCluster_userAssignedKubeletIdentity(t) +} + +func testAccKubernetesCluster_userAssignedKubeletIdentity(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") + r := KubernetesClusterResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.userAssignedKubeletIdentityConfig(data), + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("identity.0.type").HasValue("UserAssigned"), + check.That(data.ResourceName).Key("identity.0.user_assigned_identity_id").Exists(), + check.That(data.ResourceName).Key("kubelet_identity.0.user_assigned_identity_id").Exists(), ), }, data.ImportStep(), @@ -139,10 +163,10 @@ func testAccKubernetesCluster_roleBasedAccessControl(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.roleBasedAccessControlConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("role_based_access_control.#").HasValue("1"), check.That(data.ResourceName).Key("role_based_access_control.0.enabled").HasValue("true"), @@ -166,10 +190,10 @@ func testAccKubernetesCluster_roleBasedAccessControlAAD(t *testing.T) { clientData := data.Client() auth := clientData.Default - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.roleBasedAccessControlAADConfig(data, auth.ClientID, auth.ClientSecret, ""), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("role_based_access_control.#").HasValue("1"), 
check.That(data.ResourceName).Key("role_based_access_control.0.enabled").HasValue("true"), @@ -189,7 +213,7 @@ func testAccKubernetesCluster_roleBasedAccessControlAAD(t *testing.T) { // should be no changes since the default for Tenant ID comes from the Provider block Config: r.roleBasedAccessControlAADConfig(data, auth.ClientID, auth.ClientSecret, clientData.TenantID), PlanOnly: true, - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -207,10 +231,10 @@ func testAccKubernetesCluster_roleBasedAccessControlAADUpdateToManaged(t *testin clientData := data.Client() auth := clientData.Default - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.roleBasedAccessControlAADConfig(data, auth.ClientID, auth.ClientSecret, ""), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("role_based_access_control.#").HasValue("1"), check.That(data.ResourceName).Key("role_based_access_control.0.enabled").HasValue("true"), @@ -228,7 +252,7 @@ func testAccKubernetesCluster_roleBasedAccessControlAADUpdateToManaged(t *testin ), { Config: r.roleBasedAccessControlAADManagedConfig(data, ""), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("role_based_access_control.#").HasValue("1"), check.That(data.ResourceName).Key("role_based_access_control.0.enabled").HasValue("true"), @@ -255,10 +279,10 @@ func testAccKubernetesCluster_roleBasedAccessControlAADManaged(t *testing.T) { r := KubernetesClusterResource{} clientData := data.Client() - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.roleBasedAccessControlAADManagedConfig(data, ""), - Check: resource.ComposeTestCheckFunc( + 
Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("role_based_access_control.#").HasValue("1"), check.That(data.ResourceName).Key("role_based_access_control.0.enabled").HasValue("true"), @@ -276,7 +300,7 @@ func testAccKubernetesCluster_roleBasedAccessControlAADManaged(t *testing.T) { // should be no changes since the default for Tenant ID comes from the Provider block Config: r.roleBasedAccessControlAADManagedConfig(data, clientData.TenantID), PlanOnly: true, - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -296,16 +320,17 @@ func testAccKubernetesCluster_roleBasedAccessControlAADManagedChange(t *testing. r := KubernetesClusterResource{} clientData := data.Client() - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.roleBasedAccessControlAADManagedConfig(data, ""), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("role_based_access_control.#").HasValue("1"), check.That(data.ResourceName).Key("role_based_access_control.0.enabled").HasValue("true"), check.That(data.ResourceName).Key("role_based_access_control.0.azure_active_directory.#").HasValue("1"), check.That(data.ResourceName).Key("role_based_access_control.0.azure_active_directory.0.tenant_id").Exists(), check.That(data.ResourceName).Key("role_based_access_control.0.azure_active_directory.0.managed").Exists(), + check.That(data.ResourceName).Key("role_based_access_control.0.azure_active_directory.0.azure_rbac_enabled").HasValue("false"), check.That(data.ResourceName).Key("kube_admin_config.#").HasValue("1"), check.That(data.ResourceName).Key("kube_admin_config_raw").Exists(), check.That(data.ResourceName).Key("default_node_pool.0.node_count").HasValue("1"), @@ -316,7 +341,7 @@ func 
testAccKubernetesCluster_roleBasedAccessControlAADManagedChange(t *testing. ), { Config: r.roleBasedAccessControlAADManagedConfigScale(data, clientData.TenantID), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("default_node_pool.0.node_count").HasValue("2"), ), @@ -337,10 +362,28 @@ func testAccKubernetesCluster_roleBasedAccessControlAzure(t *testing.T) { r := KubernetesClusterResource{} clientData := data.Client() - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.roleBasedAccessControlAADManagedConfig(data, ""), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("role_based_access_control.#").HasValue("1"), + check.That(data.ResourceName).Key("role_based_access_control.0.enabled").HasValue("true"), + check.That(data.ResourceName).Key("role_based_access_control.0.azure_active_directory.#").HasValue("1"), + check.That(data.ResourceName).Key("role_based_access_control.0.azure_active_directory.0.tenant_id").Exists(), + check.That(data.ResourceName).Key("role_based_access_control.0.azure_active_directory.0.managed").Exists(), + check.That(data.ResourceName).Key("role_based_access_control.0.azure_active_directory.0.azure_rbac_enabled").HasValue("false"), + check.That(data.ResourceName).Key("kube_admin_config.#").HasValue("1"), + check.That(data.ResourceName).Key("kube_admin_config_raw").Exists(), + check.That(data.ResourceName).Key("default_node_pool.0.node_count").HasValue("1"), + ), + }, + data.ImportStep( + "role_based_access_control.0.azure_active_directory.0.server_app_secret", + ), { Config: r.roleBasedAccessControlAzureConfig(data, ""), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), 
check.That(data.ResourceName).Key("role_based_access_control.#").HasValue("1"), check.That(data.ResourceName).Key("role_based_access_control.0.enabled").HasValue("true"), @@ -359,7 +402,7 @@ func testAccKubernetesCluster_roleBasedAccessControlAzure(t *testing.T) { // should be no changes since the default for Tenant ID comes from the Provider block Config: r.roleBasedAccessControlAzureConfig(data, clientData.TenantID), PlanOnly: true, - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -379,10 +422,10 @@ func testAccKubernetesCluster_servicePrincipal(t *testing.T) { r := KubernetesClusterResource{} clientData := data.Client() - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.servicePrincipalConfig(data, clientData.Default.ClientID, clientData.Default.ClientSecret), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("identity.%").HasValue("0"), ), @@ -390,12 +433,77 @@ func testAccKubernetesCluster_servicePrincipal(t *testing.T) { data.ImportStep("service_principal.0.client_secret"), { Config: r.servicePrincipalConfig(data, clientData.Alternate.ClientID, clientData.Alternate.ClientSecret), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("identity.%").HasValue("0"), + ), + }, + data.ImportStep("service_principal.0.client_secret"), + }) +} + +func TestAccKubernetesCluster_servicePrincipalToSystemAssignedIdentity(t *testing.T) { + checkIfShouldRunTestsIndividually(t) + testAccKubernetesCluster_servicePrincipalToSystemAssignedIdentity(t) +} + +func testAccKubernetesCluster_servicePrincipalToSystemAssignedIdentity(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", 
"test") + r := KubernetesClusterResource{} + clientData := data.Client() + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.servicePrincipalConfig(data, clientData.Default.ClientID, clientData.Default.ClientSecret), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("identity.%").HasValue("0"), + ), + }, + data.ImportStep("service_principal.0.client_secret"), + { + Config: r.managedClusterIdentityConfig(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("identity.0.type").HasValue("SystemAssigned"), + check.That(data.ResourceName).Key("kubelet_identity.0.client_id").Exists(), + check.That(data.ResourceName).Key("kubelet_identity.0.object_id").Exists(), + check.That(data.ResourceName).Key("kubelet_identity.0.user_assigned_identity_id").Exists(), + check.That(data.ResourceName).Key("service_principal.%").HasValue("0"), + ), + }, + data.ImportStep(), + }) +} + +func TestAccKubernetesCluster_servicePrincipalToUserAssignedIdentity(t *testing.T) { + checkIfShouldRunTestsIndividually(t) + testAccKubernetesCluster_servicePrincipalToUserAssignedIdentity(t) +} + +func testAccKubernetesCluster_servicePrincipalToUserAssignedIdentity(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") + r := KubernetesClusterResource{} + clientData := data.Client() + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.servicePrincipalConfig(data, clientData.Default.ClientID, clientData.Default.ClientSecret), + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("identity.%").HasValue("0"), ), }, data.ImportStep("service_principal.0.client_secret"), + { + Config: r.userAssignedIdentityConfig(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + 
check.That(data.ResourceName).Key("identity.0.type").HasValue("UserAssigned"), + check.That(data.ResourceName).Key("identity.0.user_assigned_identity_id").Exists(), + ), + }, + data.ImportStep(), }) } @@ -412,10 +520,10 @@ func testAccKubernetesCluster_updateRoleBaseAccessControlAAD(t *testing.T) { auth := clientData.Default altAlt := clientData.Alternate - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.roleBasedAccessControlAADConfig(data, auth.ClientID, auth.ClientSecret, clientData.TenantID), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("role_based_access_control.#").HasValue("1"), check.That(data.ResourceName).Key("role_based_access_control.0.enabled").HasValue("true"), @@ -433,7 +541,7 @@ func testAccKubernetesCluster_updateRoleBaseAccessControlAAD(t *testing.T) { ), { Config: r.updateRoleBasedAccessControlAADConfig(data, altAlt.ClientID, altAlt.ClientSecret, clientData.TenantID), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("role_based_access_control.#").HasValue("1"), check.That(data.ResourceName).Key("role_based_access_control.0.enabled").HasValue("true"), @@ -617,6 +725,63 @@ resource "azurerm_kubernetes_cluster" "test" { `, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger) } +func (KubernetesClusterResource) userAssignedKubeletIdentityConfig(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-aks-%d" + location = "%s" +} + +resource "azurerm_user_assigned_identity" "aks_identity_test" { + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + name = "test_identity" +} + 
+resource "azurerm_user_assigned_identity" "kubelet_identity_test" { + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + name = "test_kubelet_identity" +} + +resource "azurerm_role_assignment" "manage_kubelet_identity" { + scope = azurerm_resource_group.test.id + role_definition_name = "Managed Identity Operator" + principal_id = azurerm_user_assigned_identity.aks_identity_test.principal_id + skip_service_principal_aad_check = false +} + +resource "azurerm_kubernetes_cluster" "test" { + depends_on = [azurerm_role_assignment.manage_kubelet_identity] + name = "acctestaks%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + dns_prefix = "acctestaks%d" + + default_node_pool { + name = "default" + node_count = 1 + vm_size = "Standard_DS2_v2" + } + + identity { + type = "UserAssigned" + user_assigned_identity_id = azurerm_user_assigned_identity.aks_identity_test.id + } + + kubelet_identity { + user_assigned_identity_id = azurerm_user_assigned_identity.kubelet_identity_test.id + client_id = azurerm_user_assigned_identity.kubelet_identity_test.client_id + object_id = azurerm_user_assigned_identity.kubelet_identity_test.principal_id + } +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger) +} + func (KubernetesClusterResource) roleBasedAccessControlConfig(data acceptance.TestData) string { return fmt.Sprintf(` provider "azurerm" { @@ -747,8 +912,9 @@ resource "azurerm_kubernetes_cluster" "test" { enabled = true azure_active_directory { - tenant_id = var.tenant_id - managed = true + tenant_id = var.tenant_id + managed = true + azure_rbac_enabled = false } } } diff --git a/azurerm/internal/services/containers/kubernetes_cluster_data_source.go b/azurerm/internal/services/containers/kubernetes_cluster_data_source.go index 2dabe9e1f155..e4f01979c810 100644 --- 
a/azurerm/internal/services/containers/kubernetes_cluster_data_source.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_data_source.go @@ -6,27 +6,27 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2021-03-01/containerservice" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/containers/kubernetes" msiparse "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/msi/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func dataSourceKubernetesCluster() *schema.Resource { - return &schema.Resource{ +func dataSourceKubernetesCluster() *pluginsdk.Resource { + return &pluginsdk.Resource{ Read: dataSourceKubernetesClusterRead, - Timeouts: &schema.ResourceTimeout{ - Read: schema.DefaultTimeout(5 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Read: pluginsdk.DefaultTimeout(5 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, @@ -35,21 +35,21 @@ func dataSourceKubernetesCluster() *schema.Resource { "location": azure.SchemaLocationForDataSource(), "addon_profile": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "http_application_routing": { - Type: 
schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Computed: true, }, "http_application_routing_zone_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, }, @@ -57,33 +57,33 @@ func dataSourceKubernetesCluster() *schema.Resource { }, "oms_agent": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Computed: true, }, "log_analytics_workspace_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "oms_agent_identity": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "client_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "object_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "user_assigned_identity_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, }, @@ -94,12 +94,12 @@ func dataSourceKubernetesCluster() *schema.Resource { }, "kube_dashboard": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Computed: true, }, }, @@ -107,12 +107,12 @@ func dataSourceKubernetesCluster() *schema.Resource { }, "azure_policy": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: 
map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Computed: true, }, }, @@ -120,45 +120,45 @@ func dataSourceKubernetesCluster() *schema.Resource { }, "ingress_application_gateway": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Computed: true, }, "gateway_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "effective_gateway_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "subnet_cidr": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "subnet_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "ingress_application_gateway_identity": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "client_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "object_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "user_assigned_identity_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, }, @@ -172,96 +172,101 @@ func dataSourceKubernetesCluster() *schema.Resource { }, "agent_pool_profile": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "count": { - Type: 
schema.TypeInt, + Type: pluginsdk.TypeInt, Computed: true, }, "max_count": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Computed: true, }, "min_count": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Computed: true, }, "enable_auto_scaling": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Computed: true, }, "availability_zones": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, "vm_size": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "tags": tags.SchemaDataSource(), "os_disk_size_gb": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Computed: true, }, "vnet_subnet_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "os_type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "orchestrator_version": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "max_pods": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Computed: true, }, "node_labels": { - Type: schema.TypeMap, + Type: pluginsdk.TypeMap, Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, "node_taints": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, + Elem: &pluginsdk.Schema{Type: pluginsdk.TypeString}, }, "enable_node_public_ip": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, + Computed: true, + }, + + "node_public_ip_prefix_id": { + Type: pluginsdk.TypeString, Computed: true, }, @@ -271,62 +276,62 @@ func dataSourceKubernetesCluster() *schema.Resource { }, "dns_prefix": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "fqdn": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "api_server_authorized_ip_ranges": { - Type: 
schema.TypeSet, + Type: pluginsdk.TypeSet, Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, "disk_encryption_set_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "private_link_enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Computed: true, }, "private_cluster_enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Computed: true, // TODO -- remove this when deprecation resolves }, "private_fqdn": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "identity": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "user_assigned_identity_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "principal_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "tenant_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, }, @@ -334,39 +339,39 @@ func dataSourceKubernetesCluster() *schema.Resource { }, "kubernetes_version": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "kube_admin_config": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "host": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "username": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "password": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, }, "client_certificate": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, 
}, "client_key": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, }, "cluster_ca_certificate": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, }, @@ -374,40 +379,40 @@ func dataSourceKubernetesCluster() *schema.Resource { }, "kube_admin_config_raw": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, }, "kube_config": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "host": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "username": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "password": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, }, "client_certificate": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "client_key": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, }, "cluster_ca_certificate": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, }, @@ -415,26 +420,26 @@ func dataSourceKubernetesCluster() *schema.Resource { }, "kube_config_raw": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, }, "kubelet_identity": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "client_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "object_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "user_assigned_identity_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, }, @@ -442,22 +447,22 @@ func dataSourceKubernetesCluster() 
*schema.Resource { }, "linux_profile": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "admin_username": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "ssh_key": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "key_data": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, }, @@ -468,12 +473,12 @@ func dataSourceKubernetesCluster() *schema.Resource { }, "windows_profile": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "admin_username": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, }, @@ -481,42 +486,42 @@ func dataSourceKubernetesCluster() *schema.Resource { }, "network_profile": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "network_plugin": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "network_policy": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "service_cidr": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "dns_service_ip": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "docker_bridge_cidr": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "pod_cidr": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "load_balancer_sku": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, 
Computed: true, }, }, @@ -524,49 +529,49 @@ func dataSourceKubernetesCluster() *schema.Resource { }, "node_resource_group": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "role_based_access_control": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Computed: true, }, "azure_active_directory": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "admin_group_object_ids": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, "client_app_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "managed": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Computed: true, }, "server_app_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "tenant_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, }, @@ -577,12 +582,12 @@ func dataSourceKubernetesCluster() *schema.Resource { }, "service_principal": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "client_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, }, @@ -594,7 +599,7 @@ func dataSourceKubernetesCluster() *schema.Resource { } } -func dataSourceKubernetesClusterRead(d *schema.ResourceData, meta interface{}) error { +func dataSourceKubernetesClusterRead(d *pluginsdk.ResourceData, meta interface{}) error { client := 
meta.(*clients.Client).Containers.KubernetesClustersClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() @@ -970,6 +975,11 @@ func flattenKubernetesClusterDataSourceAgentPoolProfiles(input *[]containerservi count = int(*profile.Count) } + enableNodePublicIP := false + if profile.EnableNodePublicIP != nil { + enableNodePublicIP = *profile.EnableNodePublicIP + } + minCount := 0 if profile.MinCount != nil { minCount = int(*profile.MinCount) @@ -990,6 +1000,11 @@ func flattenKubernetesClusterDataSourceAgentPoolProfiles(input *[]containerservi name = *profile.Name } + nodePublicIPPrefixID := "" + if profile.NodePublicIPPrefixID != nil { + nodePublicIPPrefixID = *profile.NodePublicIPPrefixID + } + osDiskSizeGb := 0 if profile.OsDiskSizeGB != nil { osDiskSizeGb = int(*profile.OsDiskSizeGB) @@ -1026,35 +1041,31 @@ func flattenKubernetesClusterDataSourceAgentPoolProfiles(input *[]containerservi nodeTaints = *profile.NodeTaints } - enableNodePublicIP := false - if profile.EnableNodePublicIP != nil { - enableNodePublicIP = *profile.EnableNodePublicIP - } - vmSize := "" if profile.VMSize != nil { vmSize = *profile.VMSize } agentPoolProfiles = append(agentPoolProfiles, map[string]interface{}{ - "availability_zones": utils.FlattenStringSlice(profile.AvailabilityZones), - "count": count, - "enable_auto_scaling": enableAutoScaling, - "enable_node_public_ip": enableNodePublicIP, - "max_count": maxCount, - "max_pods": maxPods, - "min_count": minCount, - "name": name, - "node_labels": nodeLabels, - "node_taints": nodeTaints, - "orchestrator_version": orchestratorVersion, - "os_disk_size_gb": osDiskSizeGb, - "os_type": string(profile.OsType), - "tags": tags.Flatten(profile.Tags), - "type": string(profile.Type), - "upgrade_settings": flattenUpgradeSettings(profile.UpgradeSettings), - "vm_size": vmSize, - "vnet_subnet_id": vnetSubnetId, + "availability_zones": utils.FlattenStringSlice(profile.AvailabilityZones), + "count": count, + 
"enable_auto_scaling": enableAutoScaling, + "enable_node_public_ip": enableNodePublicIP, + "max_count": maxCount, + "max_pods": maxPods, + "min_count": minCount, + "name": name, + "node_labels": nodeLabels, + "node_public_ip_prefix_id": nodePublicIPPrefixID, + "node_taints": nodeTaints, + "orchestrator_version": orchestratorVersion, + "os_disk_size_gb": osDiskSizeGb, + "os_type": string(profile.OsType), + "tags": tags.Flatten(profile.Tags), + "type": string(profile.Type), + "upgrade_settings": flattenUpgradeSettings(profile.UpgradeSettings), + "vm_size": vmSize, + "vnet_subnet_id": vnetSubnetId, }) } diff --git a/azurerm/internal/services/containers/kubernetes_cluster_data_source_test.go b/azurerm/internal/services/containers/kubernetes_cluster_data_source_test.go index 3fe7e19f170b..067b97fb9457 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_data_source_test.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_data_source_test.go @@ -5,7 +5,6 @@ import ( "os" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -36,7 +35,7 @@ var kubernetesDataSourceTests = map[string]func(t *testing.T){ "autoscalingNoAvailabilityZones": testAccDataSourceKubernetesCluster_autoscalingNoAvailabilityZones, "autoscalingWithAvailabilityZones": testAccDataSourceKubernetesCluster_autoscalingWithAvailabilityZones, "nodeLabels": testAccDataSourceKubernetesCluster_nodeLabels, - "enableNodePublicIP": testAccDataSourceKubernetesCluster_enableNodePublicIP, + "nodePublicIP": testAccDataSourceKubernetesCluster_nodePublicIP, "privateCluster": testAccDataSourceKubernetesCluster_privateCluster, } @@ -49,10 +48,10 @@ func testAccDataSourceKubernetesCluster_basic(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_kubernetes_cluster", "test") r := 
KubernetesClusterDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.basicConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("role_based_access_control.#").HasValue("1"), check.That(data.ResourceName).Key("role_based_access_control.0.enabled").HasValue("false"), check.That(data.ResourceName).Key("kube_config.0.client_key").Exists(), @@ -82,10 +81,10 @@ func TestAccDataSourceKubernetesCluster_privateCluster(t *testing.T) { func testAccDataSourceKubernetesCluster_privateCluster(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: KubernetesClusterResource{}.privateClusterConfig(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("private_fqdn").Exists(), check.That(data.ResourceName).Key("private_cluster_enabled").HasValue("true"), ), @@ -103,10 +102,10 @@ func testAccDataSourceKubernetesCluster_roleBasedAccessControl(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_kubernetes_cluster", "test") r := KubernetesClusterDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.roleBasedAccessControlConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("role_based_access_control.#").HasValue("1"), check.That(data.ResourceName).Key("role_based_access_control.0.enabled").HasValue("true"), check.That(data.ResourceName).Key("role_based_access_control.0.azure_active_directory.#").HasValue("0"), @@ -129,10 +128,10 @@ func testAccDataSourceKubernetesCluster_roleBasedAccessControlAAD(t *testing.T) clientSecret := os.Getenv("ARM_CLIENT_SECRET") tenantId := 
os.Getenv("ARM_TENANT_ID") - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.roleBasedAccessControlAADConfig(data, clientId, clientSecret, tenantId), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("role_based_access_control.#").HasValue("1"), check.That(data.ResourceName).Key("role_based_access_control.0.enabled").HasValue("true"), check.That(data.ResourceName).Key("role_based_access_control.0.azure_active_directory.#").HasValue("1"), @@ -155,10 +154,10 @@ func testAccDataSourceKubernetesCluster_internalNetwork(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_kubernetes_cluster", "test") r := KubernetesClusterDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.internalNetworkConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("agent_pool_profile.0.vnet_subnet_id").Exists(), ), }, @@ -174,10 +173,10 @@ func testAccDataSourceKubernetesCluster_advancedNetworkingAzure(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_kubernetes_cluster", "test") r := KubernetesClusterDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.advancedNetworkingAzureConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("agent_pool_profile.0.vnet_subnet_id").Exists(), check.That(data.ResourceName).Key("network_profile.0.network_plugin").HasValue("azure"), check.That(data.ResourceName).Key("network_profile.0.network_plugin").Exists(), @@ -198,10 +197,10 @@ func testAccDataSourceKubernetesCluster_advancedNetworkingAzureCalicoPolicy(t *t data := acceptance.BuildTestData(t, "data.azurerm_kubernetes_cluster", "test") r := KubernetesClusterDataSource{} - 
data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.advancedNetworkingAzureCalicoPolicyConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("agent_pool_profile.0.vnet_subnet_id").Exists(), check.That(data.ResourceName).Key("network_profile.0.network_plugin").HasValue("azure"), check.That(data.ResourceName).Key("network_profile.0.network_policy").HasValue("calico"), @@ -224,10 +223,10 @@ func testAccDataSourceKubernetesCluster_advancedNetworkingAzureNPMPolicy(t *test data := acceptance.BuildTestData(t, "data.azurerm_kubernetes_cluster", "test") r := KubernetesClusterDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.advancedNetworkingAzureNPMPolicyConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("agent_pool_profile.0.vnet_subnet_id").Exists(), check.That(data.ResourceName).Key("network_profile.0.network_plugin").HasValue("azure"), check.That(data.ResourceName).Key("network_profile.0.network_policy").HasValue("azure"), @@ -250,10 +249,10 @@ func testAccDataSourceKubernetesCluster_advancedNetworkingAzureComplete(t *testi data := acceptance.BuildTestData(t, "data.azurerm_kubernetes_cluster", "test") r := KubernetesClusterDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.advancedNetworkingAzureCompleteConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("agent_pool_profile.0.vnet_subnet_id").Exists(), check.That(data.ResourceName).Key("network_profile.0.network_plugin").HasValue("azure"), check.That(data.ResourceName).Key("network_profile.0.network_plugin").Exists(), @@ -274,10 +273,10 @@ func 
testAccDataSourceKubernetesCluster_advancedNetworkingAzureCalicoPolicyCompl data := acceptance.BuildTestData(t, "data.azurerm_kubernetes_cluster", "test") r := KubernetesClusterDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.advancedNetworkingAzureCalicoPolicyCompleteConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("agent_pool_profile.0.vnet_subnet_id").Exists(), check.That(data.ResourceName).Key("network_profile.0.network_plugin").HasValue("azure"), check.That(data.ResourceName).Key("network_profile.0.network_policy").HasValue("calico"), @@ -300,10 +299,10 @@ func testAccDataSourceKubernetesCluster_advancedNetworkingAzureNPMPolicyComplete data := acceptance.BuildTestData(t, "data.azurerm_kubernetes_cluster", "test") r := KubernetesClusterDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.advancedNetworkingAzureNPMPolicyCompleteConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("agent_pool_profile.0.vnet_subnet_id").Exists(), check.That(data.ResourceName).Key("network_profile.0.network_plugin").HasValue("azure"), check.That(data.ResourceName).Key("network_profile.0.network_policy").HasValue("azure"), @@ -326,10 +325,10 @@ func testAccDataSourceKubernetesCluster_advancedNetworkingKubenet(t *testing.T) data := acceptance.BuildTestData(t, "data.azurerm_kubernetes_cluster", "test") r := KubernetesClusterDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.advancedNetworkingKubenetConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("agent_pool_profile.0.vnet_subnet_id").Exists(), 
check.That(data.ResourceName).Key("network_profile.0.network_plugin").HasValue("kubenet"), check.That(data.ResourceName).Key("network_profile.0.network_plugin").Exists(), @@ -350,10 +349,10 @@ func testAccDataSourceKubernetesCluster_advancedNetworkingKubenetComplete(t *tes data := acceptance.BuildTestData(t, "data.azurerm_kubernetes_cluster", "test") r := KubernetesClusterDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.advancedNetworkingKubenetCompleteConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("agent_pool_profile.0.vnet_subnet_id").Exists(), check.That(data.ResourceName).Key("network_profile.0.network_plugin").HasValue("kubenet"), check.That(data.ResourceName).Key("network_profile.0.network_plugin").Exists(), @@ -374,10 +373,10 @@ func testAccDataSourceKubernetesCluster_addOnProfileOMS(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_kubernetes_cluster", "test") r := KubernetesClusterDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.addOnProfileOMSConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("addon_profile.#").HasValue("1"), check.That(data.ResourceName).Key("addon_profile.0.oms_agent.#").HasValue("1"), check.That(data.ResourceName).Key("addon_profile.0.oms_agent.0.enabled").HasValue("true"), @@ -399,10 +398,10 @@ func testAccDataSourceKubernetesCluster_addOnProfileKubeDashboard(t *testing.T) data := acceptance.BuildTestData(t, "data.azurerm_kubernetes_cluster", "test") r := KubernetesClusterDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.addOnProfileKubeDashboardConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( 
check.That(data.ResourceName).Key("addon_profile.#").HasValue("1"), check.That(data.ResourceName).Key("addon_profile.0.kube_dashboard.#").HasValue("1"), check.That(data.ResourceName).Key("addon_profile.0.kube_dashboard.0.enabled").HasValue("false"), @@ -420,10 +419,10 @@ func testAccDataSourceKubernetesCluster_addOnProfileAzurePolicy(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.addOnProfileAzurePolicyConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("addon_profile.0.azure_policy.#").HasValue("1"), check.That(data.ResourceName).Key("addon_profile.0.azure_policy.0.enabled").HasValue("true"), ), @@ -440,10 +439,10 @@ func testAccDataSourceKubernetesCluster_addOnProfileRouting(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_kubernetes_cluster", "test") r := KubernetesClusterDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.addOnProfileRoutingConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("addon_profile.#").HasValue("1"), check.That(data.ResourceName).Key("addon_profile.0.http_application_routing.#").HasValue("1"), check.That(data.ResourceName).Key("addon_profile.0.http_application_routing.0.enabled").HasValue("true"), @@ -462,10 +461,10 @@ func testAccDataSourceKubernetesCluster_addOnProfileIngressApplicationGatewayApp data := acceptance.BuildTestData(t, "data.azurerm_kubernetes_cluster", "test") r := KubernetesClusterDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.addOnProfileIngressApplicationGatewayAppGatewayConfig(data), - Check: resource.ComposeTestCheckFunc( + 
Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("addon_profile.#").HasValue("1"), check.That(data.ResourceName).Key("addon_profile.0.ingress_application_gateway.#").HasValue("1"), check.That(data.ResourceName).Key("addon_profile.0.ingress_application_gateway.0.enabled").HasValue("true"), @@ -491,10 +490,10 @@ func testAccDataSourceKubernetesCluster_addOnProfileIngressApplicationGatewaySub data := acceptance.BuildTestData(t, "data.azurerm_kubernetes_cluster", "test") r := KubernetesClusterDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.addOnProfileIngressApplicationGatewaySubnetCIDRConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("addon_profile.#").HasValue("1"), check.That(data.ResourceName).Key("addon_profile.0.ingress_application_gateway.#").HasValue("1"), check.That(data.ResourceName).Key("addon_profile.0.ingress_application_gateway.0.enabled").HasValue("true"), @@ -515,10 +514,10 @@ func testAccDataSourceKubernetesCluster_addOnProfileIngressApplicationGatewaySub data := acceptance.BuildTestData(t, "data.azurerm_kubernetes_cluster", "test") r := KubernetesClusterDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.addOnProfileIngressApplicationGatewaySubnetIdConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("addon_profile.#").HasValue("1"), check.That(data.ResourceName).Key("addon_profile.0.ingress_application_gateway.#").HasValue("1"), check.That(data.ResourceName).Key("addon_profile.0.ingress_application_gateway.0.enabled").HasValue("true"), @@ -538,15 +537,15 @@ func testAccDataSourceKubernetesCluster_autoscalingNoAvailabilityZones(t *testin data := acceptance.BuildTestData(t, "data.azurerm_kubernetes_cluster", "test") r := 
KubernetesClusterDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.autoScalingNoAvailabilityZonesConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("agent_pool_profile.0.min_count").HasValue("1"), check.That(data.ResourceName).Key("agent_pool_profile.0.max_count").HasValue("2"), check.That(data.ResourceName).Key("agent_pool_profile.0.type").HasValue("VirtualMachineScaleSets"), check.That(data.ResourceName).Key("agent_pool_profile.0.enable_auto_scaling").HasValue("true"), - resource.TestCheckNoResourceAttr(data.ResourceName, "agent_pool_profile.0.availability_zones"), + acceptance.TestCheckNoResourceAttr(data.ResourceName, "agent_pool_profile.0.availability_zones"), ), }, }) @@ -561,10 +560,10 @@ func testAccDataSourceKubernetesCluster_autoscalingWithAvailabilityZones(t *test data := acceptance.BuildTestData(t, "data.azurerm_kubernetes_cluster", "test") r := KubernetesClusterDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.autoScalingWithAvailabilityZonesConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("agent_pool_profile.0.min_count").HasValue("1"), check.That(data.ResourceName).Key("agent_pool_profile.0.max_count").HasValue("2"), check.That(data.ResourceName).Key("agent_pool_profile.0.type").HasValue("VirtualMachineScaleSets"), @@ -587,30 +586,31 @@ func testAccDataSourceKubernetesCluster_nodeLabels(t *testing.T) { r := KubernetesClusterDataSource{} labels := map[string]string{"key": "value"} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.nodeLabelsConfig(data, labels), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( 
check.That(data.ResourceName).Key("agent_pool_profile.0.node_labels.key").HasValue("value"), ), }, }) } -func TestAccDataSourceKubernetesCluster_enableNodePublicIP(t *testing.T) { +func TestAccDataSourceKubernetesCluster_nodePublicIP(t *testing.T) { checkIfShouldRunTestsIndividually(t) - testAccDataSourceKubernetesCluster_enableNodePublicIP(t) + testAccDataSourceKubernetesCluster_nodePublicIP(t) } -func testAccDataSourceKubernetesCluster_enableNodePublicIP(t *testing.T) { +func testAccDataSourceKubernetesCluster_nodePublicIP(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_kubernetes_cluster", "test") r := KubernetesClusterDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { - Config: r.enableNodePublicIPConfig(data), - Check: resource.ComposeTestCheckFunc( + Config: r.nodePublicIPConfig(data), + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("agent_pool_profile.0.enable_node_public_ip").HasValue("true"), + check.That(data.ResourceName).Key("agent_pool_profile.0.node_public_ip_prefix_id").Exists(), ), }, }) @@ -858,7 +858,7 @@ data "azurerm_kubernetes_cluster" "test" { `, KubernetesClusterResource{}.nodeLabelsConfig(data, labels)) } -func (KubernetesClusterDataSource) enableNodePublicIPConfig(data acceptance.TestData) string { +func (KubernetesClusterDataSource) nodePublicIPConfig(data acceptance.TestData) string { return fmt.Sprintf(` %s @@ -866,5 +866,5 @@ data "azurerm_kubernetes_cluster" "test" { name = azurerm_kubernetes_cluster.test.name resource_group_name = azurerm_kubernetes_cluster.test.resource_group_name } -`, KubernetesClusterResource{}.enableNodePublicIPConfig(data, true)) +`, KubernetesClusterResource{}.nodePublicIPPrefixConfig(data)) } diff --git a/azurerm/internal/services/containers/kubernetes_cluster_network_resource_test.go b/azurerm/internal/services/containers/kubernetes_cluster_network_resource_test.go index fbe9f2e750f2..c2bde05cd4a1 100644 
--- a/azurerm/internal/services/containers/kubernetes_cluster_network_resource_test.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_network_resource_test.go @@ -6,7 +6,6 @@ import ( "regexp" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -41,10 +40,10 @@ func testAccKubernetesCluster_advancedNetworkingKubenet(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.advancedNetworkingConfig(data, "kubenet"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("network_profile.0.network_plugin").HasValue("kubenet"), ), @@ -62,10 +61,10 @@ func testAccKubernetesCluster_advancedNetworkingKubenetComplete(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.advancedNetworkingCompleteConfig(data, "kubenet"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("network_profile.0.network_plugin").HasValue("kubenet"), ), @@ -83,10 +82,10 @@ func testAccKubernetesCluster_advancedNetworkingAzure(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.advancedNetworkingConfig(data, "azure"), - Check: 
resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("network_profile.0.network_plugin").HasValue("azure"), ), @@ -104,10 +103,10 @@ func testAccKubernetesCluster_advancedNetworkingAzureComplete(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.advancedNetworkingCompleteConfig(data, "azure"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("network_profile.0.network_plugin").HasValue("azure"), ), @@ -125,10 +124,10 @@ func testAccKubernetesCluster_advancedNetworkingAzureCalicoPolicy(t *testing.T) data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.advancedNetworkingWithPolicyConfig(data, "azure", "calico"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("network_profile.0.network_plugin").HasValue("azure"), check.That(data.ResourceName).Key("network_profile.0.network_policy").HasValue("calico"), @@ -147,10 +146,10 @@ func testAccKubernetesCluster_advancedNetworkingAzureCalicoPolicyComplete(t *tes data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.advancedNetworkingWithPolicyCompleteConfig(data, "azure", "calico"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( 
check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("network_profile.0.network_plugin").HasValue("azure"), check.That(data.ResourceName).Key("network_profile.0.network_policy").HasValue("calico"), @@ -164,10 +163,10 @@ func TestAccKubernetesCluster_advancedNetworkingAzureCalicoPolicyNetworkModeTran data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.advancedNetworkingWithPolicyNetworkMode(data, "azure", "calico", "transparent"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("network_profile.0.network_plugin").HasValue("azure"), check.That(data.ResourceName).Key("network_profile.0.network_policy").HasValue("calico"), @@ -187,10 +186,10 @@ func testAccKubernetesCluster_advancedNetworkingAzureNPMPolicy(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.advancedNetworkingWithPolicyConfig(data, "azure", "azure"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("network_profile.0.network_plugin").HasValue("azure"), check.That(data.ResourceName).Key("network_profile.0.network_policy").HasValue("azure"), @@ -209,10 +208,10 @@ func testAccKubernetesCluster_advancedNetworkingAzureNPMPolicyComplete(t *testin data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.advancedNetworkingWithPolicyCompleteConfig(data, "azure", 
"azure"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("network_profile.0.network_plugin").HasValue("azure"), check.That(data.ResourceName).Key("network_profile.0.network_policy").HasValue("azure"), @@ -231,29 +230,10 @@ func testAccKubernetesCluster_enableNodePublicIP(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { - // Enabled - Config: r.enableNodePublicIPConfig(data, true), - Check: resource.ComposeTestCheckFunc( - check.That(data.ResourceName).ExistsInAzure(r), - check.That(data.ResourceName).Key("default_node_pool.0.enable_node_public_ip").HasValue("true"), - ), - }, - data.ImportStep(), - { - // Disabled - Config: r.enableNodePublicIPConfig(data, false), - Check: resource.ComposeTestCheckFunc( - check.That(data.ResourceName).ExistsInAzure(r), - check.That(data.ResourceName).Key("default_node_pool.0.enable_node_public_ip").HasValue("false"), - ), - }, - data.ImportStep(), - { - // Enabled - Config: r.enableNodePublicIPConfig(data, true), - Check: resource.ComposeTestCheckFunc( + Config: r.enableNodePublicIPConfig(data), + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("default_node_pool.0.enable_node_public_ip").HasValue("true"), ), @@ -271,10 +251,10 @@ func testAccKubernetesCluster_internalNetwork(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.internalNetworkConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), 
check.That(data.ResourceName).Key("default_node_pool.0.max_pods").HasValue("60"), ), @@ -283,6 +263,28 @@ func testAccKubernetesCluster_internalNetwork(t *testing.T) { }) } +func TestAccKubernetesCluster_nodePublicIPPrefix(t *testing.T) { + checkIfShouldRunTestsIndividually(t) + testAccKubernetesCluster_nodePublicIPPrefix(t) +} + +func testAccKubernetesCluster_nodePublicIPPrefix(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") + r := KubernetesClusterResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.nodePublicIPPrefixConfig(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("default_node_pool.0.enable_node_public_ip").HasValue("true"), + check.That(data.ResourceName).Key("default_node_pool.0.node_public_ip_prefix_id").Exists(), + ), + }, + data.ImportStep(), + }) +} + func TestAccKubernetesCluster_outboundTypeLoadBalancer(t *testing.T) { checkIfShouldRunTestsIndividually(t) testAccKubernetesCluster_outboundTypeLoadBalancer(t) @@ -292,10 +294,10 @@ func testAccKubernetesCluster_outboundTypeLoadBalancer(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.outboundTypeLoadBalancerConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -312,10 +314,10 @@ func testAccKubernetesCluster_privateClusterOn(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.privateClusterConfig(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( 
check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("private_fqdn").Exists(), check.That(data.ResourceName).Key("private_cluster_enabled").HasValue("true"), @@ -334,10 +336,10 @@ func testAccKubernetesCluster_privateClusterOnWithPrivateDNSZone(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.privateClusterWithPrivateDNSZoneConfig(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("private_cluster_enabled").HasValue("true"), ), @@ -357,10 +359,10 @@ func testAccKubernetesCluster_privateClusterOnWithPrivateDNSZoneAndServicePrinci clientSecret := os.Getenv("ARM_CLIENT_SECRET") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.privateClusterWithPrivateDNSZoneAndServicePrincipalConfig(data, true, clientId, clientSecret), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("private_cluster_enabled").HasValue("true"), ), @@ -377,10 +379,10 @@ func testAccKubernetesCluster_privateClusterOnWithPrivateDNSZoneSubDomain(t *tes data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.privateClusterWithPrivateDNSZoneSubDomain(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -396,10 +398,10 @@ func testAccKubernetesCluster_privateClusterOnWithPrivateDNSZoneSystem(t *testin data := 
acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.privateClusterWithPrivateDNSZoneSystemConfig(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("private_cluster_enabled").HasValue("true"), ), @@ -417,10 +419,10 @@ func testAccKubernetesCluster_privateClusterOff(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.privateClusterConfig(data, false), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("private_cluster_enabled").HasValue("false"), ), @@ -438,10 +440,10 @@ func testAccKubernetesCluster_standardLoadBalancer(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.standardLoadBalancerConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("network_profile.0.load_balancer_sku").HasValue("Standard"), ), @@ -459,10 +461,10 @@ func testAccKubernetesCluster_standardLoadBalancerComplete(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.standardLoadBalancerCompleteConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: 
acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("network_profile.0.load_balancer_sku").HasValue("Standard"), ), @@ -480,10 +482,10 @@ func testAccKubernetesCluster_standardLoadBalancerProfile(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.standardLoadBalancerProfileConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("network_profile.0.load_balancer_sku").HasValue("Standard"), check.That(data.ResourceName).Key("network_profile.0.load_balancer_profile.0.managed_outbound_ip_count").HasValue("3"), @@ -505,10 +507,10 @@ func testAccKubernetesCluster_standardLoadBalancerProfileComplete(t *testing.T) data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.standardLoadBalancerProfileCompleteConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("network_profile.0.load_balancer_sku").HasValue("Standard"), check.That(data.ResourceName).Key("network_profile.0.load_balancer_profile.0.effective_outbound_ips.#").HasValue("1"), @@ -530,10 +532,10 @@ func testAccKubernetesCluster_standardLoadBalancerProfileWithPortAndTimeout(t *t clientId := os.Getenv("ARM_CLIENT_ID") clientSecret := os.Getenv("ARM_CLIENT_SECRET") - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.standardLoadBalancerProfileWithPortAndTimeoutConfig(data, clientId, clientSecret), - Check: 
resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("network_profile.0.load_balancer_profile.0.outbound_ports_allocated").HasValue("8000"), check.That(data.ResourceName).Key("network_profile.0.load_balancer_profile.0.idle_timeout_in_minutes").HasValue("10"), @@ -552,7 +554,7 @@ func testAccKubernetesCluster_basicLoadBalancerProfile(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicLoadBalancerProfileConfig(data), ExpectError: regexp.MustCompile("only load balancer SKU 'Standard' supports load balancer profiles. Provided load balancer type: basic"), @@ -569,10 +571,10 @@ func testAccKubernetesCluster_prefixedLoadBalancerProfile(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.prefixedLoadBalancerProfileConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("network_profile.0.load_balancer_sku").HasValue("Standard"), check.That(data.ResourceName).Key("network_profile.0.load_balancer_profile.0.outbound_ip_prefix_ids.#").HasValue("1"), @@ -592,10 +594,10 @@ func testAccKubernetesCluster_changingLoadBalancerProfile(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.changingLoadBalancerProfileConfigIPPrefix(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( 
check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("network_profile.0.load_balancer_sku").HasValue("Standard"), check.That(data.ResourceName).Key("network_profile.0.load_balancer_profile.0.outbound_ip_prefix_ids.#").HasValue("1"), @@ -605,7 +607,7 @@ func testAccKubernetesCluster_changingLoadBalancerProfile(t *testing.T) { data.ImportStep(), { Config: r.changingLoadBalancerProfileConfigManagedIPs(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("network_profile.0.load_balancer_sku").HasValue("Standard"), check.That(data.ResourceName).Key("network_profile.0.load_balancer_profile.0.managed_outbound_ip_count").HasValue("1"), @@ -615,7 +617,7 @@ func testAccKubernetesCluster_changingLoadBalancerProfile(t *testing.T) { data.ImportStep(), { Config: r.changingLoadBalancerProfileConfigIPIds(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("network_profile.0.load_balancer_sku").HasValue("Standard"), check.That(data.ResourceName).Key("network_profile.0.load_balancer_profile.0.outbound_ip_address_ids.#").HasValue("1"), @@ -625,7 +627,7 @@ func testAccKubernetesCluster_changingLoadBalancerProfile(t *testing.T) { data.ImportStep(), { Config: r.changingLoadBalancerProfileConfigIPPrefix(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("network_profile.0.load_balancer_profile.0.outbound_ip_prefix_ids.#").HasValue("1"), check.That(data.ResourceName).Key("network_profile.0.load_balancer_profile.0.effective_outbound_ips.#").HasValue("1"), @@ -972,7 +974,7 @@ resource "azurerm_kubernetes_cluster" "test" { `, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, 
data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, networkMode, networkPlugin, networkPolicy) } -func (KubernetesClusterResource) enableNodePublicIPConfig(data acceptance.TestData, enabled bool) string { +func (KubernetesClusterResource) enableNodePublicIPConfig(data acceptance.TestData) string { return fmt.Sprintf(` provider "azurerm" { features {} @@ -993,14 +995,14 @@ resource "azurerm_kubernetes_cluster" "test" { name = "default" node_count = 1 vm_size = "Standard_DS2_v2" - enable_node_public_ip = %t + enable_node_public_ip = true } identity { type = "SystemAssigned" } } -`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, enabled) +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger) } func (KubernetesClusterResource) internalNetworkConfig(data acceptance.TestData) string { @@ -1057,6 +1059,45 @@ resource "azurerm_kubernetes_cluster" "test" { `, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger) } +func (KubernetesClusterResource) nodePublicIPPrefixConfig(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-aks-%d" + location = "%s" +} + +resource "azurerm_public_ip_prefix" "test" { + name = "acctestpipprefix%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + prefix_length = 31 +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + dns_prefix = "acctestaks%d" + + default_node_pool { + name = "default" + node_count = 1 + vm_size = "Standard_DS2_v2" + enable_node_public_ip = true + node_public_ip_prefix_id = azurerm_public_ip_prefix.test.id + } + + identity { 
+ type = "SystemAssigned" + } +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger) +} + func (KubernetesClusterResource) outboundTypeLoadBalancerConfig(data acceptance.TestData) string { return fmt.Sprintf(` provider "azurerm" { diff --git a/azurerm/internal/services/containers/kubernetes_cluster_node_pool_data_source.go b/azurerm/internal/services/containers/kubernetes_cluster_node_pool_data_source.go index 757ea2b6d1f9..2adfb6690819 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_node_pool_data_source.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_node_pool_data_source.go @@ -5,33 +5,33 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2021-03-01/containerservice" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/containers/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func dataSourceKubernetesClusterNodePool() *schema.Resource { - return &schema.Resource{ +func dataSourceKubernetesClusterNodePool() *pluginsdk.Resource { + return &pluginsdk.Resource{ Read: dataSourceKubernetesClusterNodePoolRead, - Timeouts: &schema.ResourceTimeout{ - Read: schema.DefaultTimeout(5 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Read: 
pluginsdk.DefaultTimeout(5 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validate.KubernetesAgentPoolName, }, "kubernetes_cluster_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringIsNotEmpty, }, @@ -40,101 +40,106 @@ func dataSourceKubernetesClusterNodePool() *schema.Resource { // Computed "availability_zones": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, "enable_auto_scaling": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Computed: true, }, "enable_node_public_ip": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Computed: true, }, "eviction_policy": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "max_count": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Computed: true, }, "max_pods": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Computed: true, }, "mode": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "min_count": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Computed: true, }, "node_count": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Computed: true, }, "node_labels": { - Type: schema.TypeMap, + Type: pluginsdk.TypeMap, Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, + "node_public_ip_prefix_id": { + Type: pluginsdk.TypeString, + Computed: true, + }, + "node_taints": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, "orchestrator_version": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, 
Computed: true, }, "os_disk_size_gb": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Computed: true, }, "os_disk_type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "os_type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "priority": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "proximity_placement_group_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "spot_max_price": { - Type: schema.TypeFloat, + Type: pluginsdk.TypeFloat, Computed: true, }, @@ -143,19 +148,19 @@ func dataSourceKubernetesClusterNodePool() *schema.Resource { "upgrade_settings": upgradeSettingsForDataSourceSchema(), "vm_size": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "vnet_subnet_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, }, } } -func dataSourceKubernetesClusterNodePoolRead(d *schema.ResourceData, meta interface{}) error { +func dataSourceKubernetesClusterNodePoolRead(d *pluginsdk.ResourceData, meta interface{}) error { clustersClient := meta.(*clients.Client).Containers.KubernetesClustersClient poolsClient := meta.(*clients.Client).Containers.AgentPoolsClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) @@ -241,6 +246,8 @@ func dataSourceKubernetesClusterNodePoolRead(d *schema.ResourceData, meta interf return fmt.Errorf("setting `node_labels`: %+v", err) } + d.Set("node_public_ip_prefix_id", props.NodePublicIPPrefixID) + if err := d.Set("node_taints", utils.FlattenStringSlice(props.NodeTaints)); err != nil { return fmt.Errorf("setting `node_taints`: %+v", err) } diff --git a/azurerm/internal/services/containers/kubernetes_cluster_node_pool_data_source_test.go b/azurerm/internal/services/containers/kubernetes_cluster_node_pool_data_source_test.go index 0c5c1cd7a175..48a862f274b1 100644 --- 
a/azurerm/internal/services/containers/kubernetes_cluster_node_pool_data_source_test.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_node_pool_data_source_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -25,10 +24,10 @@ func testAccKubernetesClusterNodePoolDataSource_basic(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_kubernetes_cluster_node_pool", "test") r := KubernetesClusterNodePoolDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.basicConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("node_count").HasValue("1"), check.That(data.ResourceName).Key("tags.%").HasValue("1"), check.That(data.ResourceName).Key("tags.environment").HasValue("Staging"), diff --git a/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go b/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go index 87efd1df4628..1e13072d00ff 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go @@ -7,8 +7,6 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2021-03-01/containerservice" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" @@ -17,12 +15,13 @@ import ( 
containerValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/containers/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func resourceKubernetesClusterNodePool() *schema.Resource { - return &schema.Resource{ +func resourceKubernetesClusterNodePool() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceKubernetesClusterNodePoolCreate, Read: resourceKubernetesClusterNodePoolRead, Update: resourceKubernetesClusterNodePoolUpdate, @@ -33,30 +32,30 @@ func resourceKubernetesClusterNodePool() *schema.Resource { return err }), - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(60 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(60 * time.Minute), - Delete: schema.DefaultTimeout(60 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(60 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(60 * time.Minute), + Delete: pluginsdk.DefaultTimeout(60 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: containerValidate.KubernetesAgentPoolName, }, "kubernetes_cluster_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: containerValidate.ClusterID, }, "node_count": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Computed: true, ValidateFunc: 
validation.IntBetween(0, 1000), @@ -65,7 +64,7 @@ func resourceKubernetesClusterNodePool() *schema.Resource { "tags": tags.Schema(), "vm_size": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validation.StringIsNotEmpty, @@ -73,32 +72,33 @@ func resourceKubernetesClusterNodePool() *schema.Resource { // Optional "availability_zones": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, ForceNew: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, "enable_auto_scaling": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, }, "enable_host_encryption": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, ForceNew: true, }, "enable_node_public_ip": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, + ForceNew: true, }, "eviction_policy": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, ValidateFunc: validation.StringInSlice([]string{ @@ -107,21 +107,40 @@ func resourceKubernetesClusterNodePool() *schema.Resource { }, false), }, + "kubelet_config": schemaNodePoolKubeletConfig(), + + "linux_os_config": schemaNodePoolLinuxOSConfig(), + + "fips_enabled": { + Type: pluginsdk.TypeBool, + Optional: true, + ForceNew: true, + }, + + "kubelet_disk_type": { + Type: pluginsdk.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice([]string{ + string(containerservice.KubeletDiskTypeOS), + }, false), + }, + "max_count": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, ValidateFunc: validation.IntBetween(0, 1000), }, "max_pods": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Computed: true, ForceNew: true, }, "mode": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Default: string(containerservice.AgentPoolModeUser), ValidateFunc: 
validation.StringInSlice([]string{ @@ -131,39 +150,46 @@ func resourceKubernetesClusterNodePool() *schema.Resource { }, "min_count": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, // NOTE: rather than setting `0` users should instead pass `null` here ValidateFunc: validation.IntBetween(0, 1000), }, "node_labels": { - Type: schema.TypeMap, + Type: pluginsdk.TypeMap, Optional: true, ForceNew: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, + "node_public_ip_prefix_id": { + Type: pluginsdk.TypeString, + Optional: true, + ForceNew: true, + RequiredWith: []string{"enable_node_public_ip"}, + }, + "node_taints": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, ForceNew: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, "orchestrator_version": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Computed: true, ValidateFunc: validation.StringIsNotEmpty, }, "os_disk_size_gb": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, ForceNew: true, Computed: true, @@ -171,7 +197,7 @@ func resourceKubernetesClusterNodePool() *schema.Resource { }, "os_disk_type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, Default: containerservice.OSDiskTypeManaged, @@ -182,7 +208,7 @@ func resourceKubernetesClusterNodePool() *schema.Resource { }, "os_type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, Default: string(containerservice.OSTypeLinux), @@ -193,7 +219,7 @@ func resourceKubernetesClusterNodePool() *schema.Resource { }, "priority": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, Default: string(containerservice.ScaleSetPriorityRegular), @@ -204,14 +230,14 @@ func resourceKubernetesClusterNodePool() *schema.Resource { }, 
"proximity_placement_group_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, ValidateFunc: computeValidate.ProximityPlacementGroupID, }, "spot_max_price": { - Type: schema.TypeFloat, + Type: pluginsdk.TypeFloat, Optional: true, ForceNew: true, Default: -1.0, @@ -219,7 +245,7 @@ func resourceKubernetesClusterNodePool() *schema.Resource { }, "vnet_subnet_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, ValidateFunc: azure.ValidateResourceID, @@ -230,7 +256,7 @@ func resourceKubernetesClusterNodePool() *schema.Resource { } } -func resourceKubernetesClusterNodePoolCreate(d *schema.ResourceData, meta interface{}) error { +func resourceKubernetesClusterNodePoolCreate(d *pluginsdk.ResourceData, meta interface{}) error { containersClient := meta.(*clients.Client).Containers clustersClient := containersClient.KubernetesClustersClient poolsClient := containersClient.AgentPoolsClient @@ -297,7 +323,9 @@ func resourceKubernetesClusterNodePoolCreate(d *schema.ResourceData, meta interf profile := containerservice.ManagedClusterAgentPoolProfileProperties{ OsType: containerservice.OSType(osType), EnableAutoScaling: utils.Bool(enableAutoScaling), + EnableFIPS: utils.Bool(d.Get("fips_enabled").(bool)), EnableNodePublicIP: utils.Bool(d.Get("enable_node_public_ip").(bool)), + KubeletDiskType: containerservice.KubeletDiskType(d.Get("kubelet_disk_type").(string)), Mode: mode, ScaleSetPriority: containerservice.ScaleSetPriority(priority), Tags: tags.Expand(t), @@ -346,6 +374,10 @@ func resourceKubernetesClusterNodePoolCreate(d *schema.ResourceData, meta interf profile.NodeLabels = nodeLabels } + if nodePublicIPPrefixID := d.Get("node_public_ip_prefix_id").(string); nodePublicIPPrefixID != "" { + profile.NodePublicIPPrefixID = utils.String(nodePublicIPPrefixID) + } + nodeTaintsRaw := d.Get("node_taints").([]interface{}) if nodeTaints := utils.ExpandStringSlice(nodeTaintsRaw); len(*nodeTaints) > 0 
{ profile.NodeTaints = nodeTaints @@ -396,6 +428,21 @@ func resourceKubernetesClusterNodePoolCreate(d *schema.ResourceData, meta interf return fmt.Errorf("`max_count` and `min_count` must be set to `null` when enable_auto_scaling is set to `false`") } + if kubeletConfig := d.Get("kubelet_config").([]interface{}); len(kubeletConfig) > 0 { + profile.KubeletConfig = expandAgentPoolKubeletConfig(kubeletConfig) + } + + if linuxOSConfig := d.Get("linux_os_config").([]interface{}); len(linuxOSConfig) > 0 { + if osType != string(containerservice.OSTypeLinux) { + return fmt.Errorf("`linux_os_config` can only be configured when `os_type` is set to `linux`") + } + linuxOSConfig, err := expandAgentPoolLinuxOSConfig(linuxOSConfig) + if err != nil { + return err + } + profile.LinuxOSConfig = linuxOSConfig + } + parameters := containerservice.AgentPool{ Name: &name, ManagedClusterAgentPoolProfileProperties: &profile, @@ -424,7 +471,7 @@ func resourceKubernetesClusterNodePoolCreate(d *schema.ResourceData, meta interf return resourceKubernetesClusterNodePoolRead(d, meta) } -func resourceKubernetesClusterNodePoolUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceKubernetesClusterNodePoolUpdate(d *pluginsdk.ResourceData, meta interface{}) error { containersClient := meta.(*clients.Client).Containers client := containersClient.AgentPoolsClient ctx, cancel := timeouts.ForUpdate(meta.(*clients.Client).StopContext, d) @@ -496,6 +543,10 @@ func resourceKubernetesClusterNodePoolUpdate(d *schema.ResourceData, meta interf props.Count = utils.Int32(int32(d.Get("node_count").(int))) } + if d.HasChange("node_public_ip_prefix_id") { + props.NodePublicIPPrefixID = utils.String(d.Get("node_public_ip_prefix_id").(string)) + } + if d.HasChange("orchestrator_version") { // Spot Node pool's can't be updated - Azure Docs: https://docs.microsoft.com/en-us/azure/aks/spot-node-pool // > You can't upgrade a spot node pool since spot node pools can't guarantee cordon and drain. 
@@ -568,7 +619,7 @@ func resourceKubernetesClusterNodePoolUpdate(d *schema.ResourceData, meta interf return resourceKubernetesClusterNodePoolRead(d, meta) } -func resourceKubernetesClusterNodePoolRead(d *schema.ResourceData, meta interface{}) error { +func resourceKubernetesClusterNodePoolRead(d *pluginsdk.ResourceData, meta interface{}) error { clustersClient := meta.(*clients.Client).Containers.KubernetesClustersClient poolsClient := meta.(*clients.Client).Containers.AgentPoolsClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) @@ -613,6 +664,8 @@ func resourceKubernetesClusterNodePoolRead(d *schema.ResourceData, meta interfac d.Set("enable_auto_scaling", props.EnableAutoScaling) d.Set("enable_node_public_ip", props.EnableNodePublicIP) d.Set("enable_host_encryption", props.EnableEncryptionAtHost) + d.Set("fips_enabled", props.EnableFIPS) + d.Set("kubelet_disk_type", string(props.KubeletDiskType)) evictionPolicy := "" if props.ScaleSetEvictionPolicy != "" { @@ -620,6 +673,18 @@ func resourceKubernetesClusterNodePoolRead(d *schema.ResourceData, meta interfac } d.Set("eviction_policy", evictionPolicy) + if err := d.Set("kubelet_config", flattenAgentPoolKubeletConfig(props.KubeletConfig)); err != nil { + return fmt.Errorf("setting `kubelet_config`: %+v", err) + } + + linuxOSConfig, err := flattenAgentPoolLinuxOSConfig(props.LinuxOSConfig) + if err != nil { + return err + } + if err := d.Set("linux_os_config", linuxOSConfig); err != nil { + return fmt.Errorf("setting `linux_os_config`: %+v", err) + } + maxCount := 0 if props.MaxCount != nil { maxCount = int(*props.MaxCount) @@ -654,6 +719,8 @@ func resourceKubernetesClusterNodePoolRead(d *schema.ResourceData, meta interfac return fmt.Errorf("setting `node_labels`: %+v", err) } + d.Set("node_public_ip_prefix_id", props.NodePublicIPPrefixID) + if err := d.Set("node_taints", utils.FlattenStringSlice(props.NodeTaints)); err != nil { return fmt.Errorf("setting `node_taints`: %+v", err) } @@ -698,7 
+765,7 @@ func resourceKubernetesClusterNodePoolRead(d *schema.ResourceData, meta interfac return tags.FlattenAndSet(d, resp.Tags) } -func resourceKubernetesClusterNodePoolDelete(d *schema.ResourceData, meta interface{}) error { +func resourceKubernetesClusterNodePoolDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Containers.AgentPoolsClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() @@ -720,15 +787,15 @@ func resourceKubernetesClusterNodePoolDelete(d *schema.ResourceData, meta interf return nil } -func upgradeSettingsSchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, +func upgradeSettingsSchema() *pluginsdk.Schema { + return &pluginsdk.Schema{ + Type: pluginsdk.TypeList, Optional: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "max_surge": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, }, @@ -736,14 +803,14 @@ func upgradeSettingsSchema() *schema.Schema { } } -func upgradeSettingsForDataSourceSchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, +func upgradeSettingsForDataSourceSchema() *pluginsdk.Schema { + return &pluginsdk.Schema{ + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "max_surge": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, }, diff --git a/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource_test.go b/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource_test.go index 335374da4709..7e4f80876f8b 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource_test.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource_test.go @@ 
-8,12 +8,11 @@ import ( "strings" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/containers/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -25,6 +24,9 @@ var kubernetesNodePoolTests = map[string]func(t *testing.T){ "autoScaleUpdate": testAccKubernetesClusterNodePool_autoScaleUpdate, "availabilityZones": testAccKubernetesClusterNodePool_availabilityZones, "errorForAvailabilitySet": testAccKubernetesClusterNodePool_errorForAvailabilitySet, + "kubeletAndLinuxOSConfig": testAccKubernetesClusterNodePool_kubeletAndLinuxOSConfig, + "kubeletAndLinuxOSConfigPartial": testAccKubernetesClusterNodePool_kubeletAndLinuxOSConfigPartial, + "other": testAccKubernetesClusterNodePool_other, "multiplePools": testAccKubernetesClusterNodePool_multiplePools, "manualScale": testAccKubernetesClusterNodePool_manualScale, "manualScaleMultiplePools": testAccKubernetesClusterNodePool_manualScaleMultiplePools, @@ -60,11 +62,11 @@ func testAccKubernetesClusterNodePool_autoScale(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "test") r := KubernetesClusterNodePoolResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // Enabled Config: r.autoScaleConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("tags.%").HasValue("0"), 
), @@ -73,7 +75,7 @@ func testAccKubernetesClusterNodePool_autoScale(t *testing.T) { { // Disabled Config: r.manualScaleConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("tags.environment").HasValue("Staging"), ), @@ -82,7 +84,7 @@ func testAccKubernetesClusterNodePool_autoScale(t *testing.T) { { // Enabled Config: r.autoScaleConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("tags.%").HasValue("0"), ), @@ -100,24 +102,24 @@ func testAccKubernetesClusterNodePool_autoScaleUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "test") r := KubernetesClusterNodePoolResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.autoScaleNodeCountConfig(data, 1, 3), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.autoScaleNodeCountConfig(data, 3, 5), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.autoScaleNodeCountConfig(data, 0, 3), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -134,10 +136,10 @@ func testAccKubernetesClusterNodePool_availabilityZones(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "test") r := KubernetesClusterNodePoolResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.availabilityZonesConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: 
acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -154,7 +156,7 @@ func testAccKubernetesClusterNodePool_errorForAvailabilitySet(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "test") r := KubernetesClusterNodePoolResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.availabilitySetConfig(data), ExpectError: regexp.MustCompile("must be a VirtualMachineScaleSet to attach multiple node pools"), @@ -162,6 +164,67 @@ func testAccKubernetesClusterNodePool_errorForAvailabilitySet(t *testing.T) { }) } +func TestAccKubernetesClusterNodePool_kubeletAndLinuxOSConfig(t *testing.T) { + checkIfShouldRunTestsIndividually(t) + testAccKubernetesClusterNodePool_kubeletAndLinuxOSConfig(t) +} + +func testAccKubernetesClusterNodePool_kubeletAndLinuxOSConfig(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "test") + r := KubernetesClusterNodePoolResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.kubeletAndLinuxOSConfig(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccKubernetesClusterNodePool_kubeletAndLinuxOSConfigPartial(t *testing.T) { + checkIfShouldRunTestsIndividually(t) + testAccKubernetesClusterNodePool_kubeletAndLinuxOSConfigPartial(t) +} + +func testAccKubernetesClusterNodePool_kubeletAndLinuxOSConfigPartial(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "test") + r := KubernetesClusterNodePoolResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.kubeletAndLinuxOSConfigPartial(data), + Check: acceptance.ComposeTestCheckFunc( + + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccKubernetesClusterNodePool_other(t *testing.T) { + 
checkIfShouldRunTestsIndividually(t) + testAccKubernetesClusterNodePool_other(t) +} + +func testAccKubernetesClusterNodePool_other(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "test") + r := KubernetesClusterNodePoolResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.other(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + func TestAccKubernetesClusterNodePool_multiplePools(t *testing.T) { checkIfShouldRunTestsIndividually(t) testAccKubernetesClusterNodePool_multiplePools(t) @@ -171,10 +234,10 @@ func testAccKubernetesClusterNodePool_multiplePools(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "autoscale") r := KubernetesClusterNodePoolResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.multiplePoolsConfig(data, 3), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -196,10 +259,10 @@ func testAccKubernetesClusterNodePool_manualScale(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "test") r := KubernetesClusterNodePoolResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.manualScaleConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -216,10 +279,10 @@ func testAccKubernetesClusterNodePool_manualScaleMultiplePools(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "first") r := KubernetesClusterNodePoolResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.manualScaleMultiplePoolsConfig(data), - 
Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That("azurerm_kubernetes_cluster_node_pool.second").ExistsInAzure(r), ), @@ -242,10 +305,10 @@ func testAccKubernetesClusterNodePool_manualScaleMultiplePoolsUpdate(t *testing. data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "first") r := KubernetesClusterNodePoolResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.manualScaleMultiplePoolsNodeCountConfig(data, 1), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That("azurerm_kubernetes_cluster_node_pool.second").ExistsInAzure(r), ), @@ -258,7 +321,7 @@ func testAccKubernetesClusterNodePool_manualScaleMultiplePoolsUpdate(t *testing. }, { Config: r.manualScaleMultiplePoolsNodeCountConfig(data, 2), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That("azurerm_kubernetes_cluster_node_pool.second").ExistsInAzure(r), ), @@ -282,10 +345,10 @@ func testAccKubernetesClusterNodePool_manualScaleIgnoreChanges(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "test") r := KubernetesClusterNodePoolResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.manualScaleIgnoreChangesConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("node_count").HasValue("1"), data.CheckWithClient(r.scaleNodePool(2)), @@ -293,7 +356,7 @@ func testAccKubernetesClusterNodePool_manualScaleIgnoreChanges(t *testing.T) { }, { Config: r.manualScaleIgnoreChangesUpdatedConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: 
acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("node_count").HasValue("2"), ), @@ -310,10 +373,10 @@ func testAccKubernetesClusterNodePool_manualScaleUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "test") r := KubernetesClusterNodePoolResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.manualScaleNodeCountConfig(data, 1), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -321,7 +384,7 @@ func testAccKubernetesClusterNodePool_manualScaleUpdate(t *testing.T) { { // up Config: r.manualScaleNodeCountConfig(data, 3), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -329,7 +392,7 @@ func testAccKubernetesClusterNodePool_manualScaleUpdate(t *testing.T) { { // and down Config: r.manualScaleNodeCountConfig(data, 1), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -346,17 +409,17 @@ func testAccKubernetesClusterNodePool_manualScaleVMSku(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "test") r := KubernetesClusterNodePoolResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.manualScaleVMSkuConfig(data, "Standard_F2s_v2"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.manualScaleVMSkuConfig(data, "Standard_F4s_v2"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -373,10 +436,10 @@ func 
testAccKubernetesClusterNodePool_modeSystem(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "test") r := KubernetesClusterNodePoolResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.modeSystemConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -393,24 +456,24 @@ func testAccKubernetesClusterNodePool_modeUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "test") r := KubernetesClusterNodePoolResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.modeUserConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.modeSystemConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.modeUserConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -430,24 +493,24 @@ func testAccKubernetesClusterNodePool_nodeLabels(t *testing.T) { labels2 := map[string]string{"key2": "value2"} labels3 := map[string]string{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.nodeLabelsConfig(data, labels1), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("node_labels.%").HasValue("1"), check.That(data.ResourceName).Key("node_labels.key").HasValue("value"), ), }, { Config: r.nodeLabelsConfig(data, labels2), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( 
check.That(data.ResourceName).Key("node_labels.%").HasValue("1"), check.That(data.ResourceName).Key("node_labels.key2").HasValue("value2"), ), }, { Config: r.nodeLabelsConfig(data, labels3), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("node_labels.%").HasValue("0"), ), }, @@ -463,10 +526,10 @@ func testAccKubernetesClusterNodePool_nodePublicIP(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "test") r := KubernetesClusterNodePoolResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.nodePublicIPConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -483,10 +546,10 @@ func testAccKubernetesClusterNodePool_nodeTaints(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "test") r := KubernetesClusterNodePoolResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.nodeTaintsConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -503,10 +566,10 @@ func testAccKubernetesClusterNodePool_osDiskSizeGB(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "test") r := KubernetesClusterNodePoolResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.osDiskSizeGBConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -523,10 +586,10 @@ func testAccKubernetesClusterNodePool_proximityPlacementGroupId(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "test") r := 
KubernetesClusterNodePoolResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.proximityPlacementGroupIdConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -543,10 +606,10 @@ func testAccKubernetesClusterNodePool_osDiskType(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "test") r := KubernetesClusterNodePoolResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.osDiskTypeConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -563,10 +626,10 @@ func testAccKubernetesClusterNodePool_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "test") r := KubernetesClusterNodePoolResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.manualScaleConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -586,10 +649,10 @@ func testAccKubernetesClusterNodePool_spot(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "test") r := KubernetesClusterNodePoolResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.spotConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -606,10 +669,10 @@ func testAccKubernetesClusterNodePool_upgradeSettings(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "test") r := KubernetesClusterNodePoolResource{} - data.ResourceTest(t, r, 
[]resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.upgradeSettingsConfig(data, "2"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("upgrade_settings.#").HasValue("1"), check.That(data.ResourceName).Key("upgrade_settings.0.max_surge").HasValue("2"), @@ -618,7 +681,7 @@ func testAccKubernetesClusterNodePool_upgradeSettings(t *testing.T) { data.ImportStep(), { Config: r.upgradeSettingsConfig(data, "4"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("upgrade_settings.#").HasValue("1"), check.That(data.ResourceName).Key("upgrade_settings.0.max_surge").HasValue("4"), @@ -627,7 +690,7 @@ func testAccKubernetesClusterNodePool_upgradeSettings(t *testing.T) { data.ImportStep(), { Config: r.upgradeSettingsConfig(data, ""), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("upgrade_settings.#").HasValue("0"), ), @@ -645,10 +708,10 @@ func testAccKubernetesClusterNodePool_virtualNetworkAutomatic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "test") r := KubernetesClusterNodePoolResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.virtualNetworkAutomaticConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -665,10 +728,10 @@ func testAccKubernetesClusterNodePool_virtualNetworkManual(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "test") r := KubernetesClusterNodePoolResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, 
[]acceptance.TestStep{ { Config: r.virtualNetworkManualConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -685,10 +748,10 @@ func testAccKubernetesClusterNodePool_windows(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "test") r := KubernetesClusterNodePoolResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.windowsConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("tags.Os").HasValue("Windows"), ), @@ -706,10 +769,10 @@ func testAccKubernetesClusterNodePool_windowsAndLinux(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "test") r := KubernetesClusterNodePoolResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.windowsAndLinuxConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That("azurerm_kubernetes_cluster_node_pool.linux").ExistsInAzure(r), check.That("azurerm_kubernetes_cluster_node_pool.windows").ExistsInAzure(r), ), @@ -736,10 +799,10 @@ func testAccKubernetesClusterNodePool_zeroSize(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "test") r := KubernetesClusterNodePoolResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.zeroSizeConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -756,10 +819,10 @@ func testAccKubernetesClusterNodePool_hostEncryption(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "test") r := 
KubernetesClusterNodePoolResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.hostEncryption(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("enable_host_encryption").HasValue("true"), ), @@ -776,10 +839,10 @@ func testAccKubernetesClusterNodePool_maxSize(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "test") r := KubernetesClusterNodePoolResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.maxSizeConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -796,10 +859,10 @@ func testAccKubernetesClusterNodePool_sameSize(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "test") r := KubernetesClusterNodePoolResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.sameSizeConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -807,7 +870,7 @@ func testAccKubernetesClusterNodePool_sameSize(t *testing.T) { }) } -func (t KubernetesClusterNodePoolResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (t KubernetesClusterNodePoolResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := parse.NodePoolID(state.ID) if err != nil { return nil, err @@ -822,7 +885,7 @@ func (t KubernetesClusterNodePoolResource) Exists(ctx context.Context, clients * } func (KubernetesClusterNodePoolResource) scaleNodePool(nodeCount int) acceptance.ClientCheckFunc { - return func(ctx 
context.Context, clients *clients.Client, state *terraform.InstanceState) error { + return func(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) error { nodePoolName := state.Attributes["name"] kubernetesClusterId := state.Attributes["kubernetes_cluster_id"] parsedK8sId, err := parse.ClusterID(kubernetesClusterId) @@ -937,6 +1000,147 @@ resource "azurerm_kubernetes_cluster_node_pool" "test" { `, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger) } +func (KubernetesClusterNodePoolResource) kubeletAndLinuxOSConfig(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-aks-%d" + location = "%s" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + dns_prefix = "acctestaks%d" + + default_node_pool { + name = "default" + node_count = 1 + vm_size = "Standard_DS2_v2" + } + + identity { + type = "SystemAssigned" + } +} + +resource "azurerm_kubernetes_cluster_node_pool" "test" { + name = "internal" + kubernetes_cluster_id = azurerm_kubernetes_cluster.test.id + vm_size = "Standard_DS2_v2" + node_count = 1 + + kubelet_config { + cpu_manager_policy = "static" + cpu_cfs_quota_enabled = true + cpu_cfs_quota_period = "10ms" + image_gc_high_threshold = 90 + image_gc_low_threshold = 70 + topology_manager_policy = "best-effort" + allowed_unsafe_sysctls = ["kernel.msg*", "net.core.somaxconn"] + container_log_max_size_mb = 100 + container_log_max_line = 100000 + pod_max_pid = 12345 + } + + linux_os_config { + transparent_huge_page_enabled = "always" + transparent_huge_page_defrag = "always" + swap_file_size_mb = 300 + + sysctl_config { + fs_aio_max_nr = 65536 + fs_file_max = 100000 + fs_inotify_max_user_watches = 1000000 + fs_nr_open = 1048576 + kernel_threads_max = 200000 + 
net_core_netdev_max_backlog = 1800 + net_core_optmem_max = 30000 + net_core_rmem_max = 300000 + net_core_rmem_default = 300000 + net_core_somaxconn = 5000 + net_core_wmem_default = 300000 + net_core_wmem_max = 300000 + net_ipv4_ip_local_port_range_min = 32768 + net_ipv4_ip_local_port_range_max = 60000 + net_ipv4_neigh_default_gc_thresh1 = 128 + net_ipv4_neigh_default_gc_thresh2 = 512 + net_ipv4_neigh_default_gc_thresh3 = 1024 + net_ipv4_tcp_fin_timeout = 60 + net_ipv4_tcp_keepalive_probes = 9 + net_ipv4_tcp_keepalive_time = 6000 + net_ipv4_tcp_max_syn_backlog = 2048 + net_ipv4_tcp_max_tw_buckets = 100000 + net_ipv4_tcp_tw_reuse = true + net_ipv4_tcp_keepalive_intvl = 70 + net_netfilter_nf_conntrack_buckets = 65536 + net_netfilter_nf_conntrack_max = 200000 + vm_max_map_count = 65536 + vm_swappiness = 45 + vm_vfs_cache_pressure = 80 + } + } +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger) +} + +func (KubernetesClusterNodePoolResource) kubeletAndLinuxOSConfigPartial(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-aks-%d" + location = "%s" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + dns_prefix = "acctestaks%d" + + default_node_pool { + name = "default" + node_count = 1 + vm_size = "Standard_DS2_v2" + } + + identity { + type = "SystemAssigned" + } +} + +resource "azurerm_kubernetes_cluster_node_pool" "test" { + name = "internal" + kubernetes_cluster_id = azurerm_kubernetes_cluster.test.id + vm_size = "Standard_DS2_v2" + node_count = 1 + + kubelet_config { + cpu_manager_policy = "static" + cpu_cfs_quota_enabled = true + cpu_cfs_quota_period = "10ms" + } + + linux_os_config { + transparent_huge_page_enabled = "always" + + sysctl_config { + fs_aio_max_nr = 65536 + fs_file_max = 
100000 + fs_inotify_max_user_watches = 1000000 + } + } +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger) +} + func (KubernetesClusterNodePoolResource) availabilityZonesConfig(data acceptance.TestData) string { return fmt.Sprintf(` provider "azurerm" { @@ -1244,14 +1448,22 @@ provider "azurerm" { %s +resource "azurerm_public_ip_prefix" "test" { + name = "acctestpipprefix%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + prefix_length = 31 +} + resource "azurerm_kubernetes_cluster_node_pool" "test" { - name = "internal" - kubernetes_cluster_id = azurerm_kubernetes_cluster.test.id - vm_size = "Standard_DS2_v2" - node_count = 1 - enable_node_public_ip = true + name = "internal" + kubernetes_cluster_id = azurerm_kubernetes_cluster.test.id + vm_size = "Standard_DS2_v2" + node_count = 1 + enable_node_public_ip = true + node_public_ip_prefix_id = azurerm_public_ip_prefix.test.id } -`, r.templateConfig(data)) +`, r.templateConfig(data), data.RandomInteger) } func (r KubernetesClusterNodePoolResource) nodeTaintsConfig(data acceptance.TestData) string { @@ -1682,3 +1894,22 @@ resource "azurerm_kubernetes_cluster_node_pool" "test" { } `, r.templateConfig(data)) } + +func (r KubernetesClusterNodePoolResource) other(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +%s + +resource "azurerm_kubernetes_cluster_node_pool" "test" { + name = "internal" + kubernetes_cluster_id = azurerm_kubernetes_cluster.test.id + vm_size = "Standard_DS2_v2" + node_count = 3 + fips_enabled = true + kubelet_disk_type = "OS" +} +`, r.templateConfig(data)) +} diff --git a/azurerm/internal/services/containers/kubernetes_cluster_other_resource_test.go b/azurerm/internal/services/containers/kubernetes_cluster_other_resource_test.go index 3a1707053539..359ade3e6824 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_other_resource_test.go 
+++ b/azurerm/internal/services/containers/kubernetes_cluster_other_resource_test.go @@ -5,7 +5,6 @@ import ( "strings" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -15,13 +14,18 @@ var kubernetesOtherTests = map[string]func(t *testing.T){ "basicVMSS": testAccKubernetesCluster_basicVMSS, "requiresImport": testAccKubernetesCluster_requiresImport, "criticalAddonsTaint": testAccKubernetesCluster_criticalAddonsTaint, + "kubeletAndLinuxOSConfig": testAccKubernetesCluster_kubeletAndLinuxOSConfig, + "kubeletAndLinuxOSConfig_partial": testAccKubernetesCluster_kubeletAndLinuxOSConfigPartial, "linuxProfile": testAccKubernetesCluster_linuxProfile, "nodeLabels": testAccKubernetesCluster_nodeLabels, "nodeResourceGroup": testAccKubernetesCluster_nodeResourceGroup, + "nodePoolOther": testAccKubernetesCluster_nodePoolOther, "paidSku": testAccKubernetesCluster_paidSku, "upgradeConfig": testAccKubernetesCluster_upgrade, "tags": testAccKubernetesCluster_tags, "windowsProfile": testAccKubernetesCluster_windowsProfile, + "windowsProfileLicense": testAccKubernetesCluster_windowsProfileLicense, + "updateWindowsProfileLicense": testAccKubernetesCluster_updateWindowsProfileLicense, "outboundTypeLoadBalancer": testAccKubernetesCluster_outboundTypeLoadBalancer, "privateClusterOn": testAccKubernetesCluster_privateClusterOn, "privateClusterOff": testAccKubernetesCluster_privateClusterOff, @@ -46,10 +50,10 @@ func testAccKubernetesCluster_basicAvailabilitySet(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicAvailabilitySetConfig(data), - Check: resource.ComposeTestCheckFunc( + Check:
acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("role_based_access_control.#").HasValue("1"), check.That(data.ResourceName).Key("role_based_access_control.0.enabled").HasValue("false"), @@ -73,10 +77,10 @@ func testAccKubernetesCluster_sameSizeVMSSConfig(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.sameSize(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("role_based_access_control.#").HasValue("1"), check.That(data.ResourceName).Key("role_based_access_control.0.enabled").HasValue("false"), @@ -105,10 +109,10 @@ func testAccKubernetesCluster_basicVMSS(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicVMSSConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("role_based_access_control.#").HasValue("1"), check.That(data.ResourceName).Key("role_based_access_control.0.enabled").HasValue("false"), @@ -137,10 +141,10 @@ func testAccKubernetesCluster_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicVMSSConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -160,10 +164,10 @@ func 
testAccKubernetesCluster_criticalAddonsTaint(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.criticalAddonsTaintConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("default_node_pool.0.only_critical_addons_enabled").HasValue("true"), ), @@ -172,6 +176,46 @@ func testAccKubernetesCluster_criticalAddonsTaint(t *testing.T) { }) } +func TestAccKubernetesCluster_kubeletAndLinuxOSConfig(t *testing.T) { + checkIfShouldRunTestsIndividually(t) + testAccKubernetesCluster_kubeletAndLinuxOSConfig(t) +} + +func testAccKubernetesCluster_kubeletAndLinuxOSConfig(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") + r := KubernetesClusterResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.kubeletAndLinuxOSConfig(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccKubernetesCluster_kubeletAndLinuxOSConfigPartial(t *testing.T) { + checkIfShouldRunTestsIndividually(t) + testAccKubernetesCluster_kubeletAndLinuxOSConfigPartial(t) +} + +func testAccKubernetesCluster_kubeletAndLinuxOSConfigPartial(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") + r := KubernetesClusterResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.kubeletAndLinuxOSConfigPartial(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + func TestAccKubernetesCluster_linuxProfile(t *testing.T) { checkIfShouldRunTestsIndividually(t) testAccKubernetesCluster_linuxProfile(t) @@ -181,10 +225,10 @@ func 
testAccKubernetesCluster_linuxProfile(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.linuxProfileConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("kube_config.0.client_key").Exists(), check.That(data.ResourceName).Key("kube_config.0.client_certificate").Exists(), @@ -211,26 +255,26 @@ func testAccKubernetesCluster_nodeLabels(t *testing.T) { labels2 := map[string]string{"key2": "value2"} labels3 := map[string]string{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.nodeLabelsConfig(data, labels1), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("default_node_pool.0.node_labels.key").HasValue("value"), ), }, { Config: r.nodeLabelsConfig(data, labels2), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("default_node_pool.0.node_labels.key2").HasValue("value2"), ), }, { Config: r.nodeLabelsConfig(data, labels3), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), - resource.TestCheckNoResourceAttr(data.ResourceName, "default_node_pool.0.node_labels"), + acceptance.TestCheckNoResourceAttr(data.ResourceName, "default_node_pool.0.node_labels"), ), }, }) @@ -245,10 +289,30 @@ func testAccKubernetesCluster_nodeResourceGroup(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + 
data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.nodeResourceGroupConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccKubernetesCluster_nodePoolOther(t *testing.T) { + checkIfShouldRunTestsIndividually(t) + testAccKubernetesCluster_nodePoolOther(t) +} + +func testAccKubernetesCluster_nodePoolOther(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") + r := KubernetesClusterResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.nodePoolOther(data), + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -265,17 +329,24 @@ func testAccKubernetesCluster_upgradeSkuTier(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.freeSkuConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.paidSkuConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.freeSkuConfig(data), + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -292,10 +363,10 @@ func testAccKubernetesCluster_paidSku(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.paidSkuConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( 
check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -312,17 +383,17 @@ func testAccKubernetesCluster_upgrade(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.upgradeConfig(data, olderKubernetesVersion), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("kubernetes_version").HasValue(olderKubernetesVersion), ), }, { Config: r.upgradeConfig(data, currentKubernetesVersion), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("kubernetes_version").HasValue(currentKubernetesVersion), ), @@ -339,17 +410,17 @@ func testAccKubernetesCluster_tags(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.tagsConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.tagsUpdatedConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -366,10 +437,10 @@ func testAccKubernetesCluster_windowsProfile(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.windowsProfileConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), 
check.That(data.ResourceName).Key("kube_config.0.client_key").Exists(), check.That(data.ResourceName).Key("kube_config.0.client_certificate").Exists(), @@ -388,6 +459,62 @@ func testAccKubernetesCluster_windowsProfile(t *testing.T) { }) } +func TestAccKubernetesCluster_windowsProfileLicense(t *testing.T) { + checkIfShouldRunTestsIndividually(t) + testAccKubernetesCluster_windowsProfileLicense(t) +} + +func testAccKubernetesCluster_windowsProfileLicense(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") + r := KubernetesClusterResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.windowsProfileLicense(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep( + "windows_profile.0.admin_password", + ), + }) +} + +func TestAccKubernetesCluster_updateWindowsProfileLicense(t *testing.T) { + checkIfShouldRunTestsIndividually(t) + testAccKubernetesCluster_updateWindowsProfileLicense(t) +} + +func testAccKubernetesCluster_updateWindowsProfileLicense(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") + r := KubernetesClusterResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.windowsProfileConfig(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep("windows_profile.0.admin_password"), + { + Config: r.windowsProfileLicense(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep("windows_profile.0.admin_password"), + { + Config: r.windowsProfileConfig(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep("windows_profile.0.admin_password"), + }) +} + func TestAccKubernetesCluster_diskEncryption(t *testing.T) { checkIfShouldRunTestsIndividually(t) 
testAccKubernetesCluster_diskEncryption(t) @@ -397,10 +524,10 @@ func testAccKubernetesCluster_diskEncryption(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.diskEncryptionConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("disk_encryption_set_id").Exists(), ), @@ -420,10 +547,10 @@ func testAccKubernetesCluster_upgradeChannel(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.upgradeChannelConfig(data, olderKubernetesVersion, "rapid"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("kubernetes_version").HasValue(olderKubernetesVersion), check.That(data.ResourceName).Key("automatic_channel_upgrade").HasValue("rapid"), @@ -432,7 +559,7 @@ func testAccKubernetesCluster_upgradeChannel(t *testing.T) { data.ImportStep(), { Config: r.upgradeChannelConfig(data, olderKubernetesVersion, "patch"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("kubernetes_version").HasValue(olderKubernetesVersion), check.That(data.ResourceName).Key("automatic_channel_upgrade").HasValue("patch"), @@ -442,7 +569,7 @@ func testAccKubernetesCluster_upgradeChannel(t *testing.T) { { // unset = none Config: r.upgradeChannelConfig(data, olderKubernetesVersion, ""), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( 
check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("kubernetes_version").HasValue(olderKubernetesVersion), check.That(data.ResourceName).Key("automatic_channel_upgrade").HasValue(""), @@ -451,7 +578,7 @@ func testAccKubernetesCluster_upgradeChannel(t *testing.T) { data.ImportStep(), { Config: r.upgradeChannelConfig(data, olderKubernetesVersion, "stable"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("kubernetes_version").HasValue(olderKubernetesVersion), check.That(data.ResourceName).Key("automatic_channel_upgrade").HasValue("stable"), @@ -775,6 +902,131 @@ resource "azurerm_kubernetes_cluster" "test" { `, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger) } +func (KubernetesClusterResource) kubeletAndLinuxOSConfig(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-aks-%d" + location = "%s" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + dns_prefix = "acctestaks%d" + + default_node_pool { + name = "default" + node_count = 1 + vm_size = "Standard_DS2_v2" + kubelet_config { + cpu_manager_policy = "static" + cpu_cfs_quota_enabled = true + cpu_cfs_quota_period = "10ms" + image_gc_high_threshold = 90 + image_gc_low_threshold = 70 + topology_manager_policy = "best-effort" + allowed_unsafe_sysctls = ["kernel.msg*", "net.core.somaxconn"] + container_log_max_size_mb = 100 + container_log_max_line = 100000 + pod_max_pid = 12345 + } + + linux_os_config { + transparent_huge_page_enabled = "always" + transparent_huge_page_defrag = "always" + swap_file_size_mb = 300 + + sysctl_config { + fs_aio_max_nr = 65536 + fs_file_max = 100000 + 
fs_inotify_max_user_watches = 1000000 + fs_nr_open = 1048576 + kernel_threads_max = 200000 + net_core_netdev_max_backlog = 1800 + net_core_optmem_max = 30000 + net_core_rmem_max = 300000 + net_core_rmem_default = 300000 + net_core_somaxconn = 5000 + net_core_wmem_default = 300000 + net_core_wmem_max = 300000 + net_ipv4_ip_local_port_range_min = 32768 + net_ipv4_ip_local_port_range_max = 60000 + net_ipv4_neigh_default_gc_thresh1 = 128 + net_ipv4_neigh_default_gc_thresh2 = 512 + net_ipv4_neigh_default_gc_thresh3 = 1024 + net_ipv4_tcp_fin_timeout = 60 + net_ipv4_tcp_keepalive_probes = 9 + net_ipv4_tcp_keepalive_time = 6000 + net_ipv4_tcp_max_syn_backlog = 2048 + net_ipv4_tcp_max_tw_buckets = 100000 + net_ipv4_tcp_tw_reuse = true + net_ipv4_tcp_keepalive_intvl = 70 + net_netfilter_nf_conntrack_buckets = 65536 + net_netfilter_nf_conntrack_max = 200000 + vm_max_map_count = 65536 + vm_swappiness = 45 + vm_vfs_cache_pressure = 80 + } + } + } + + identity { + type = "SystemAssigned" + } +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger) +} + +func (KubernetesClusterResource) kubeletAndLinuxOSConfigPartial(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-aks-%d" + location = "%s" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + dns_prefix = "acctestaks%d" + + default_node_pool { + name = "default" + node_count = 1 + vm_size = "Standard_DS2_v2" + kubelet_config { + cpu_manager_policy = "static" + cpu_cfs_quota_enabled = true + cpu_cfs_quota_period = "10ms" + } + + linux_os_config { + transparent_huge_page_enabled = "always" + + sysctl_config { + fs_aio_max_nr = 65536 + fs_file_max = 100000 + fs_inotify_max_user_watches = 1000000 + } + } + } + + identity { + type = "SystemAssigned" + } +} 
+`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger) +} + func (KubernetesClusterResource) linuxProfileConfig(data acceptance.TestData) string { return fmt.Sprintf(` provider "azurerm" { @@ -878,10 +1130,39 @@ resource "azurerm_kubernetes_cluster" "test" { `, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger) } +func (KubernetesClusterResource) nodePoolOther(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-aks-%d" + location = "%s" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + dns_prefix = "acctestaks%d" + + default_node_pool { + name = "default" + node_count = 1 + vm_size = "Standard_DS2_v2" + fips_enabled = true + kubelet_disk_type = "OS" + } + + identity { + type = "SystemAssigned" + } +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger) +} + func (KubernetesClusterResource) paidSkuConfig(data acceptance.TestData) string { - // @tombuildsstuff (2020-05-29) - this is only supported in a handful of regions - // whilst in Preview - hard-coding for now - location := "westus2" // TODO: data.Locations.Primary return fmt.Sprintf(` provider "azurerm" { features {} @@ -909,13 +1190,10 @@ resource "azurerm_kubernetes_cluster" "test" { type = "SystemAssigned" } } -`, data.RandomInteger, location, data.RandomInteger, data.RandomInteger) +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger) } func (KubernetesClusterResource) freeSkuConfig(data acceptance.TestData) string { - // @tombuildsstuff (2020-05-29) - this is only supported in a handful of regions - // whilst in Preview - hard-coding for now - location := "westus2" // TODO: data.Locations.Primary return 
fmt.Sprintf(` provider "azurerm" { features {} @@ -942,7 +1220,7 @@ resource "azurerm_kubernetes_cluster" "test" { type = "SystemAssigned" } } -`, data.RandomInteger, location, data.RandomInteger, data.RandomInteger) +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger) } func (KubernetesClusterResource) tagsConfig(data acceptance.TestData) string { @@ -1104,6 +1382,59 @@ resource "azurerm_kubernetes_cluster" "test" { `, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger) } +func (KubernetesClusterResource) windowsProfileLicense(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-aks-%d" + location = "%s" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + dns_prefix = "acctestaks%d" + + linux_profile { + admin_username = "acctestuser%d" + + ssh_key { + key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" + } + } + + windows_profile { + admin_username = "azureuser" + admin_password = "P@55W0rd1234!h@2h1C0rP" + license = "Windows_Server" + } + + # the default node pool /has/ to be Linux agents - Windows agents can be added via the node pools resource + default_node_pool { + name = "np" + node_count = 3 + vm_size = "Standard_DS2_v2" + } + + identity { + type = "SystemAssigned" + } + + network_profile { + network_plugin = "azure" + network_policy = "azure" + dns_service_ip = "10.10.0.10" + 
docker_bridge_cidr = "172.18.0.1/16" + service_cidr = "10.10.0.0/16" + } +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger) +} + func (KubernetesClusterResource) diskEncryptionConfig(data acceptance.TestData) string { return fmt.Sprintf(` provider "azurerm" { diff --git a/azurerm/internal/services/containers/kubernetes_cluster_resource.go b/azurerm/internal/services/containers/kubernetes_cluster_resource.go index 949855b0c27a..c8c7d075a862 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_resource.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_resource.go @@ -9,8 +9,6 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2021-03-01/containerservice" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" @@ -25,12 +23,13 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/suppress" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func resourceKubernetesCluster() *schema.Resource { - return &schema.Resource{ +func resourceKubernetesCluster() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceKubernetesClusterCreate, Read: resourceKubernetesClusterRead, Update: resourceKubernetesClusterUpdate, @@ -42,22 
+41,22 @@ func resourceKubernetesCluster() *schema.Resource { }), CustomizeDiff: pluginsdk.CustomDiffInSequence( - // Downgrade from Paid to Free is not supported and requires rebuild to apply - pluginsdk.ForceNewIfChange("sku_tier", func(ctx context.Context, old, new, meta interface{}) bool { - return new == "Free" + // Migration of `identity` to `service_principal` is not allowed, the other way around is + pluginsdk.ForceNewIfChange("service_principal.0.client_id", func(ctx context.Context, old, new, meta interface{}) bool { + return old == "msi" || old == "" }), ), - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(90 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(90 * time.Minute), - Delete: schema.DefaultTimeout(90 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(90 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(90 * time.Minute), + Delete: pluginsdk.DefaultTimeout(90 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validation.StringIsNotEmpty, @@ -68,7 +67,7 @@ func resourceKubernetesCluster() *schema.Resource { "resource_group_name": azure.SchemaResourceGroupName(), "dns_prefix": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, ExactlyOneOf: []string{"dns_prefix", "dns_prefix_private_cluster"}, @@ -76,14 +75,14 @@ func resourceKubernetesCluster() *schema.Resource { }, "dns_prefix_private_cluster": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, ExactlyOneOf: []string{"dns_prefix", "dns_prefix_private_cluster"}, }, "kubernetes_version": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Computed: true, ValidateFunc: 
validation.StringIsNotEmpty, @@ -95,28 +94,28 @@ func resourceKubernetesCluster() *schema.Resource { "addon_profile": schemaKubernetesAddOnProfiles(), "api_server_authorized_ip_ranges": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: validate.CIDR, }, }, "auto_scaler_profile": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, Computed: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "balance_similar_node_groups": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: false, }, "expander": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Computed: true, ValidateFunc: validation.StringInSlice([]string{ @@ -127,87 +126,87 @@ func resourceKubernetesCluster() *schema.Resource { }, false), }, "max_graceful_termination_sec": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Computed: true, }, "max_node_provisioning_time": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Default: "15m", ValidateFunc: containerValidate.Duration, }, "max_unready_nodes": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Default: 3, ValidateFunc: validation.IntAtLeast(0), }, "max_unready_percentage": { - Type: schema.TypeFloat, + Type: pluginsdk.TypeFloat, Optional: true, Default: 45, ValidateFunc: validation.FloatBetween(0, 100), }, "new_pod_scale_up_delay": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Computed: true, ValidateFunc: containerValidate.Duration, }, "scan_interval": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Computed: true, ValidateFunc: containerValidate.Duration, }, "scale_down_delay_after_add": { - Type: schema.TypeString, + Type: 
pluginsdk.TypeString, Optional: true, Computed: true, ValidateFunc: containerValidate.Duration, }, "scale_down_delay_after_delete": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Computed: true, ValidateFunc: containerValidate.Duration, }, "scale_down_delay_after_failure": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Computed: true, ValidateFunc: containerValidate.Duration, }, "scale_down_unneeded": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Computed: true, ValidateFunc: containerValidate.Duration, }, "scale_down_unready": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Computed: true, ValidateFunc: containerValidate.Duration, }, "scale_down_utilization_threshold": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Computed: true, }, "empty_bulk_delete_max": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Computed: true, }, "skip_nodes_with_local_storage": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: true, }, "skip_nodes_with_system_pods": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: true, }, @@ -216,44 +215,43 @@ func resourceKubernetesCluster() *schema.Resource { }, "disk_encryption_set_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, ValidateFunc: computeValidate.DiskEncryptionSetID, }, "enable_pod_security_policy": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, }, "identity": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Type: pluginsdk.TypeList, + Optional: true, + ExactlyOneOf: []string{"identity", "service_principal"}, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "type": { - Type: schema.TypeString, + Type: 
pluginsdk.TypeString, Required: true, - ForceNew: true, ValidateFunc: validation.StringInSlice([]string{ string(containerservice.ResourceIdentityTypeSystemAssigned), string(containerservice.ResourceIdentityTypeUserAssigned), }, false), }, "user_assigned_identity_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, ValidateFunc: msivalidate.UserAssignedIdentityID, Optional: true, }, "principal_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "tenant_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, }, @@ -261,48 +259,62 @@ func resourceKubernetesCluster() *schema.Resource { }, "kubelet_identity": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "client_id": { - Type: schema.TypeString, - Computed: true, + Type: pluginsdk.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + RequiredWith: []string{"kubelet_identity.0.object_id", "kubelet_identity.0.user_assigned_identity_id", "identity.0.user_assigned_identity_id"}, + ValidateFunc: validation.StringIsNotEmpty, }, "object_id": { - Type: schema.TypeString, - Computed: true, + Type: pluginsdk.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + RequiredWith: []string{"kubelet_identity.0.client_id", "kubelet_identity.0.user_assigned_identity_id", "identity.0.user_assigned_identity_id"}, + ValidateFunc: validation.StringIsNotEmpty, }, "user_assigned_identity_id": { - Type: schema.TypeString, - Computed: true, + Type: pluginsdk.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + RequiredWith: []string{"kubelet_identity.0.client_id", "kubelet_identity.0.object_id", "identity.0.user_assigned_identity_id"}, + ValidateFunc: msivalidate.UserAssignedIdentityID, }, }, }, }, "linux_profile": { - Type: schema.TypeList, + Type: 
pluginsdk.TypeList, Optional: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "admin_username": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: containerValidate.KubernetesAdminUserName, }, "ssh_key": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Required: true, ForceNew: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "key_data": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validation.StringIsNotEmpty, @@ -315,15 +327,15 @@ func resourceKubernetesCluster() *schema.Resource { }, "network_profile": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, Computed: true, ForceNew: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "network_plugin": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validation.StringInSlice([]string{ @@ -333,7 +345,7 @@ func resourceKubernetesCluster() *schema.Resource { }, "network_mode": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Computed: true, ForceNew: true, @@ -347,7 +359,7 @@ func resourceKubernetesCluster() *schema.Resource { }, "network_policy": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Computed: true, ForceNew: true, @@ -358,7 +370,7 @@ func resourceKubernetesCluster() *schema.Resource { }, "dns_service_ip": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Computed: true, ForceNew: true, @@ -366,7 +378,7 @@ func resourceKubernetesCluster() *schema.Resource { }, "docker_bridge_cidr": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, 
Optional: true, Computed: true, ForceNew: true, @@ -374,7 +386,7 @@ func resourceKubernetesCluster() *schema.Resource { }, "pod_cidr": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Computed: true, ForceNew: true, @@ -382,7 +394,7 @@ func resourceKubernetesCluster() *schema.Resource { }, "service_cidr": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Computed: true, ForceNew: true, @@ -390,7 +402,7 @@ func resourceKubernetesCluster() *schema.Resource { }, "load_balancer_sku": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Default: string(containerservice.LoadBalancerSkuStandard), ForceNew: true, @@ -403,7 +415,7 @@ func resourceKubernetesCluster() *schema.Resource { }, "outbound_type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, Default: string(containerservice.OutboundTypeLoadBalancer), @@ -414,60 +426,60 @@ func resourceKubernetesCluster() *schema.Resource { }, "load_balancer_profile": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, MaxItems: 1, ForceNew: true, Optional: true, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "outbound_ports_allocated": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Default: 0, ValidateFunc: validation.IntBetween(0, 64000), }, "idle_timeout_in_minutes": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Default: 30, ValidateFunc: validation.IntBetween(4, 120), }, "managed_outbound_ip_count": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Computed: true, ValidateFunc: validation.IntBetween(1, 100), ConflictsWith: []string{"network_profile.0.load_balancer_profile.0.outbound_ip_prefix_ids", "network_profile.0.load_balancer_profile.0.outbound_ip_address_ids"}, }, "outbound_ip_prefix_ids": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, 
Optional: true, Computed: true, - ConfigMode: schema.SchemaConfigModeAttr, + ConfigMode: pluginsdk.SchemaConfigModeAttr, ConflictsWith: []string{"network_profile.0.load_balancer_profile.0.managed_outbound_ip_count", "network_profile.0.load_balancer_profile.0.outbound_ip_address_ids"}, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: azure.ValidateResourceID, }, }, "outbound_ip_address_ids": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, Computed: true, - ConfigMode: schema.SchemaConfigModeAttr, + ConfigMode: pluginsdk.SchemaConfigModeAttr, ConflictsWith: []string{"network_profile.0.load_balancer_profile.0.managed_outbound_ip_count", "network_profile.0.load_balancer_profile.0.outbound_ip_prefix_ids"}, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: azure.ValidateResourceID, }, }, "effective_outbound_ips": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Computed: true, - ConfigMode: schema.SchemaConfigModeAttr, - Elem: &schema.Schema{ - Type: schema.TypeString, + ConfigMode: pluginsdk.SchemaConfigModeAttr, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, }, @@ -478,19 +490,19 @@ func resourceKubernetesCluster() *schema.Resource { }, "node_resource_group": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Computed: true, ForceNew: true, }, "private_fqdn": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "private_link_enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, ForceNew: true, Computed: true, @@ -499,7 +511,7 @@ func resourceKubernetesCluster() *schema.Resource { }, "private_cluster_enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, ForceNew: true, Computed: true, // TODO -- remove this when deprecation resolves @@ -507,7 +519,7 @@ func resourceKubernetesCluster() 
*schema.Resource { }, "private_dns_zone_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Computed: true, // a Private Cluster is `System` by default even if unspecified ForceNew: true, @@ -521,25 +533,25 @@ func resourceKubernetesCluster() *schema.Resource { }, "role_based_access_control": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, Computed: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Required: true, ForceNew: true, }, "azure_active_directory": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "client_app_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ValidateFunc: validation.IsUUID, AtLeastOneOf: []string{"role_based_access_control.0.azure_active_directory.0.client_app_id", "role_based_access_control.0.azure_active_directory.0.server_app_id", @@ -549,7 +561,7 @@ func resourceKubernetesCluster() *schema.Resource { }, "server_app_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ValidateFunc: validation.IsUUID, AtLeastOneOf: []string{"role_based_access_control.0.azure_active_directory.0.client_app_id", "role_based_access_control.0.azure_active_directory.0.server_app_id", @@ -559,7 +571,7 @@ func resourceKubernetesCluster() *schema.Resource { }, "server_app_secret": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Sensitive: true, ValidateFunc: validation.StringIsNotEmpty, @@ -570,7 +582,7 @@ func resourceKubernetesCluster() *schema.Resource { }, "tenant_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Computed: true, // OrEmpty since this can be sourced from 
the client config if it's not specified @@ -582,7 +594,7 @@ func resourceKubernetesCluster() *schema.Resource { }, "managed": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, AtLeastOneOf: []string{"role_based_access_control.0.azure_active_directory.0.client_app_id", "role_based_access_control.0.azure_active_directory.0.server_app_id", "role_based_access_control.0.azure_active_directory.0.server_app_secret", "role_based_access_control.0.azure_active_directory.0.tenant_id", @@ -591,19 +603,16 @@ func resourceKubernetesCluster() *schema.Resource { }, "azure_rbac_enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, - // ForceNew can be removed after GA: - // https://docs.microsoft.com/en-us/azure/aks/manage-azure-rbac#limitations - ForceNew: true, }, "admin_group_object_ids": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, - ConfigMode: schema.SchemaConfigModeAttr, - Elem: &schema.Schema{ - Type: schema.TypeString, + ConfigMode: pluginsdk.SchemaConfigModeAttr, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: validation.IsUUID, }, AtLeastOneOf: []string{"role_based_access_control.0.azure_active_directory.0.client_app_id", "role_based_access_control.0.azure_active_directory.0.server_app_id", @@ -619,19 +628,20 @@ func resourceKubernetesCluster() *schema.Resource { }, "service_principal": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Type: pluginsdk.TypeList, + Optional: true, + ExactlyOneOf: []string{"identity", "service_principal"}, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "client_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: containerValidate.ClientID, }, "client_secret": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, Sensitive: true, ValidateFunc: validation.StringIsNotEmpty, @@ 
-641,14 +651,9 @@ func resourceKubernetesCluster() *schema.Resource { }, "sku_tier": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, - // @tombuildsstuff (2020-05-29) - Preview limitations: - // * Currently, there is no way to remove Uptime SLA from an AKS cluster after creation with it enabled. - // * Private clusters aren't currently supported. - // @jackofallops (2020-07-21) - Update: - // * sku_tier can now be upgraded in place, downgrade requires rebuild - Default: string(containerservice.ManagedClusterSKUTierFree), + Default: string(containerservice.ManagedClusterSKUTierFree), ValidateFunc: validation.StringInSlice([]string{ string(containerservice.ManagedClusterSKUTierFree), string(containerservice.ManagedClusterSKUTierPaid), @@ -658,29 +663,36 @@ func resourceKubernetesCluster() *schema.Resource { "tags": tags.Schema(), "windows_profile": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, Computed: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "admin_username": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, }, "admin_password": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Sensitive: true, ValidateFunc: validation.StringLenBetween(8, 123), }, + "license": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{ + string(containerservice.LicenseTypeWindowsServer), + }, false), + }, }, }, }, "automatic_channel_upgrade": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ValidateFunc: validation.StringInSlice([]string{ string(containerservice.UpgradeChannelPatch), @@ -691,40 +703,40 @@ func resourceKubernetesCluster() *schema.Resource { // Computed "fqdn": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "kube_admin_config": { - Type: 
schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "host": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "username": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "password": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, }, "client_certificate": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, }, "client_key": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, }, "cluster_ca_certificate": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, }, @@ -733,41 +745,41 @@ func resourceKubernetesCluster() *schema.Resource { }, "kube_admin_config_raw": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, }, "kube_config": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "host": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "username": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "password": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, }, "client_certificate": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, }, "client_key": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, }, "cluster_ca_certificate": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, }, @@ -776,7 +788,7 @@ func resourceKubernetesCluster() *schema.Resource { }, "kube_config_raw": { - Type: schema.TypeString, + Type: 
pluginsdk.TypeString, Computed: true, Sensitive: true, }, @@ -784,7 +796,7 @@ func resourceKubernetesCluster() *schema.Resource { } } -func resourceKubernetesClusterCreate(d *schema.ResourceData, meta interface{}) error { +func resourceKubernetesClusterCreate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Containers.KubernetesClustersClient env := meta.(*clients.Client).Containers.Environment ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) @@ -848,7 +860,7 @@ func resourceKubernetesClusterCreate(d *schema.ResourceData, meta interface{}) e windowsProfileRaw := d.Get("windows_profile").([]interface{}) windowsProfile := expandKubernetesClusterWindowsProfile(windowsProfileRaw) - apiServerAuthorizedIPRangesRaw := d.Get("api_server_authorized_ip_ranges").(*schema.Set).List() + apiServerAuthorizedIPRangesRaw := d.Get("api_server_authorized_ip_ranges").(*pluginsdk.Set).List() apiServerAuthorizedIPRanges := utils.ExpandStringSlice(apiServerAuthorizedIPRangesRaw) enablePrivateCluster := false @@ -908,6 +920,7 @@ func resourceKubernetesClusterCreate(d *schema.ResourceData, meta interface{}) e } managedClusterIdentityRaw := d.Get("identity").([]interface{}) + kubernetesClusterIdentityRaw := d.Get("kubelet_identity").([]interface{}) servicePrincipalProfileRaw := d.Get("service_principal").([]interface{}) if len(managedClusterIdentityRaw) == 0 && len(servicePrincipalProfileRaw) == 0 { @@ -920,6 +933,9 @@ func resourceKubernetesClusterCreate(d *schema.ResourceData, meta interface{}) e ClientID: utils.String("msi"), } } + if len(kubernetesClusterIdentityRaw) > 0 { + parameters.ManagedClusterProperties.IdentityProfile = expandKubernetesClusterIdentityProfile(kubernetesClusterIdentityRaw) + } servicePrincipalSet := false if len(servicePrincipalProfileRaw) > 0 { @@ -972,7 +988,7 @@ func resourceKubernetesClusterCreate(d *schema.ResourceData, meta interface{}) e return resourceKubernetesClusterRead(d, meta) } -func 
resourceKubernetesClusterUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceKubernetesClusterUpdate(d *pluginsdk.ResourceData, meta interface{}) error { containersClient := meta.(*clients.Client).Containers nodePoolsClient := containersClient.AgentPoolsClient clusterClient := containersClient.KubernetesClustersClient @@ -1011,7 +1027,7 @@ func resourceKubernetesClusterUpdate(d *schema.ResourceData, meta interface{}) e } } - if d.HasChange("service_principal") { + if d.HasChange("service_principal") && !d.HasChange("identity") { log.Printf("[DEBUG] Updating the Service Principal for Kubernetes Cluster %q (Resource Group %q)..", id.ManagedClusterName, id.ResourceGroup) servicePrincipals := d.Get("service_principal").([]interface{}) // we'll be rotating the Service Principal - removing the SP block is handled by the validate function @@ -1100,7 +1116,7 @@ func resourceKubernetesClusterUpdate(d *schema.ResourceData, meta interface{}) e if d.HasChange("api_server_authorized_ip_ranges") { updateCluster = true - apiServerAuthorizedIPRangesRaw := d.Get("api_server_authorized_ip_ranges").(*schema.Set).List() + apiServerAuthorizedIPRangesRaw := d.Get("api_server_authorized_ip_ranges").(*pluginsdk.Set).List() enablePrivateCluster := false if v, ok := d.GetOk("private_link_enabled"); ok { @@ -1311,7 +1327,7 @@ func resourceKubernetesClusterUpdate(d *schema.ResourceData, meta interface{}) e return resourceKubernetesClusterRead(d, meta) } -func resourceKubernetesClusterRead(d *schema.ResourceData, meta interface{}) error { +func resourceKubernetesClusterRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Containers.KubernetesClustersClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() @@ -1467,7 +1483,7 @@ func resourceKubernetesClusterRead(d *schema.ResourceData, meta interface{}) err return tags.FlattenAndSet(d, resp.Tags) } -func resourceKubernetesClusterDelete(d 
*schema.ResourceData, meta interface{}) error { +func resourceKubernetesClusterDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Containers.KubernetesClustersClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() @@ -1544,6 +1560,25 @@ func expandKubernetesClusterLinuxProfile(input []interface{}) *containerservice. } } +func expandKubernetesClusterIdentityProfile(input []interface{}) map[string]*containerservice.ManagedClusterPropertiesIdentityProfileValue { + identityProfile := make(map[string]*containerservice.ManagedClusterPropertiesIdentityProfileValue) + if len(input) == 0 || input[0] == nil { + return identityProfile + } + + values := input[0].(map[string]interface{}) + + if containerservice.ResourceIdentityType(values["user_assigned_identity_id"].(string)) != "" { + identityProfile["kubeletidentity"] = &containerservice.ManagedClusterPropertiesIdentityProfileValue{ + ResourceID: utils.String(values["user_assigned_identity_id"].(string)), + ClientID: utils.String(values["client_id"].(string)), + ObjectID: utils.String(values["object_id"].(string)), + } + } + + return identityProfile +} + func flattenKubernetesClusterIdentityProfile(profile map[string]*containerservice.ManagedClusterPropertiesIdentityProfileValue) ([]interface{}, error) { if profile == nil { return []interface{}{}, nil @@ -1621,18 +1656,19 @@ func expandKubernetesClusterWindowsProfile(input []interface{}) *containerservic config := input[0].(map[string]interface{}) - adminUsername := config["admin_username"].(string) - adminPassword := config["admin_password"].(string) - - profile := containerservice.ManagedClusterWindowsProfile{ - AdminUsername: &adminUsername, - AdminPassword: &adminPassword, + license := containerservice.LicenseTypeNone + if v := config["license"].(string); v != "" { + license = containerservice.LicenseType(v) } - return &profile + return &containerservice.ManagedClusterWindowsProfile{ + 
AdminUsername: utils.String(config["admin_username"].(string)), + AdminPassword: utils.String(config["admin_password"].(string)), + LicenseType: license, + } } -func flattenKubernetesClusterWindowsProfile(profile *containerservice.ManagedClusterWindowsProfile, d *schema.ResourceData) []interface{} { +func flattenKubernetesClusterWindowsProfile(profile *containerservice.ManagedClusterWindowsProfile, d *pluginsdk.ResourceData) []interface{} { if profile == nil { return []interface{}{} } @@ -1648,10 +1684,16 @@ func flattenKubernetesClusterWindowsProfile(profile *containerservice.ManagedClu adminPassword = v.(string) } + license := "" + if profile.LicenseType != containerservice.LicenseTypeNone { + license = string(profile.LicenseType) + } + return []interface{}{ map[string]interface{}{ "admin_password": adminPassword, "admin_username": adminUsername, + "license": license, }, } } @@ -1751,7 +1793,7 @@ func idsToResourceReferences(set interface{}) *[]containerservice.ResourceRefere return nil } - s := set.(*schema.Set) + s := set.(*pluginsdk.Set) results := make([]containerservice.ResourceReference, 0) for _, element := range s.List() { @@ -1882,7 +1924,7 @@ func expandKubernetesClusterRoleBasedAccessControl(input []interface{}, provider tenantId := azureAdRaw["tenant_id"].(string) managed := azureAdRaw["managed"].(bool) azureRbacEnabled := azureAdRaw["azure_rbac_enabled"].(bool) - adminGroupObjectIdsRaw := azureAdRaw["admin_group_object_ids"].(*schema.Set).List() + adminGroupObjectIdsRaw := azureAdRaw["admin_group_object_ids"].(*pluginsdk.Set).List() adminGroupObjectIds := utils.ExpandStringSlice(adminGroupObjectIdsRaw) if tenantId == "" { @@ -1951,7 +1993,7 @@ func expandKubernetesClusterManagedClusterIdentity(input []interface{}) *contain } } -func flattenKubernetesClusterRoleBasedAccessControl(input *containerservice.ManagedClusterProperties, d *schema.ResourceData) []interface{} { +func flattenKubernetesClusterRoleBasedAccessControl(input 
*containerservice.ManagedClusterProperties, d *pluginsdk.ResourceData) []interface{} { rbacEnabled := false if input.EnableRBAC != nil { rbacEnabled = *input.EnableRBAC @@ -2004,7 +2046,7 @@ func flattenKubernetesClusterRoleBasedAccessControl(input *containerservice.Mana } results = append(results, map[string]interface{}{ - "admin_group_object_ids": schema.NewSet(schema.HashString, adminGroupObjectIds), + "admin_group_object_ids": pluginsdk.NewSet(pluginsdk.HashString, adminGroupObjectIds), "client_app_id": clientAppId, "managed": managed, "server_app_id": serverAppId, @@ -2022,7 +2064,7 @@ func flattenKubernetesClusterRoleBasedAccessControl(input *containerservice.Mana } } -func flattenAzureRmKubernetesClusterServicePrincipalProfile(profile *containerservice.ManagedClusterServicePrincipalProfile, d *schema.ResourceData) []interface{} { +func flattenAzureRmKubernetesClusterServicePrincipalProfile(profile *containerservice.ManagedClusterServicePrincipalProfile, d *pluginsdk.ResourceData) []interface{} { if profile == nil { return []interface{}{} } @@ -2041,10 +2083,10 @@ func flattenAzureRmKubernetesClusterServicePrincipalProfile(profile *containerse if sp, ok := d.GetOk("service_principal"); ok { var val []interface{} - // prior to 1.34 this was a *schema.Set, now it's a List - try both + // prior to 1.34 this was a *pluginsdk.Set, now it's a List - try both if v, ok := sp.([]interface{}); ok { val = v - } else if v, ok := sp.(*schema.Set); ok { + } else if v, ok := sp.(*pluginsdk.Set); ok { val = v.List() } diff --git a/azurerm/internal/services/containers/kubernetes_cluster_resource_test.go b/azurerm/internal/services/containers/kubernetes_cluster_resource_test.go index f862b562a787..91699731b236 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_resource_test.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_resource_test.go @@ -6,12 +6,11 @@ import ( "net/http" "testing" - 
"github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/containers/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -19,8 +18,8 @@ type KubernetesClusterResource struct { } var ( - olderKubernetesVersion = "1.18.14" - currentKubernetesVersion = "1.19.9" + olderKubernetesVersion = "1.18.19" + currentKubernetesVersion = "1.19.11" ) func TestAccKubernetes_all(t *testing.T) { @@ -41,7 +40,7 @@ func TestAccKubernetes_all(t *testing.T) { t.Skip("Skipping since this is being run Individually") } -func (t KubernetesClusterResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (t KubernetesClusterResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := parse.ClusterID(state.ID) if err != nil { return nil, err @@ -56,7 +55,7 @@ func (t KubernetesClusterResource) Exists(ctx context.Context, clients *clients. 
} func (KubernetesClusterResource) updateDefaultNodePoolAgentCount(nodeCount int) acceptance.ClientCheckFunc { - return func(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) error { + return func(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) error { nodePoolName := state.Attributes["default_node_pool.0.name"] clusterName := state.Attributes["name"] resourceGroup := state.Attributes["resource_group_name"] @@ -98,10 +97,10 @@ func testAccKubernetesCluster_hostEncryption(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.hostEncryption(data, currentKubernetesVersion), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("default_node_pool.0.enable_host_encryption").HasValue("true"), ), diff --git a/azurerm/internal/services/containers/kubernetes_cluster_scaling_resource_test.go b/azurerm/internal/services/containers/kubernetes_cluster_scaling_resource_test.go index 29d53994aeaa..af22e9ad4c77 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_scaling_resource_test.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_scaling_resource_test.go @@ -5,7 +5,6 @@ import ( "regexp" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -32,17 +31,17 @@ func testAccKubernetesCluster_addAgent(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, 
[]acceptance.TestStep{ { Config: r.addAgentConfig(data, 1), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("default_node_pool.0.node_count").HasValue("1"), ), }, { Config: r.addAgentConfig(data, 2), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("default_node_pool.0.node_count").HasValue("2"), ), }, @@ -58,10 +57,10 @@ func testAccKubernetesCluster_manualScaleIgnoreChanges(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.manualScaleIgnoreChangesConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("default_node_pool.0.node_count").HasValue("1"), data.CheckWithClient(r.updateDefaultNodePoolAgentCount(2)), @@ -69,7 +68,7 @@ func testAccKubernetesCluster_manualScaleIgnoreChanges(t *testing.T) { }, { Config: r.manualScaleIgnoreChangesConfigUpdated(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("default_node_pool.0.node_count").HasValue("2"), ), @@ -86,17 +85,17 @@ func testAccKubernetesCluster_removeAgent(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.addAgentConfig(data, 2), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("default_node_pool.0.node_count").HasValue("2"), 
), }, { Config: r.addAgentConfig(data, 1), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("default_node_pool.0.node_count").HasValue("1"), ), }, @@ -112,17 +111,17 @@ func testAccKubernetesCluster_autoScalingError(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.autoScalingEnabled(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("default_node_pool.0.node_count").HasValue("2"), ), }, { Config: r.autoScalingEnabledUpdate(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), ExpectError: regexp.MustCompile("cannot change `node_count` when `enable_auto_scaling` is set to `true`"), @@ -139,10 +138,10 @@ func testAccKubernetesCluster_autoScalingErrorMax(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.autoScalingEnabledUpdateMax(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), ExpectError: regexp.MustCompile("`node_count`\\(11\\) must be equal to or less than `max_count`\\(10\\) when `enable_auto_scaling` is set to `true`"), @@ -159,10 +158,10 @@ func testAccKubernetesCluster_autoScalingWithMaxCount(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: 
r.autoScalingWithMaxCountConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -179,10 +178,10 @@ func testAccKubernetesCluster_autoScalingErrorMin(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.autoScalingEnabledUpdateMin(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), ExpectError: regexp.MustCompile("`node_count`\\(1\\) must be equal to or greater than `min_count`\\(2\\) when `enable_auto_scaling` is set to `true`"), @@ -199,10 +198,10 @@ func testAccKubernetesCluster_autoScalingNodeCountUnset(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.autoscaleNodeCountUnsetConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("default_node_pool.0.min_count").HasValue("2"), check.That(data.ResourceName).Key("default_node_pool.0.max_count").HasValue("4"), @@ -231,10 +230,10 @@ func testAccKubernetesCluster_autoScalingNoAvailabilityZones(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.autoscaleNoAvailabilityZonesConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), 
check.That(data.ResourceName).Key("default_node_pool.0.type").HasValue("VirtualMachineScaleSets"), check.That(data.ResourceName).Key("default_node_pool.0.min_count").HasValue("1"), @@ -255,10 +254,10 @@ func testAccKubernetesCluster_autoScalingWithAvailabilityZones(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.autoscaleWithAvailabilityZonesConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("default_node_pool.0.type").HasValue("VirtualMachineScaleSets"), check.That(data.ResourceName).Key("default_node_pool.0.min_count").HasValue("1"), @@ -282,10 +281,10 @@ func testAccKubernetesCluster_autoScalingProfile(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.autoScalingProfileConfig(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("default_node_pool.0.enable_auto_scaling").HasValue("true"), check.That(data.ResourceName).Key("auto_scaler_profile.0.expander").HasValue("least-waste"), diff --git a/azurerm/internal/services/containers/kubernetes_cluster_upgrade_test.go b/azurerm/internal/services/containers/kubernetes_cluster_upgrade_test.go index 4541d2d7ae1c..d1e1fe320ecc 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_upgrade_test.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_upgrade_test.go @@ -5,7 +5,6 @@ import ( "regexp" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -29,17 +28,17 @@ func testAccKubernetesCluster_upgradeAutoScaleMinCount(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.upgradeAutoScaleMinCountConfig(data, olderKubernetesVersion, 3, 8), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.upgradeAutoScaleMinCountConfig(data, olderKubernetesVersion, 4, 8), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -56,10 +55,10 @@ func testAccKubernetesCluster_upgradeControlPlane(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.upgradeControlPlaneConfig(data, olderKubernetesVersion), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("kubernetes_version").HasValue(olderKubernetesVersion), check.That(data.ResourceName).Key("default_node_pool.0.orchestrator_version").HasValue(olderKubernetesVersion), @@ -68,7 +67,7 @@ func testAccKubernetesCluster_upgradeControlPlane(t *testing.T) { data.ImportStep(), { Config: r.upgradeControlPlaneConfig(data, currentKubernetesVersion), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), // the control plane should have been upgraded but the default node pool 
shouldn't have been // TODO: confirm if we can roll the default node pool if the value is unset in the config @@ -89,10 +88,10 @@ func testAccKubernetesCluster_upgradeControlPlaneAndDefaultNodePoolTogether(t *t data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.upgradeControlPlaneDefaultNodePoolConfig(data, olderKubernetesVersion, olderKubernetesVersion), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("kubernetes_version").HasValue(olderKubernetesVersion), check.That(data.ResourceName).Key("default_node_pool.0.orchestrator_version").HasValue(olderKubernetesVersion), @@ -101,7 +100,7 @@ func testAccKubernetesCluster_upgradeControlPlaneAndDefaultNodePoolTogether(t *t data.ImportStep(), { Config: r.upgradeControlPlaneDefaultNodePoolConfig(data, currentKubernetesVersion, currentKubernetesVersion), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("kubernetes_version").HasValue(currentKubernetesVersion), check.That(data.ResourceName).Key("default_node_pool.0.orchestrator_version").HasValue(currentKubernetesVersion), @@ -120,10 +119,10 @@ func testAccKubernetesCluster_upgradeControlPlaneAndDefaultNodePoolTwoPhase(t *t data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.upgradeControlPlaneDefaultNodePoolConfig(data, olderKubernetesVersion, olderKubernetesVersion), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), 
check.That(data.ResourceName).Key("kubernetes_version").HasValue(olderKubernetesVersion), check.That(data.ResourceName).Key("default_node_pool.0.orchestrator_version").HasValue(olderKubernetesVersion), @@ -132,7 +131,7 @@ func testAccKubernetesCluster_upgradeControlPlaneAndDefaultNodePoolTwoPhase(t *t data.ImportStep(), { Config: r.upgradeControlPlaneDefaultNodePoolConfig(data, currentKubernetesVersion, olderKubernetesVersion), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("kubernetes_version").HasValue(currentKubernetesVersion), check.That(data.ResourceName).Key("default_node_pool.0.orchestrator_version").HasValue(olderKubernetesVersion), @@ -141,7 +140,7 @@ func testAccKubernetesCluster_upgradeControlPlaneAndDefaultNodePoolTwoPhase(t *t data.ImportStep(), { Config: r.upgradeControlPlaneDefaultNodePoolConfig(data, currentKubernetesVersion, currentKubernetesVersion), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("kubernetes_version").HasValue(currentKubernetesVersion), check.That(data.ResourceName).Key("default_node_pool.0.orchestrator_version").HasValue(currentKubernetesVersion), @@ -160,10 +159,10 @@ func testAccKubernetesCluster_upgradeNodePoolBeforeControlPlaneFails(t *testing. 
data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.upgradeControlPlaneDefaultNodePoolConfig(data, olderKubernetesVersion, olderKubernetesVersion), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("kubernetes_version").HasValue(olderKubernetesVersion), check.That(data.ResourceName).Key("default_node_pool.0.orchestrator_version").HasValue(olderKubernetesVersion), @@ -187,37 +186,37 @@ func testAccKubernetesCluster_upgradeCustomNodePoolAfterControlPlane(t *testing. r := KubernetesClusterResource{} nodePoolName := "azurerm_kubernetes_cluster_node_pool.test" - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // all on the older version Config: r.upgradeVersionsConfig(data, olderKubernetesVersion, olderKubernetesVersion, olderKubernetesVersion), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("kubernetes_version").HasValue(olderKubernetesVersion), check.That(data.ResourceName).Key("default_node_pool.0.orchestrator_version").HasValue(olderKubernetesVersion), - resource.TestCheckResourceAttr(nodePoolName, "orchestrator_version", olderKubernetesVersion), + acceptance.TestCheckResourceAttr(nodePoolName, "orchestrator_version", olderKubernetesVersion), ), }, data.ImportStep(), { // upgrade the control plane Config: r.upgradeVersionsConfig(data, currentKubernetesVersion, olderKubernetesVersion, olderKubernetesVersion), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("kubernetes_version").HasValue(currentKubernetesVersion), 
check.That(data.ResourceName).Key("default_node_pool.0.orchestrator_version").HasValue(olderKubernetesVersion), - resource.TestCheckResourceAttr(nodePoolName, "orchestrator_version", olderKubernetesVersion), + acceptance.TestCheckResourceAttr(nodePoolName, "orchestrator_version", olderKubernetesVersion), ), }, data.ImportStep(), { // upgrade the node pool Config: r.upgradeVersionsConfig(data, currentKubernetesVersion, olderKubernetesVersion, currentKubernetesVersion), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("kubernetes_version").HasValue(currentKubernetesVersion), check.That(data.ResourceName).Key("default_node_pool.0.orchestrator_version").HasValue(olderKubernetesVersion), - resource.TestCheckResourceAttr(nodePoolName, "orchestrator_version", currentKubernetesVersion), + acceptance.TestCheckResourceAttr(nodePoolName, "orchestrator_version", currentKubernetesVersion), ), }, data.ImportStep(), @@ -234,15 +233,15 @@ func testAccKubernetesCluster_upgradeCustomNodePoolBeforeControlPlaneFails(t *te r := KubernetesClusterResource{} nodePoolName := "azurerm_kubernetes_cluster_node_pool.test" - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // all on the older version Config: r.upgradeVersionsConfig(data, olderKubernetesVersion, olderKubernetesVersion, olderKubernetesVersion), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("kubernetes_version").HasValue(olderKubernetesVersion), check.That(data.ResourceName).Key("default_node_pool.0.orchestrator_version").HasValue(olderKubernetesVersion), - resource.TestCheckResourceAttr(nodePoolName, "orchestrator_version", olderKubernetesVersion), + acceptance.TestCheckResourceAttr(nodePoolName, "orchestrator_version", olderKubernetesVersion), ), }, 
data.ImportStep(), @@ -258,10 +257,10 @@ func TestAccKubernetesCluster_upgradeSettings(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.upgradeSettingsConfig(data, "2"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("default_node_pool.0.upgrade_settings.#").HasValue("1"), check.That(data.ResourceName).Key("default_node_pool.0.upgrade_settings.0.max_surge").HasValue("2"), @@ -270,7 +269,7 @@ func TestAccKubernetesCluster_upgradeSettings(t *testing.T) { data.ImportStep(), { Config: r.upgradeSettingsConfig(data, ""), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("default_node_pool.0.upgrade_settings.#").HasValue("0"), ), @@ -278,7 +277,7 @@ func TestAccKubernetesCluster_upgradeSettings(t *testing.T) { data.ImportStep(), { Config: r.upgradeSettingsConfig(data, "2"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("default_node_pool.0.upgrade_settings.#").HasValue("1"), check.That(data.ResourceName).Key("default_node_pool.0.upgrade_settings.0.max_surge").HasValue("2"), diff --git a/azurerm/internal/services/containers/kubernetes_cluster_validate.go b/azurerm/internal/services/containers/kubernetes_cluster_validate.go index ec4b5c2ebd23..027325cb061d 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_validate.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_validate.go @@ -7,12 +7,12 @@ import ( "strings" "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2021-03-01/containerservice" - 
"github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/containers/client" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func validateKubernetesCluster(d *schema.ResourceData, cluster *containerservice.ManagedCluster, resourceGroup, name string) error { +func validateKubernetesCluster(d *pluginsdk.ResourceData, cluster *containerservice.ManagedCluster, resourceGroup, name string) error { if v, exists := d.GetOk("network_profile"); exists { rawProfiles := v.([]interface{}) diff --git a/azurerm/internal/services/containers/kubernetes_nodepool.go b/azurerm/internal/services/containers/kubernetes_nodepool.go index 6b51d414caf7..a9a55eb8164f 100644 --- a/azurerm/internal/services/containers/kubernetes_nodepool.go +++ b/azurerm/internal/services/containers/kubernetes_nodepool.go @@ -2,36 +2,37 @@ package containers import ( "fmt" + "regexp" + "strconv" "strings" - computeValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/validate" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/containers/validate" - "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2021-03-01/containerservice" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + computeValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/containers/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func SchemaDefaultNodePool() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, +func SchemaDefaultNodePool() *pluginsdk.Schema { + return &pluginsdk.Schema{ + Type: pluginsdk.TypeList, Required: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ // Required "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.KubernetesAgentPoolName, }, "type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, Default: string(containerservice.AgentPoolTypeVirtualMachineScaleSets), @@ -42,7 +43,7 @@ func SchemaDefaultNodePool() *schema.Schema { }, "vm_size": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validation.StringIsNotEmpty, @@ -50,81 +51,108 @@ func SchemaDefaultNodePool() *schema.Schema { // Optional "availability_zones": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, ForceNew: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, "enable_auto_scaling": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, }, "enable_node_public_ip": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, ForceNew: true, }, "enable_host_encryption": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, + Optional: true, + ForceNew: true, + }, + + "kubelet_config": schemaNodePoolKubeletConfig(), + + "linux_os_config": schemaNodePoolLinuxOSConfig(), + + "fips_enabled": { + Type: pluginsdk.TypeBool, Optional: true, ForceNew: true, }, + 
"kubelet_disk_type": { + Type: pluginsdk.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice([]string{ + string(containerservice.KubeletDiskTypeOS), + }, false), + }, + "max_count": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, // NOTE: rather than setting `0` users should instead pass `null` here ValidateFunc: validation.IntBetween(1, 1000), }, "max_pods": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Computed: true, ForceNew: true, }, "min_count": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, // NOTE: rather than setting `0` users should instead pass `null` here ValidateFunc: validation.IntBetween(1, 1000), }, "node_count": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Computed: true, ValidateFunc: validation.IntBetween(1, 1000), }, "node_labels": { - Type: schema.TypeMap, + Type: pluginsdk.TypeMap, ForceNew: true, Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, + "node_public_ip_prefix_id": { + Type: pluginsdk.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: azure.ValidateResourceID, + RequiredWith: []string{"default_node_pool.0.enable_node_public_ip"}, + }, + "node_taints": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, ForceNew: true, Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, "tags": tags.Schema(), "os_disk_size_gb": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, ForceNew: true, Computed: true, @@ -132,7 +160,7 @@ func SchemaDefaultNodePool() *schema.Schema { }, "os_disk_type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, Default: containerservice.OSDiskTypeManaged, @@ -143,25 +171,25 @@ func SchemaDefaultNodePool() *schema.Schema { }, "vnet_subnet_id": { - Type: 
schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, ValidateFunc: azure.ValidateResourceID, }, "orchestrator_version": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Computed: true, ValidateFunc: validation.StringIsNotEmpty, }, "proximity_placement_group_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, ValidateFunc: computeValidate.ProximityPlacementGroupID, }, "only_critical_addons_enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, ForceNew: true, }, @@ -172,6 +200,352 @@ func SchemaDefaultNodePool() *schema.Schema { } } +func schemaNodePoolKubeletConfig() *pluginsdk.Schema { + return &pluginsdk.Schema{ + Type: pluginsdk.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "cpu_manager_policy": { + Type: pluginsdk.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{ + "none", + "static", + }, false), + }, + + "cpu_cfs_quota_enabled": { + Type: pluginsdk.TypeBool, + Optional: true, + ForceNew: true, + }, + + "cpu_cfs_quota_period": { + Type: pluginsdk.TypeString, + Optional: true, + ForceNew: true, + }, + + "image_gc_high_threshold": { + Type: pluginsdk.TypeInt, + Optional: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(0, 100), + }, + + "image_gc_low_threshold": { + Type: pluginsdk.TypeInt, + Optional: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(0, 100), + }, + + "topology_manager_policy": { + Type: pluginsdk.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{ + "none", + "best-effort", + "restricted", + "single-numa-node", + }, false), + }, + + "allowed_unsafe_sysctls": { + Type: pluginsdk.TypeSet, + Optional: true, + ForceNew: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + }, + }, + + 
"container_log_max_size_mb": { + Type: pluginsdk.TypeInt, + Optional: true, + ForceNew: true, + }, + + "container_log_max_line": { + Type: pluginsdk.TypeInt, + Optional: true, + ForceNew: true, + ValidateFunc: validation.IntAtLeast(2), + }, + + "pod_max_pid": { + Type: pluginsdk.TypeInt, + Optional: true, + ForceNew: true, + }, + }, + }, + } +} + +func schemaNodePoolLinuxOSConfig() *pluginsdk.Schema { + return &pluginsdk.Schema{ + Type: pluginsdk.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "sysctl_config": schemaNodePoolSysctlConfig(), + + "transparent_huge_page_enabled": { + Type: pluginsdk.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{ + "always", + "madvise", + "never", + }, false), + }, + + "transparent_huge_page_defrag": { + Type: pluginsdk.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{ + "always", + "defer", + "defer+madvise", + "madvise", + "never", + }, false), + }, + + "swap_file_size_mb": { + Type: pluginsdk.TypeInt, + Optional: true, + ForceNew: true, + }, + }, + }, + } +} + +func schemaNodePoolSysctlConfig() *pluginsdk.Schema { + return &pluginsdk.Schema{ + Type: pluginsdk.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "fs_aio_max_nr": { + Type: pluginsdk.TypeInt, + Optional: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(65536, 6553500), + }, + + "fs_file_max": { + Type: pluginsdk.TypeInt, + Optional: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(8192, 12000500), + }, + + "fs_inotify_max_user_watches": { + Type: pluginsdk.TypeInt, + Optional: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(781250, 2097152), + }, + + "fs_nr_open": { + Type: pluginsdk.TypeInt, + Optional: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(8192, 
20000500), + }, + + "kernel_threads_max": { + Type: pluginsdk.TypeInt, + Optional: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(20, 513785), + }, + + "net_core_netdev_max_backlog": { + Type: pluginsdk.TypeInt, + Optional: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(1000, 3240000), + }, + + "net_core_optmem_max": { + Type: pluginsdk.TypeInt, + Optional: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(20480, 4194304), + }, + + "net_core_rmem_default": { + Type: pluginsdk.TypeInt, + Optional: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(212992, 134217728), + }, + + "net_core_rmem_max": { + Type: pluginsdk.TypeInt, + Optional: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(212992, 134217728), + }, + + "net_core_somaxconn": { + Type: pluginsdk.TypeInt, + Optional: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(4096, 3240000), + }, + + "net_core_wmem_default": { + Type: pluginsdk.TypeInt, + Optional: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(212992, 134217728), + }, + + "net_core_wmem_max": { + Type: pluginsdk.TypeInt, + Optional: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(212992, 134217728), + }, + + "net_ipv4_ip_local_port_range_min": { + Type: pluginsdk.TypeInt, + Optional: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(1024, 60999), + }, + + "net_ipv4_ip_local_port_range_max": { + Type: pluginsdk.TypeInt, + Optional: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(1024, 60999), + }, + + "net_ipv4_neigh_default_gc_thresh1": { + Type: pluginsdk.TypeInt, + Optional: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(128, 80000), + }, + + "net_ipv4_neigh_default_gc_thresh2": { + Type: pluginsdk.TypeInt, + Optional: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(512, 90000), + }, + + "net_ipv4_neigh_default_gc_thresh3": { + Type: pluginsdk.TypeInt, + Optional: true, + ForceNew: true, + 
ValidateFunc: validation.IntBetween(1024, 100000), + }, + + "net_ipv4_tcp_fin_timeout": { + Type: pluginsdk.TypeInt, + Optional: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(5, 120), + }, + + "net_ipv4_tcp_keepalive_intvl": { + Type: pluginsdk.TypeInt, + Optional: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(10, 75), + }, + + "net_ipv4_tcp_keepalive_probes": { + Type: pluginsdk.TypeInt, + Optional: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(1, 15), + }, + + "net_ipv4_tcp_keepalive_time": { + Type: pluginsdk.TypeInt, + Optional: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(30, 432000), + }, + + "net_ipv4_tcp_max_syn_backlog": { + Type: pluginsdk.TypeInt, + Optional: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(128, 3240000), + }, + + "net_ipv4_tcp_max_tw_buckets": { + Type: pluginsdk.TypeInt, + Optional: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(8000, 1440000), + }, + + "net_ipv4_tcp_tw_reuse": { + Type: pluginsdk.TypeBool, + Optional: true, + ForceNew: true, + }, + + "net_netfilter_nf_conntrack_buckets": { + Type: pluginsdk.TypeInt, + Optional: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(65536, 147456), + }, + + "net_netfilter_nf_conntrack_max": { + Type: pluginsdk.TypeInt, + Optional: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(131072, 589824), + }, + + "vm_max_map_count": { + Type: pluginsdk.TypeInt, + Optional: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(65530, 262144), + }, + + "vm_swappiness": { + Type: pluginsdk.TypeInt, + Optional: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(0, 100), + }, + + "vm_vfs_cache_pressure": { + Type: pluginsdk.TypeInt, + Optional: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(0, 100), + }, + }, + }, + } +} + func ConvertDefaultNodePoolToAgentPool(input *[]containerservice.ManagedClusterAgentPoolProfile) containerservice.AgentPool { 
defaultCluster := (*input)[0] return containerservice.AgentPool{ @@ -182,16 +556,21 @@ func ConvertDefaultNodePoolToAgentPool(input *[]containerservice.ManagedClusterA OsDiskSizeGB: defaultCluster.OsDiskSizeGB, OsDiskType: defaultCluster.OsDiskType, VnetSubnetID: defaultCluster.VnetSubnetID, + KubeletConfig: defaultCluster.KubeletConfig, + LinuxOSConfig: defaultCluster.LinuxOSConfig, MaxPods: defaultCluster.MaxPods, OsType: defaultCluster.OsType, MaxCount: defaultCluster.MaxCount, MinCount: defaultCluster.MinCount, EnableAutoScaling: defaultCluster.EnableAutoScaling, + EnableFIPS: defaultCluster.EnableFIPS, + KubeletDiskType: defaultCluster.KubeletDiskType, Type: defaultCluster.Type, OrchestratorVersion: defaultCluster.OrchestratorVersion, ProximityPlacementGroupID: defaultCluster.ProximityPlacementGroupID, AvailabilityZones: defaultCluster.AvailabilityZones, EnableNodePublicIP: defaultCluster.EnableNodePublicIP, + NodePublicIPPrefixID: defaultCluster.NodePublicIPPrefixID, ScaleSetPriority: defaultCluster.ScaleSetPriority, ScaleSetEvictionPolicy: defaultCluster.ScaleSetEvictionPolicy, SpotMaxPrice: defaultCluster.SpotMaxPrice, @@ -204,7 +583,7 @@ func ConvertDefaultNodePoolToAgentPool(input *[]containerservice.ManagedClusterA } } -func ExpandDefaultNodePool(d *schema.ResourceData) (*[]containerservice.ManagedClusterAgentPoolProfile, error) { +func ExpandDefaultNodePool(d *pluginsdk.ResourceData) (*[]containerservice.ManagedClusterAgentPoolProfile, error) { input := d.Get("default_node_pool").([]interface{}) raw := input[0].(map[string]interface{}) @@ -227,8 +606,10 @@ func ExpandDefaultNodePool(d *schema.ResourceData) (*[]containerservice.ManagedC profile := containerservice.ManagedClusterAgentPoolProfile{ EnableAutoScaling: utils.Bool(enableAutoScaling), + EnableFIPS: utils.Bool(raw["fips_enabled"].(bool)), EnableNodePublicIP: utils.Bool(raw["enable_node_public_ip"].(bool)), EnableEncryptionAtHost: utils.Bool(raw["enable_host_encryption"].(bool)), + 
KubeletDiskType: containerservice.KubeletDiskType(raw["kubelet_disk_type"].(string)), Name: utils.String(raw["name"].(string)), NodeLabels: nodeLabels, NodeTaints: nodeTaints, @@ -265,6 +646,10 @@ func ExpandDefaultNodePool(d *schema.ResourceData) (*[]containerservice.ManagedC profile.MaxPods = utils.Int32(maxPods) } + if prefixID := raw["node_public_ip_prefix_id"].(string); prefixID != "" { + profile.NodePublicIPPrefixID = utils.String(prefixID) + } + if osDiskSizeGB := int32(raw["os_disk_size_gb"].(int)); osDiskSizeGB > 0 { profile.OsDiskSizeGB = utils.Int32(osDiskSizeGB) } @@ -333,12 +718,190 @@ func ExpandDefaultNodePool(d *schema.ResourceData) (*[]containerservice.ManagedC return nil, fmt.Errorf("`max_count`(%d) and `min_count`(%d) must be set to `null` when `enable_auto_scaling` is set to `false`", maxCount, minCount) } + if kubeletConfig := raw["kubelet_config"].([]interface{}); len(kubeletConfig) > 0 { + profile.KubeletConfig = expandAgentPoolKubeletConfig(kubeletConfig) + } + + if linuxOSConfig := raw["linux_os_config"].([]interface{}); len(linuxOSConfig) > 0 { + linuxOSConfig, err := expandAgentPoolLinuxOSConfig(linuxOSConfig) + if err != nil { + return nil, err + } + profile.LinuxOSConfig = linuxOSConfig + } + return &[]containerservice.ManagedClusterAgentPoolProfile{ profile, }, nil } -func FlattenDefaultNodePool(input *[]containerservice.ManagedClusterAgentPoolProfile, d *schema.ResourceData) (*[]interface{}, error) { +func expandAgentPoolKubeletConfig(input []interface{}) *containerservice.KubeletConfig { + if len(input) == 0 || input[0] == nil { + return nil + } + + raw := input[0].(map[string]interface{}) + result := &containerservice.KubeletConfig{ + CPUCfsQuota: utils.Bool(raw["cpu_cfs_quota_enabled"].(bool)), + // must be false, otherwise the backend will report error: CustomKubeletConfig.FailSwapOn must be set to false to enable swap file on nodes. 
+ FailSwapOn: utils.Bool(false), + AllowedUnsafeSysctls: utils.ExpandStringSlice(raw["allowed_unsafe_sysctls"].(*pluginsdk.Set).List()), + } + + if v := raw["cpu_manager_policy"].(string); v != "" { + result.CPUManagerPolicy = utils.String(v) + } + if v := raw["cpu_cfs_quota_period"].(string); v != "" { + result.CPUCfsQuotaPeriod = utils.String(v) + } + if v := raw["image_gc_high_threshold"].(int); v != 0 { + result.ImageGcHighThreshold = utils.Int32(int32(v)) + } + if v := raw["image_gc_low_threshold"].(int); v != 0 { + result.ImageGcLowThreshold = utils.Int32(int32(v)) + } + if v := raw["topology_manager_policy"].(string); v != "" { + result.TopologyManagerPolicy = utils.String(v) + } + if v := raw["container_log_max_size_mb"].(int); v != 0 { + result.ContainerLogMaxSizeMB = utils.Int32(int32(v)) + } + if v := raw["container_log_max_line"].(int); v != 0 { + result.ContainerLogMaxFiles = utils.Int32(int32(v)) + } + if v := raw["pod_max_pid"].(int); v != 0 { + result.PodMaxPids = utils.Int32(int32(v)) + } + + return result +} + +func expandAgentPoolLinuxOSConfig(input []interface{}) (*containerservice.LinuxOSConfig, error) { + if len(input) == 0 || input[0] == nil { + return nil, nil + } + raw := input[0].(map[string]interface{}) + sysctlConfig, err := expandAgentPoolSysctlConfig(raw["sysctl_config"].([]interface{})) + if err != nil { + return nil, err + } + + result := &containerservice.LinuxOSConfig{ + Sysctls: sysctlConfig, + } + if v := raw["transparent_huge_page_enabled"].(string); v != "" { + result.TransparentHugePageEnabled = utils.String(v) + } + if v := raw["transparent_huge_page_defrag"].(string); v != "" { + result.TransparentHugePageDefrag = utils.String(v) + } + if v := raw["swap_file_size_mb"].(int); v != 0 { + result.SwapFileSizeMB = utils.Int32(int32(v)) + } + return result, nil +} + +func expandAgentPoolSysctlConfig(input []interface{}) (*containerservice.SysctlConfig, error) { + if len(input) == 0 || input[0] == nil { + return nil, nil + } + raw 
:= input[0].(map[string]interface{}) + result := &containerservice.SysctlConfig{ + NetIpv4TCPTwReuse: utils.Bool(raw["net_ipv4_tcp_tw_reuse"].(bool)), + } + if v := raw["net_core_somaxconn"].(int); v != 0 { + result.NetCoreSomaxconn = utils.Int32(int32(v)) + } + if v := raw["net_core_netdev_max_backlog"].(int); v != 0 { + result.NetCoreNetdevMaxBacklog = utils.Int32(int32(v)) + } + if v := raw["net_core_rmem_default"].(int); v != 0 { + result.NetCoreRmemDefault = utils.Int32(int32(v)) + } + if v := raw["net_core_rmem_max"].(int); v != 0 { + result.NetCoreRmemMax = utils.Int32(int32(v)) + } + if v := raw["net_core_wmem_default"].(int); v != 0 { + result.NetCoreWmemDefault = utils.Int32(int32(v)) + } + if v := raw["net_core_wmem_max"].(int); v != 0 { + result.NetCoreWmemMax = utils.Int32(int32(v)) + } + if v := raw["net_core_optmem_max"].(int); v != 0 { + result.NetCoreOptmemMax = utils.Int32(int32(v)) + } + if v := raw["net_ipv4_tcp_max_syn_backlog"].(int); v != 0 { + result.NetIpv4TCPMaxSynBacklog = utils.Int32(int32(v)) + } + if v := raw["net_ipv4_tcp_max_tw_buckets"].(int); v != 0 { + result.NetIpv4TCPMaxTwBuckets = utils.Int32(int32(v)) + } + if v := raw["net_ipv4_tcp_fin_timeout"].(int); v != 0 { + result.NetIpv4TCPFinTimeout = utils.Int32(int32(v)) + } + if v := raw["net_ipv4_tcp_keepalive_time"].(int); v != 0 { + result.NetIpv4TCPKeepaliveTime = utils.Int32(int32(v)) + } + if v := raw["net_ipv4_tcp_keepalive_probes"].(int); v != 0 { + result.NetIpv4TCPKeepaliveProbes = utils.Int32(int32(v)) + } + if v := raw["net_ipv4_tcp_keepalive_intvl"].(int); v != 0 { + result.NetIpv4TcpkeepaliveIntvl = utils.Int32(int32(v)) + } + netIpv4IPLocalPortRangeMin := raw["net_ipv4_ip_local_port_range_min"].(int) + netIpv4IPLocalPortRangeMax := raw["net_ipv4_ip_local_port_range_max"].(int) + if (netIpv4IPLocalPortRangeMin != 0 && netIpv4IPLocalPortRangeMax == 0) || (netIpv4IPLocalPortRangeMin == 0 && netIpv4IPLocalPortRangeMax != 0) { + return nil, 
fmt.Errorf("`net_ipv4_ip_local_port_range_min` and `net_ipv4_ip_local_port_range_max` should both be set or unset") + } + if netIpv4IPLocalPortRangeMin > netIpv4IPLocalPortRangeMax { + return nil, fmt.Errorf("`net_ipv4_ip_local_port_range_min` should be no larger than `net_ipv4_ip_local_port_range_max`") + } + if netIpv4IPLocalPortRangeMin != 0 && netIpv4IPLocalPortRangeMax != 0 { + result.NetIpv4IPLocalPortRange = utils.String(fmt.Sprintf("%d %d", netIpv4IPLocalPortRangeMin, netIpv4IPLocalPortRangeMax)) + } + if v := raw["net_ipv4_neigh_default_gc_thresh1"].(int); v != 0 { + result.NetIpv4NeighDefaultGcThresh1 = utils.Int32(int32(v)) + } + if v := raw["net_ipv4_neigh_default_gc_thresh2"].(int); v != 0 { + result.NetIpv4NeighDefaultGcThresh2 = utils.Int32(int32(v)) + } + if v := raw["net_ipv4_neigh_default_gc_thresh3"].(int); v != 0 { + result.NetIpv4NeighDefaultGcThresh3 = utils.Int32(int32(v)) + } + if v := raw["net_netfilter_nf_conntrack_max"].(int); v != 0 { + result.NetNetfilterNfConntrackMax = utils.Int32(int32(v)) + } + if v := raw["net_netfilter_nf_conntrack_buckets"].(int); v != 0 { + result.NetNetfilterNfConntrackBuckets = utils.Int32(int32(v)) + } + if v := raw["fs_aio_max_nr"].(int); v != 0 { + result.FsAioMaxNr = utils.Int32(int32(v)) + } + if v := raw["fs_inotify_max_user_watches"].(int); v != 0 { + result.FsInotifyMaxUserWatches = utils.Int32(int32(v)) + } + if v := raw["fs_file_max"].(int); v != 0 { + result.FsFileMax = utils.Int32(int32(v)) + } + if v := raw["fs_nr_open"].(int); v != 0 { + result.FsNrOpen = utils.Int32(int32(v)) + } + if v := raw["kernel_threads_max"].(int); v != 0 { + result.KernelThreadsMax = utils.Int32(int32(v)) + } + if v := raw["vm_max_map_count"].(int); v != 0 { + result.VMMaxMapCount = utils.Int32(int32(v)) + } + if v := raw["vm_swappiness"].(int); v != 0 { + result.VMSwappiness = utils.Int32(int32(v)) + } + if v := raw["vm_vfs_cache_pressure"].(int); v != 0 { + result.VMVfsCachePressure = utils.Int32(int32(v)) + } + return 
result, nil +} + +func FlattenDefaultNodePool(input *[]containerservice.ManagedClusterAgentPoolProfile, d *pluginsdk.ResourceData) (*[]interface{}, error) { if input == nil { return &[]interface{}{}, nil } @@ -363,6 +926,11 @@ func FlattenDefaultNodePool(input *[]containerservice.ManagedClusterAgentPoolPro enableAutoScaling = *agentPool.EnableAutoScaling } + enableFIPS := false + if agentPool.EnableFIPS != nil { + enableFIPS = *agentPool.EnableFIPS + } + enableNodePublicIP := false if agentPool.EnableNodePublicIP != nil { enableNodePublicIP = *agentPool.EnableNodePublicIP @@ -401,6 +969,11 @@ func FlattenDefaultNodePool(input *[]containerservice.ManagedClusterAgentPoolPro } } + nodePublicIPPrefixID := "" + if agentPool.NodePublicIPPrefixID != nil { + nodePublicIPPrefixID = *agentPool.NodePublicIPPrefixID + } + criticalAddonsEnabled := false if agentPool.NodeTaints != nil { for _, taint := range *agentPool.NodeTaints { @@ -441,19 +1014,25 @@ func FlattenDefaultNodePool(input *[]containerservice.ManagedClusterAgentPoolPro } upgradeSettings := flattenUpgradeSettings(agentPool.UpgradeSettings) - + linuxOSConfig, err := flattenAgentPoolLinuxOSConfig(agentPool.LinuxOSConfig) + if err != nil { + return nil, err + } return &[]interface{}{ map[string]interface{}{ "availability_zones": availabilityZones, "enable_auto_scaling": enableAutoScaling, "enable_node_public_ip": enableNodePublicIP, "enable_host_encryption": enableHostEncryption, + "fips_enabled": enableFIPS, + "kubelet_disk_type": string(agentPool.KubeletDiskType), "max_count": maxCount, "max_pods": maxPods, "min_count": minCount, "name": name, "node_count": count, "node_labels": nodeLabels, + "node_public_ip_prefix_id": nodePublicIPPrefixID, "node_taints": []string{}, "os_disk_size_gb": osDiskSizeGB, "os_disk_type": string(osDiskType), @@ -465,11 +1044,261 @@ func FlattenDefaultNodePool(input *[]containerservice.ManagedClusterAgentPoolPro "upgrade_settings": upgradeSettings, "vnet_subnet_id": vnetSubnetId, 
"only_critical_addons_enabled": criticalAddonsEnabled, + "kubelet_config": flattenAgentPoolKubeletConfig(agentPool.KubeletConfig), + "linux_os_config": linuxOSConfig, + }, + }, nil +} + +func flattenAgentPoolKubeletConfig(input *containerservice.KubeletConfig) []interface{} { + if input == nil { + return []interface{}{} + } + + var cpuManagerPolicy, cpuCfsQuotaPeriod, topologyManagerPolicy string + var cpuCfsQuotaEnabled bool + var imageGcHighThreshold, imageGcLowThreshold, containerLogMaxSizeMB, containerLogMaxLines, podMaxPids int + + if input.CPUManagerPolicy != nil { + cpuManagerPolicy = *input.CPUManagerPolicy + } + if input.CPUCfsQuota != nil { + cpuCfsQuotaEnabled = *input.CPUCfsQuota + } + if input.CPUCfsQuotaPeriod != nil { + cpuCfsQuotaPeriod = *input.CPUCfsQuotaPeriod + } + if input.ImageGcHighThreshold != nil { + imageGcHighThreshold = int(*input.ImageGcHighThreshold) + } + if input.ImageGcLowThreshold != nil { + imageGcLowThreshold = int(*input.ImageGcLowThreshold) + } + if input.TopologyManagerPolicy != nil { + topologyManagerPolicy = *input.TopologyManagerPolicy + } + if input.ContainerLogMaxSizeMB != nil { + containerLogMaxSizeMB = int(*input.ContainerLogMaxSizeMB) + } + if input.ContainerLogMaxFiles != nil { + containerLogMaxLines = int(*input.ContainerLogMaxFiles) + } + if input.PodMaxPids != nil { + podMaxPids = int(*input.PodMaxPids) + } + + return []interface{}{ + map[string]interface{}{ + "cpu_manager_policy": cpuManagerPolicy, + "cpu_cfs_quota_enabled": cpuCfsQuotaEnabled, + "cpu_cfs_quota_period": cpuCfsQuotaPeriod, + "image_gc_high_threshold": imageGcHighThreshold, + "image_gc_low_threshold": imageGcLowThreshold, + "topology_manager_policy": topologyManagerPolicy, + "allowed_unsafe_sysctls": utils.FlattenStringSlice(input.AllowedUnsafeSysctls), + "container_log_max_size_mb": containerLogMaxSizeMB, + "container_log_max_line": containerLogMaxLines, + "pod_max_pid": podMaxPids, + }, + } +} + +func flattenAgentPoolLinuxOSConfig(input 
*containerservice.LinuxOSConfig) ([]interface{}, error) { + if input == nil { + return make([]interface{}, 0), nil + } + + var swapFileSizeMB int + if input.SwapFileSizeMB != nil { + swapFileSizeMB = int(*input.SwapFileSizeMB) + } + var transparentHugePageDefrag string + if input.TransparentHugePageDefrag != nil { + transparentHugePageDefrag = *input.TransparentHugePageDefrag + } + var transparentHugePageEnabled string + if input.TransparentHugePageEnabled != nil { + transparentHugePageEnabled = *input.TransparentHugePageEnabled + } + sysctlConfig, err := flattenAgentPoolSysctlConfig(input.Sysctls) + if err != nil { + return nil, err + } + return []interface{}{ + map[string]interface{}{ + "swap_file_size_mb": swapFileSizeMB, + "sysctl_config": sysctlConfig, + "transparent_huge_page_defrag": transparentHugePageDefrag, + "transparent_huge_page_enabled": transparentHugePageEnabled, + }, + }, nil +} + +func flattenAgentPoolSysctlConfig(input *containerservice.SysctlConfig) ([]interface{}, error) { + if input == nil { + return make([]interface{}, 0), nil + } + + var fsAioMaxNr int + if input.FsAioMaxNr != nil { + fsAioMaxNr = int(*input.FsAioMaxNr) + } + var fsFileMax int + if input.FsFileMax != nil { + fsFileMax = int(*input.FsFileMax) + } + var fsInotifyMaxUserWatches int + if input.FsInotifyMaxUserWatches != nil { + fsInotifyMaxUserWatches = int(*input.FsInotifyMaxUserWatches) + } + var fsNrOpen int + if input.FsNrOpen != nil { + fsNrOpen = int(*input.FsNrOpen) + } + var kernelThreadsMax int + if input.KernelThreadsMax != nil { + kernelThreadsMax = int(*input.KernelThreadsMax) + } + var netCoreNetdevMaxBacklog int + if input.NetCoreNetdevMaxBacklog != nil { + netCoreNetdevMaxBacklog = int(*input.NetCoreNetdevMaxBacklog) + } + var netCoreOptmemMax int + if input.NetCoreOptmemMax != nil { + netCoreOptmemMax = int(*input.NetCoreOptmemMax) + } + var netCoreRmemDefault int + if input.NetCoreRmemDefault != nil { + netCoreRmemDefault = int(*input.NetCoreRmemDefault) + } + 
var netCoreRmemMax int + if input.NetCoreRmemMax != nil { + netCoreRmemMax = int(*input.NetCoreRmemMax) + } + var netCoreSomaxconn int + if input.NetCoreSomaxconn != nil { + netCoreSomaxconn = int(*input.NetCoreSomaxconn) + } + var netCoreWmemDefault int + if input.NetCoreWmemDefault != nil { + netCoreWmemDefault = int(*input.NetCoreWmemDefault) + } + var netCoreWmemMax int + if input.NetCoreWmemMax != nil { + netCoreWmemMax = int(*input.NetCoreWmemMax) + } + var netIpv4IpLocalPortRangeMin, netIpv4IpLocalPortRangeMax int + if input.NetIpv4IPLocalPortRange != nil { + arr := regexp.MustCompile("[ \t]+").Split(*input.NetIpv4IPLocalPortRange, -1) + if len(arr) != 2 { + return nil, fmt.Errorf("parsing `NetIpv4IPLocalPortRange` %s", *input.NetIpv4IPLocalPortRange) + } + var err error + netIpv4IpLocalPortRangeMin, err = strconv.Atoi(arr[0]) + if err != nil { + return nil, err + } + netIpv4IpLocalPortRangeMax, err = strconv.Atoi(arr[1]) + if err != nil { + return nil, err + } + } + var netIpv4NeighDefaultGcThresh1 int + if input.NetIpv4NeighDefaultGcThresh1 != nil { + netIpv4NeighDefaultGcThresh1 = int(*input.NetIpv4NeighDefaultGcThresh1) + } + var netIpv4NeighDefaultGcThresh2 int + if input.NetIpv4NeighDefaultGcThresh2 != nil { + netIpv4NeighDefaultGcThresh2 = int(*input.NetIpv4NeighDefaultGcThresh2) + } + var netIpv4NeighDefaultGcThresh3 int + if input.NetIpv4NeighDefaultGcThresh3 != nil { + netIpv4NeighDefaultGcThresh3 = int(*input.NetIpv4NeighDefaultGcThresh3) + } + var netIpv4TcpFinTimeout int + if input.NetIpv4TCPFinTimeout != nil { + netIpv4TcpFinTimeout = int(*input.NetIpv4TCPFinTimeout) + } + var netIpv4TcpkeepaliveIntvl int + if input.NetIpv4TcpkeepaliveIntvl != nil { + netIpv4TcpkeepaliveIntvl = int(*input.NetIpv4TcpkeepaliveIntvl) + } + var netIpv4TcpKeepaliveProbes int + if input.NetIpv4TCPKeepaliveProbes != nil { + netIpv4TcpKeepaliveProbes = int(*input.NetIpv4TCPKeepaliveProbes) + } + var netIpv4TcpKeepaliveTime int + if input.NetIpv4TCPKeepaliveTime != nil 
{ + netIpv4TcpKeepaliveTime = int(*input.NetIpv4TCPKeepaliveTime) + } + var netIpv4TcpMaxSynBacklog int + if input.NetIpv4TCPMaxSynBacklog != nil { + netIpv4TcpMaxSynBacklog = int(*input.NetIpv4TCPMaxSynBacklog) + } + var netIpv4TcpMaxTwBuckets int + if input.NetIpv4TCPMaxTwBuckets != nil { + netIpv4TcpMaxTwBuckets = int(*input.NetIpv4TCPMaxTwBuckets) + } + var netIpv4TcpTwReuse bool + if input.NetIpv4TCPTwReuse != nil { + netIpv4TcpTwReuse = *input.NetIpv4TCPTwReuse + } + var netNetfilterNfConntrackBuckets int + if input.NetNetfilterNfConntrackBuckets != nil { + netNetfilterNfConntrackBuckets = int(*input.NetNetfilterNfConntrackBuckets) + } + var netNetfilterNfConntrackMax int + if input.NetNetfilterNfConntrackMax != nil { + netNetfilterNfConntrackMax = int(*input.NetNetfilterNfConntrackMax) + } + var vmMaxMapCount int + if input.VMMaxMapCount != nil { + vmMaxMapCount = int(*input.VMMaxMapCount) + } + var vmSwappiness int + if input.VMSwappiness != nil { + vmSwappiness = int(*input.VMSwappiness) + } + var vmVfsCachePressure int + if input.VMVfsCachePressure != nil { + vmVfsCachePressure = int(*input.VMVfsCachePressure) + } + return []interface{}{ + map[string]interface{}{ + "fs_aio_max_nr": fsAioMaxNr, + "fs_file_max": fsFileMax, + "fs_inotify_max_user_watches": fsInotifyMaxUserWatches, + "fs_nr_open": fsNrOpen, + "kernel_threads_max": kernelThreadsMax, + "net_core_netdev_max_backlog": netCoreNetdevMaxBacklog, + "net_core_optmem_max": netCoreOptmemMax, + "net_core_rmem_default": netCoreRmemDefault, + "net_core_rmem_max": netCoreRmemMax, + "net_core_somaxconn": netCoreSomaxconn, + "net_core_wmem_default": netCoreWmemDefault, + "net_core_wmem_max": netCoreWmemMax, + "net_ipv4_ip_local_port_range_min": netIpv4IpLocalPortRangeMin, + "net_ipv4_ip_local_port_range_max": netIpv4IpLocalPortRangeMax, + "net_ipv4_neigh_default_gc_thresh1": netIpv4NeighDefaultGcThresh1, + "net_ipv4_neigh_default_gc_thresh2": netIpv4NeighDefaultGcThresh2, + 
"net_ipv4_neigh_default_gc_thresh3": netIpv4NeighDefaultGcThresh3, + "net_ipv4_tcp_fin_timeout": netIpv4TcpFinTimeout, + "net_ipv4_tcp_keepalive_intvl": netIpv4TcpkeepaliveIntvl, + "net_ipv4_tcp_keepalive_probes": netIpv4TcpKeepaliveProbes, + "net_ipv4_tcp_keepalive_time": netIpv4TcpKeepaliveTime, + "net_ipv4_tcp_max_syn_backlog": netIpv4TcpMaxSynBacklog, + "net_ipv4_tcp_max_tw_buckets": netIpv4TcpMaxTwBuckets, + "net_ipv4_tcp_tw_reuse": netIpv4TcpTwReuse, + "net_netfilter_nf_conntrack_buckets": netNetfilterNfConntrackBuckets, + "net_netfilter_nf_conntrack_max": netNetfilterNfConntrackMax, + "vm_max_map_count": vmMaxMapCount, + "vm_swappiness": vmSwappiness, + "vm_vfs_cache_pressure": vmVfsCachePressure, }, }, nil } -func findDefaultNodePool(input *[]containerservice.ManagedClusterAgentPoolProfile, d *schema.ResourceData) (*containerservice.ManagedClusterAgentPoolProfile, error) { +func findDefaultNodePool(input *[]containerservice.ManagedClusterAgentPoolProfile, d *pluginsdk.ResourceData) (*containerservice.ManagedClusterAgentPoolProfile, error) { // first try loading this from the Resource Data if possible (e.g. 
when Created) defaultNodePoolName := d.Get("default_node_pool.0.name") diff --git a/azurerm/internal/services/containers/kubernetes_service_version_data_source.go b/azurerm/internal/services/containers/kubernetes_service_version_data_source.go index f993603e765f..2c077c7efd7c 100644 --- a/azurerm/internal/services/containers/kubernetes_service_version_data_source.go +++ b/azurerm/internal/services/containers/kubernetes_service_version_data_source.go @@ -7,42 +7,42 @@ import ( "time" "github.com/hashicorp/go-version" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func dataSourceKubernetesServiceVersions() *schema.Resource { - return &schema.Resource{ +func dataSourceKubernetesServiceVersions() *pluginsdk.Resource { + return &pluginsdk.Resource{ Read: dataSourceKubernetesServiceVersionsRead, - Timeouts: &schema.ResourceTimeout{ - Read: schema.DefaultTimeout(5 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Read: pluginsdk.DefaultTimeout(5 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "location": azure.SchemaLocation(), "version_prefix": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, }, "versions": { - Type: schema.TypeList, - Elem: &schema.Schema{Type: schema.TypeString}, + Type: pluginsdk.TypeList, + Elem: &pluginsdk.Schema{Type: pluginsdk.TypeString}, Computed: true, }, "latest_version": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "include_preview": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, 
Default: true, }, @@ -50,7 +50,7 @@ func dataSourceKubernetesServiceVersions() *schema.Resource { } } -func dataSourceKubernetesServiceVersionsRead(d *schema.ResourceData, meta interface{}) error { +func dataSourceKubernetesServiceVersionsRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Containers.ServicesClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() diff --git a/azurerm/internal/services/containers/kubernetes_service_version_data_source_test.go b/azurerm/internal/services/containers/kubernetes_service_version_data_source_test.go index dc291da1d35d..1aea1d81d6a5 100644 --- a/azurerm/internal/services/containers/kubernetes_service_version_data_source_test.go +++ b/azurerm/internal/services/containers/kubernetes_service_version_data_source_test.go @@ -5,7 +5,6 @@ import ( "regexp" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -20,14 +19,14 @@ func TestAccDataSourceAzureRMKubernetesServiceVersions_basic(t *testing.T) { r := KubernetesServiceVersionDataSource{} kvrx := regexp.MustCompile(k8sVersionRX) - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("versions.#").Exists(), - resource.TestMatchResourceAttr(data.ResourceName, "versions.0", kvrx), + acceptance.TestMatchResourceAttr(data.ResourceName, "versions.0", kvrx), check.That(data.ResourceName).Key("latest_version").Exists(), - resource.TestMatchResourceAttr(data.ResourceName, "latest_version", kvrx), + acceptance.TestMatchResourceAttr(data.ResourceName, "latest_version", kvrx), ), }, }) @@ -38,14 +37,14 @@ func 
TestAccDataSourceAzureRMKubernetesServiceVersions_filtered(t *testing.T) { r := KubernetesServiceVersionDataSource{} kvrx := regexp.MustCompile(k8sVersionRX) - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.filtered(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("versions.#").Exists(), - resource.TestMatchResourceAttr(data.ResourceName, "versions.0", kvrx), + acceptance.TestMatchResourceAttr(data.ResourceName, "versions.0", kvrx), check.That(data.ResourceName).Key("latest_version").Exists(), - resource.TestMatchResourceAttr(data.ResourceName, "latest_version", kvrx), + acceptance.TestMatchResourceAttr(data.ResourceName, "latest_version", kvrx), ), }, }) @@ -56,14 +55,14 @@ func TestAccDataSourceAzureRMKubernetesServiceVersions_nopreview(t *testing.T) { r := KubernetesServiceVersionDataSource{} kvrx := regexp.MustCompile(k8sVersionRX) - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.nopreview(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("versions.#").Exists(), - resource.TestMatchResourceAttr(data.ResourceName, "versions.0", kvrx), + acceptance.TestMatchResourceAttr(data.ResourceName, "versions.0", kvrx), check.That(data.ResourceName).Key("latest_version").Exists(), - resource.TestMatchResourceAttr(data.ResourceName, "latest_version", kvrx), + acceptance.TestMatchResourceAttr(data.ResourceName, "latest_version", kvrx), ), }, }) diff --git a/azurerm/internal/services/containers/migration/registry.go b/azurerm/internal/services/containers/migration/registry.go index 887dbfc98dcd..18981da2dcd6 100644 --- a/azurerm/internal/services/containers/migration/registry.go +++ b/azurerm/internal/services/containers/migration/registry.go @@ -6,7 +6,6 @@ import ( "strings" "time" - 
"github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" ) @@ -40,7 +39,7 @@ func (RegistryV1ToV2) UpgradeFunc() pluginsdk.StateUpgraderFunc { storageAccountId := "" if v, ok := rawState["storage_account"]; ok { - raw := v.(*schema.Set).List() + raw := v.(*pluginsdk.Set).List() rawVals := raw[0].(map[string]interface{}) storageAccountName := rawVals["name"].(string) @@ -75,45 +74,45 @@ func (RegistryV1ToV2) UpgradeFunc() pluginsdk.StateUpgraderFunc { } func registrySchemaForV0AndV1() map[string]*pluginsdk.Schema { - return map[string]*schema.Schema{ + return map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, }, "resource_group_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, }, "location": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, }, "admin_enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: false, }, // lintignore:S018 "storage_account": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Required: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "access_key": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, Sensitive: true, }, @@ -122,25 +121,25 @@ func registrySchemaForV0AndV1() map[string]*pluginsdk.Schema { }, "login_server": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "admin_username": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "admin_password": { - Type: schema.TypeString, + Type: 
pluginsdk.TypeString, Computed: true, }, "tags": { - Type: schema.TypeMap, + Type: pluginsdk.TypeMap, Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, } diff --git a/azurerm/internal/services/containers/probe.go b/azurerm/internal/services/containers/probe.go index bbbe0c568855..199df903d71b 100644 --- a/azurerm/internal/services/containers/probe.go +++ b/azurerm/internal/services/containers/probe.go @@ -1,51 +1,51 @@ package containers import ( - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" ) -func SchemaContainerGroupProbe() *schema.Schema { +func SchemaContainerGroupProbe() *pluginsdk.Schema { //lintignore:XS003 - return &schema.Schema{ - Type: schema.TypeList, + return &pluginsdk.Schema{ + Type: pluginsdk.TypeList, Optional: true, ForceNew: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "exec": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, ForceNew: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: validation.NoZeroValues, }, }, //lintignore:XS003 "http_get": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "path": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, ValidateFunc: validation.StringIsNotEmpty, }, "port": { - Type: 
schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, ForceNew: true, ValidateFunc: validate.PortNumber, }, "scheme": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, ValidateFunc: validation.StringInSlice([]string{ @@ -58,31 +58,31 @@ func SchemaContainerGroupProbe() *schema.Schema { }, "initial_delay_seconds": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, ForceNew: true, }, "period_seconds": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, ForceNew: true, }, "failure_threshold": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, ForceNew: true, }, "success_threshold": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, ForceNew: true, }, "timeout_seconds": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, ForceNew: true, }, diff --git a/azurerm/internal/services/containers/registration.go b/azurerm/internal/services/containers/registration.go index 5459e6d8dad4..857f1a0b7e1c 100644 --- a/azurerm/internal/services/containers/registration.go +++ b/azurerm/internal/services/containers/registration.go @@ -1,7 +1,7 @@ package containers import ( - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" ) type Registration struct{} @@ -19,8 +19,8 @@ func (r Registration) WebsiteCategories() []string { } // SupportedDataSources returns the supported Data Sources supported by this Service -func (r Registration) SupportedDataSources() map[string]*schema.Resource { - return map[string]*schema.Resource{ +func (r Registration) SupportedDataSources() map[string]*pluginsdk.Resource { + return map[string]*pluginsdk.Resource{ "azurerm_kubernetes_service_versions": dataSourceKubernetesServiceVersions(), "azurerm_container_registry": dataSourceContainerRegistry(), "azurerm_container_registry_token": dataSourceContainerRegistryToken(), @@ -31,8 
+31,8 @@ func (r Registration) SupportedDataSources() map[string]*schema.Resource { } // SupportedResources returns the supported Resources supported by this Service -func (r Registration) SupportedResources() map[string]*schema.Resource { - return map[string]*schema.Resource{ +func (r Registration) SupportedResources() map[string]*pluginsdk.Resource { + return map[string]*pluginsdk.Resource{ "azurerm_container_group": resourceContainerGroup(), "azurerm_container_registry_webhook": resourceContainerRegistryWebhook(), "azurerm_container_registry": resourceContainerRegistry(), diff --git a/azurerm/internal/services/cosmos/common/autoscale_settings.go b/azurerm/internal/services/cosmos/common/autoscale_settings.go index 095280795853..54243a46ce18 100644 --- a/azurerm/internal/services/cosmos/common/autoscale_settings.go +++ b/azurerm/internal/services/cosmos/common/autoscale_settings.go @@ -4,11 +4,11 @@ import ( "log" "github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2021-01-15/documentdb" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func ExpandCosmosDbAutoscaleSettings(d *schema.ResourceData) *documentdb.AutoscaleSettings { +func ExpandCosmosDbAutoscaleSettings(d *pluginsdk.ResourceData) *documentdb.AutoscaleSettings { i := d.Get("autoscale_settings").([]interface{}) if len(i) == 0 || i[0] == nil { log.Printf("[DEBUG] Cosmos DB autoscale settings are not set on the resource") @@ -53,7 +53,7 @@ func FlattenCosmosDbAutoscaleSettings(throughputResponse documentdb.ThroughputSe return append(results, result) } -func ExpandCosmosDbAutoscaleSettingsResource(d *schema.ResourceData) *documentdb.AutoscaleSettingsResource { +func ExpandCosmosDbAutoscaleSettingsResource(d *pluginsdk.ResourceData) *documentdb.AutoscaleSettingsResource { autoscaleSettings := 
ExpandCosmosDbAutoscaleSettings(d) autoscaleSettingResource := documentdb.AutoscaleSettingsResource{} diff --git a/azurerm/internal/services/cosmos/common/cors_rule.go b/azurerm/internal/services/cosmos/common/cors_rule.go index 94dae87d0941..55fd507f028e 100644 --- a/azurerm/internal/services/cosmos/common/cors_rule.go +++ b/azurerm/internal/services/cosmos/common/cors_rule.go @@ -4,12 +4,12 @@ import ( "strings" "github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2021-01-15/documentdb" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func SchemaCorsRule() *schema.Schema { +func SchemaCorsRule() *pluginsdk.Schema { allowedMethods := []string{ "DELETE", "GET", @@ -21,54 +21,54 @@ func SchemaCorsRule() *schema.Schema { "PATCH", } - return &schema.Schema{ - Type: schema.TypeList, + return &pluginsdk.Schema{ + Type: pluginsdk.TypeList, Optional: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "allowed_origins": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Required: true, MaxItems: 64, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: validation.StringIsNotEmpty, }, }, "exposed_headers": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Required: true, MaxItems: 64, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: validation.StringIsNotEmpty, }, }, "allowed_headers": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Required: true, MaxItems: 64, - Elem: &schema.Schema{ 
- Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: validation.StringIsNotEmpty, }, }, "allowed_methods": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Required: true, MaxItems: 64, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: validation.StringInSlice(allowedMethods, false), }, }, "max_age_in_seconds": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Required: true, ValidateFunc: validation.IntBetween(1, 2000000000), }, diff --git a/azurerm/internal/services/cosmos/common/indexing_policy.go b/azurerm/internal/services/cosmos/common/indexing_policy.go index e9ee909a6de7..5e6ced0260e6 100644 --- a/azurerm/internal/services/cosmos/common/indexing_policy.go +++ b/azurerm/internal/services/cosmos/common/indexing_policy.go @@ -5,7 +5,7 @@ import ( "strings" "github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2021-01-15/documentdb" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -45,7 +45,7 @@ func expandAzureRmCosmosDBIndexingPolicyExcludedPaths(input []interface{}) *[]do return &paths } -func expandAzureRmCosmosDBIndexingPolicyCompositeIndexes(input []interface{}) *[][]documentdb.CompositePath { +func ExpandAzureRmCosmosDBIndexingPolicyCompositeIndexes(input []interface{}) *[][]documentdb.CompositePath { indexes := make([][]documentdb.CompositePath, 0) for _, i := range input { @@ -66,7 +66,31 @@ func expandAzureRmCosmosDBIndexingPolicyCompositeIndexes(input []interface{}) *[ return &indexes } -func ExpandAzureRmCosmosDbIndexingPolicy(d *schema.ResourceData) *documentdb.IndexingPolicy { +func ExpandAzureRmCosmosDBIndexingPolicySpatialIndexes(input []interface{}) *[]documentdb.SpatialSpec { + if len(input) == 0 || input[0] == nil { + return nil + } + 
indexes := make([]documentdb.SpatialSpec, 0) + // no matter what spatial types are updated, all types will be set and returned from service + spatialTypes := []documentdb.SpatialType{ + documentdb.SpatialTypeLineString, + documentdb.SpatialTypeMultiPolygon, + documentdb.SpatialTypePoint, + documentdb.SpatialTypePolygon, + } + + for _, i := range input { + indexPair := i.(map[string]interface{}) + indexes = append(indexes, documentdb.SpatialSpec{ + Types: &spatialTypes, + Path: utils.String(indexPair["path"].(string)), + }) + } + + return &indexes +} + +func ExpandAzureRmCosmosDbIndexingPolicy(d *pluginsdk.ResourceData) *documentdb.IndexingPolicy { i := d.Get("indexing_policy").([]interface{}) if len(i) == 0 || i[0] == nil { @@ -83,8 +107,11 @@ func ExpandAzureRmCosmosDbIndexingPolicy(d *schema.ResourceData) *documentdb.Ind } if v, ok := input["composite_index"].([]interface{}); ok { - policy.CompositeIndexes = expandAzureRmCosmosDBIndexingPolicyCompositeIndexes(v) + policy.CompositeIndexes = ExpandAzureRmCosmosDBIndexingPolicyCompositeIndexes(v) } + + policy.SpatialIndexes = ExpandAzureRmCosmosDBIndexingPolicySpatialIndexes(input["spatial_index"].([]interface{})) + return policy } @@ -131,7 +158,7 @@ func flattenCosmosDBIndexingPolicyCompositeIndex(input []documentdb.CompositePat return indexPairs } -func flattenCosmosDBIndexingPolicyCompositeIndexes(input *[][]documentdb.CompositePath) []interface{} { +func FlattenCosmosDBIndexingPolicyCompositeIndexes(input *[][]documentdb.CompositePath) []interface{} { if input == nil { return []interface{}{} } @@ -163,6 +190,41 @@ func flattenCosmosDBIndexingPolicyIncludedPaths(input *[]documentdb.IncludedPath return includedPaths } +func FlattenCosmosDBIndexingPolicySpatialIndexes(input *[]documentdb.SpatialSpec) []interface{} { + if input == nil { + return []interface{}{} + } + + indexes := make([]interface{}, 0) + + for _, v := range *input { + var path string + if v.Path != nil { + path = *v.Path + } + indexes = 
append(indexes, map[string]interface{}{ + "path": path, + "types": flattenCosmosDBIndexingPolicySpatialIndexesTypes(v.Types), + }) + } + + return indexes +} + +func flattenCosmosDBIndexingPolicySpatialIndexesTypes(input *[]documentdb.SpatialType) []interface{} { + if input == nil { + return nil + } + + types := make([]interface{}, 0) + + for _, v := range *input { + types = append(types, string(v)) + } + + return types +} + func FlattenAzureRmCosmosDbIndexingPolicy(indexingPolicy *documentdb.IndexingPolicy) []interface{} { results := make([]interface{}, 0) if indexingPolicy == nil { @@ -173,7 +235,8 @@ func FlattenAzureRmCosmosDbIndexingPolicy(indexingPolicy *documentdb.IndexingPol result["indexing_mode"] = strings.Title(string(indexingPolicy.IndexingMode)) result["included_path"] = flattenCosmosDBIndexingPolicyIncludedPaths(indexingPolicy.IncludedPaths) result["excluded_path"] = flattenCosmosDBIndexingPolicyExcludedPaths(indexingPolicy.ExcludedPaths) - result["composite_index"] = flattenCosmosDBIndexingPolicyCompositeIndexes(indexingPolicy.CompositeIndexes) + result["composite_index"] = FlattenCosmosDBIndexingPolicyCompositeIndexes(indexingPolicy.CompositeIndexes) + result["spatial_index"] = FlattenCosmosDBIndexingPolicySpatialIndexes(indexingPolicy.SpatialIndexes) results = append(results, result) return results diff --git a/azurerm/internal/services/cosmos/common/schema.go b/azurerm/internal/services/cosmos/common/schema.go index 65a873fb2cf4..1e749ac06f73 100644 --- a/azurerm/internal/services/cosmos/common/schema.go +++ b/azurerm/internal/services/cosmos/common/schema.go @@ -2,46 +2,46 @@ package common import ( "github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2021-01-15/documentdb" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/validate" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/suppress" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" ) -func CassandraTableSchemaPropertySchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, +func CassandraTableSchemaPropertySchema() *pluginsdk.Schema { + return &pluginsdk.Schema{ + Type: pluginsdk.TypeList, Required: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "column": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Required: true, MinItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "name": { Required: true, - Type: schema.TypeString, + Type: pluginsdk.TypeString, ValidateFunc: validation.StringIsNotEmpty, }, "type": { Required: true, - Type: schema.TypeString, + Type: pluginsdk.TypeString, ValidateFunc: validation.StringIsNotEmpty, }, }, }, }, "partition_key": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "name": { Required: true, - Type: schema.TypeString, + Type: pluginsdk.TypeString, ValidateFunc: validation.StringIsNotEmpty, }, }, @@ -49,16 +49,16 @@ func CassandraTableSchemaPropertySchema() *schema.Schema { }, "cluster_key": { Optional: true, - Type: schema.TypeList, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Type: pluginsdk.TypeList, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringIsNotEmpty, }, "order_by": { - Type: schema.TypeString, + Type: 
pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ "Asc", @@ -73,16 +73,16 @@ func CassandraTableSchemaPropertySchema() *schema.Schema { } } -func DatabaseAutoscaleSettingsSchema() *schema.Schema { +func DatabaseAutoscaleSettingsSchema() *pluginsdk.Schema { //lintignore:XS003 - return &schema.Schema{ - Type: schema.TypeList, + return &pluginsdk.Schema{ + Type: pluginsdk.TypeList, Optional: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "max_throughput": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Computed: true, ConflictsWith: []string{"throughput"}, @@ -93,26 +93,26 @@ func DatabaseAutoscaleSettingsSchema() *schema.Schema { } } -func MongoCollectionAutoscaleSettingsSchema() *schema.Schema { +func MongoCollectionAutoscaleSettingsSchema() *pluginsdk.Schema { autoscaleSettingsDatabaseSchema := DatabaseAutoscaleSettingsSchema() autoscaleSettingsDatabaseSchema.RequiredWith = []string{"shard_key"} return autoscaleSettingsDatabaseSchema } -func CosmosDbIndexingPolicySchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, +func CosmosDbIndexingPolicySchema() *pluginsdk.Schema { + return &pluginsdk.Schema{ + Type: pluginsdk.TypeList, Optional: true, Computed: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ // `automatic` is excluded as it is deprecated; see https://stackoverflow.com/a/58721386 // `indexing_mode` case changes from 2020-04-01 to 2021-01-15 issue https://github.com/Azure/azure-rest-api-specs/issues/14051 // todo: change to SDK constants and remove translation code in 3.0 "indexing_mode": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Default: documentdb.Consistent, DiffSuppressFunc: suppress.CaseDifference, // Open issue 
https://github.com/Azure/azure-sdk-for-go/issues/6603 @@ -123,13 +123,13 @@ func CosmosDbIndexingPolicySchema() *schema.Schema { }, "included_path": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "path": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringIsNotEmpty, }, @@ -137,70 +137,38 @@ func CosmosDbIndexingPolicySchema() *schema.Schema { }, }, "excluded_path": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "path": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringIsNotEmpty, }, }, }, }, - "composite_index": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "index": { - Type: schema.TypeList, - MinItems: 1, - Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "path": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringIsNotEmpty, - }, - // `order` case changes from 2020-04-01 to 2021-01-15, issue opened:https://github.com/Azure/azure-rest-api-specs/issues/14051 - // todo: change to SDK constants and remove translation code in 3.0 - "order": { - Type: schema.TypeString, - Required: true, - // Workaround for Azure/azure-rest-api-specs#11222 - DiffSuppressFunc: suppress.CaseDifference, - ValidateFunc: validation.StringInSlice( - []string{ - "Ascending", - "Descending", - }, false), - }, - }, - }, - }, - }, - }, - }, + "composite_index": CosmosDbIndexingPolicyCompositeIndexSchema(), + + "spatial_index": CosmosDbIndexingPolicySpatialIndexSchema(), }, }, } } -func ConflictResolutionPolicy() *schema.Schema 
{ - return &schema.Schema{ - Type: schema.TypeList, +func ConflictResolutionPolicy() *pluginsdk.Schema { + return &pluginsdk.Schema{ + Type: pluginsdk.TypeList, Optional: true, Computed: true, ForceNew: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "mode": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ string(documentdb.LastWriterWins), @@ -209,13 +177,13 @@ func ConflictResolutionPolicy() *schema.Schema { }, "conflict_resolution_path": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ValidateFunc: validation.StringIsNotEmpty, }, "conflict_resolution_procedure": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ValidateFunc: validation.StringIsNotEmpty, }, @@ -223,3 +191,65 @@ func ConflictResolutionPolicy() *schema.Schema { }, } } + +func CosmosDbIndexingPolicyCompositeIndexSchema() *pluginsdk.Schema { + return &pluginsdk.Schema{ + Type: pluginsdk.TypeList, + Optional: true, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "index": { + Type: pluginsdk.TypeList, + MinItems: 1, + Required: true, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "path": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + // `order` case changes from 2020-04-01 to 2021-01-15, issue opened:https://github.com/Azure/azure-rest-api-specs/issues/14051 + // todo: change to SDK constants and remove translation code in 3.0 + "order": { + Type: pluginsdk.TypeString, + Required: true, + // Workaround for Azure/azure-rest-api-specs#11222 + DiffSuppressFunc: suppress.CaseDifference, + ValidateFunc: validation.StringInSlice( + []string{ + "Ascending", + "Descending", + }, false), + }, + }, + }, + }, + }, + }, + } +} + +func CosmosDbIndexingPolicySpatialIndexSchema() 
*pluginsdk.Schema { + return &pluginsdk.Schema{ + Type: pluginsdk.TypeList, + Optional: true, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "path": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + + "types": { + Type: pluginsdk.TypeSet, + Computed: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + }, + }, + }, + }, + } +} diff --git a/azurerm/internal/services/cosmos/common/throughput.go b/azurerm/internal/services/cosmos/common/throughput.go index 74b599203d9b..2cbaf407a75f 100644 --- a/azurerm/internal/services/cosmos/common/throughput.go +++ b/azurerm/internal/services/cosmos/common/throughput.go @@ -4,7 +4,7 @@ import ( "fmt" "github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2021-01-15/documentdb" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -26,7 +26,7 @@ func ConvertThroughputFromResourceData(throughput interface{}) *int32 { return utils.Int32(int32(throughput.(int))) } -func ExpandCosmosDBThroughputSettingsUpdateParameters(d *schema.ResourceData) *documentdb.ThroughputSettingsUpdateParameters { +func ExpandCosmosDBThroughputSettingsUpdateParameters(d *pluginsdk.ResourceData) *documentdb.ThroughputSettingsUpdateParameters { throughputParameters := documentdb.ThroughputSettingsUpdateParameters{ ThroughputSettingsUpdateProperties: &documentdb.ThroughputSettingsUpdateProperties{ Resource: &documentdb.ThroughputSettingsResource{}, @@ -46,14 +46,14 @@ func ExpandCosmosDBThroughputSettingsUpdateParameters(d *schema.ResourceData) *d return &throughputParameters } -func SetResourceDataThroughputFromResponse(throughputResponse documentdb.ThroughputSettingsGetResults, d *schema.ResourceData) { +func SetResourceDataThroughputFromResponse(throughputResponse 
documentdb.ThroughputSettingsGetResults, d *pluginsdk.ResourceData) { d.Set("throughput", GetThroughputFromResult(throughputResponse)) autoscaleSettings := FlattenCosmosDbAutoscaleSettings(throughputResponse) d.Set("autoscale_settings", autoscaleSettings) } -func CheckForChangeFromAutoscaleAndManualThroughput(d *schema.ResourceData) error { +func CheckForChangeFromAutoscaleAndManualThroughput(d *pluginsdk.ResourceData) error { if d.HasChange("throughput") && d.HasChange("autoscale_settings") { return fmt.Errorf("switching between autoscale and manually provisioned throughput via Terraform is not supported at this time") } @@ -61,6 +61,6 @@ func CheckForChangeFromAutoscaleAndManualThroughput(d *schema.ResourceData) erro return nil } -func HasThroughputChange(d *schema.ResourceData) bool { +func HasThroughputChange(d *pluginsdk.ResourceData) bool { return d.HasChanges("throughput", "autoscale_settings") } diff --git a/azurerm/internal/services/cosmos/cosmosdb_account_data_source.go b/azurerm/internal/services/cosmos/cosmosdb_account_data_source.go index 8d06fef0e9bc..e6b82e575426 100644 --- a/azurerm/internal/services/cosmos/cosmosdb_account_data_source.go +++ b/azurerm/internal/services/cosmos/cosmosdb_account_data_source.go @@ -6,27 +6,26 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2021-01-15/documentdb" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/common" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" - - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func dataSourceCosmosDbAccount() *schema.Resource { - return &schema.Resource{ +func dataSourceCosmosDbAccount() *pluginsdk.Resource { + return &pluginsdk.Resource{ Read: dataSourceCosmosDbAccountRead, - Timeouts: &schema.ResourceTimeout{ - Read: schema.DefaultTimeout(5 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Read: pluginsdk.DefaultTimeout(5 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, @@ -37,47 +36,47 @@ func dataSourceCosmosDbAccount() *schema.Resource { "tags": tags.SchemaDataSource(), "offer_type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "kind": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "ip_range_filter": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "enable_free_tier": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Computed: true, }, "enable_automatic_failover": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Computed: true, }, "consistency_policy": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "consistency_level": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "max_interval_in_seconds": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Computed: true, }, "max_staleness_prefix": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Computed: true, }, }, @@ -85,22 +84,22 @@ func dataSourceCosmosDbAccount() *schema.Resource { }, "geo_location": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: 
map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "location": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "failover_priority": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Computed: true, }, }, @@ -108,12 +107,12 @@ func dataSourceCosmosDbAccount() *schema.Resource { }, "capabilities": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, }, @@ -121,17 +120,17 @@ func dataSourceCosmosDbAccount() *schema.Resource { }, "is_virtual_network_filter_enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Computed: true, }, "virtual_network_rule": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, }, @@ -139,83 +138,83 @@ func dataSourceCosmosDbAccount() *schema.Resource { }, "key_vault_key_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "enable_multiple_write_locations": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Computed: true, }, "endpoint": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "read_endpoints": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, "write_endpoints": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + 
Type: pluginsdk.TypeString, }, }, "primary_key": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, }, "secondary_key": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, }, "primary_readonly_key": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, }, "secondary_readonly_key": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, }, "primary_master_key": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, Deprecated: "This property has been renamed to `primary_key` and will be removed in v3.0 of the provider in support of HashiCorp's inclusive language policy which can be found here: https://discuss.hashicorp.com/t/inclusive-language-changes", }, "secondary_master_key": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, Deprecated: "This property has been renamed to `secondary_key` and will be removed in v3.0 of the provider in support of HashiCorp's inclusive language policy which can be found here: https://discuss.hashicorp.com/t/inclusive-language-changes", }, "primary_readonly_master_key": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, Deprecated: "This property has been renamed to `primary_readonly_key` and will be removed in v3.0 of the provider in support of HashiCorp's inclusive language policy which can be found here: https://discuss.hashicorp.com/t/inclusive-language-changes", }, "secondary_readonly_master_key": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, Deprecated: "This property has been renamed to `secondary_readonly_key` and will be removed in v3.0 of the provider in support of HashiCorp's inclusive language policy which can be found here: https://discuss.hashicorp.com/t/inclusive-language-changes", @@ -224,7 +223,7 @@ 
func dataSourceCosmosDbAccount() *schema.Resource { } } -func dataSourceCosmosDbAccountRead(d *schema.ResourceData, meta interface{}) error { +func dataSourceCosmosDbAccountRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Cosmos.DatabaseClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() diff --git a/azurerm/internal/services/cosmos/cosmosdb_account_data_source_test.go b/azurerm/internal/services/cosmos/cosmosdb_account_data_source_test.go index a311d5f644a0..922a8684cee8 100644 --- a/azurerm/internal/services/cosmos/cosmosdb_account_data_source_test.go +++ b/azurerm/internal/services/cosmos/cosmosdb_account_data_source_test.go @@ -5,7 +5,6 @@ import ( "testing" "github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2021-01-15/documentdb" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -17,10 +16,10 @@ func TestAccDataSourceCosmosDBAccount_basic(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_cosmosdb_account", "test") r := CosmosDBAccountDataSourceResource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( checkAccCosmosDBAccount_basic(data, documentdb.BoundedStaleness, 1), ), }, @@ -31,10 +30,10 @@ func TestAccDataSourceCosmosDBAccount_complete(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_cosmosdb_account", "test") r := CosmosDBAccountDataSourceResource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.complete(data), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( 
checkAccCosmosDBAccount_basic(data, documentdb.BoundedStaleness, 3), check.That(data.ResourceName).Key("geo_location.0.location").HasValue(data.Locations.Primary), check.That(data.ResourceName).Key("geo_location.1.location").HasValue(data.Locations.Secondary), diff --git a/azurerm/internal/services/cosmos/cosmosdb_account_resource.go b/azurerm/internal/services/cosmos/cosmosdb_account_resource.go index fee2f293b879..b54fb3855e2e 100644 --- a/azurerm/internal/services/cosmos/cosmosdb_account_resource.go +++ b/azurerm/internal/services/cosmos/cosmosdb_account_resource.go @@ -11,9 +11,6 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2021-01-15/documentdb" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" @@ -26,13 +23,14 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/suppress" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) // If the consistency policy of the Cosmos DB Database Account is not bounded staleness, // any changes to the configuration for bounded staleness should be suppressed. 
-func suppressConsistencyPolicyStalenessConfiguration(_, _, _ string, d *schema.ResourceData) bool { +func suppressConsistencyPolicyStalenessConfiguration(_, _, _ string, d *pluginsdk.ResourceData) bool { consistencyPolicyList := d.Get("consistency_policy").([]interface{}) if len(consistencyPolicyList) == 0 || consistencyPolicyList[0] == nil { return false @@ -43,8 +41,8 @@ func suppressConsistencyPolicyStalenessConfiguration(_, _, _ string, d *schema.R return consistencyPolicy["consistency_level"].(string) != string(documentdb.BoundedStaleness) } -func resourceCosmosDbAccount() *schema.Resource { - return &schema.Resource{ +func resourceCosmosDbAccount() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceCosmosDbAccountCreate, Read: resourceCosmosDbAccountRead, Update: resourceCosmosDbAccountUpdate, @@ -52,16 +50,16 @@ func resourceCosmosDbAccount() *schema.Resource { // TODO: replace this with an importer which validates the ID during import Importer: pluginsdk.DefaultImporter(), - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(180 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(180 * time.Minute), - Delete: schema.DefaultTimeout(180 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(180 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(180 * time.Minute), + Delete: pluginsdk.DefaultTimeout(180 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validation.StringMatch( @@ -76,7 +74,7 @@ func resourceCosmosDbAccount() *schema.Resource { // resource fields "offer_type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, DiffSuppressFunc: suppress.CaseDifference, ValidateFunc: validation.StringInSlice([]string{ @@ 
-85,7 +83,7 @@ func resourceCosmosDbAccount() *schema.Resource { }, "kind": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, Default: string(documentdb.GlobalDocumentDB), @@ -98,7 +96,7 @@ func resourceCosmosDbAccount() *schema.Resource { }, "ip_range_filter": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ValidateFunc: validation.StringMatch( regexp.MustCompile(`^(\b(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(/([1-2][0-9]|3[0-2]))?\b[,]?)*$`), @@ -107,33 +105,33 @@ func resourceCosmosDbAccount() *schema.Resource { }, "enable_free_tier": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: false, ForceNew: true, }, "analytical_storage_enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: false, ForceNew: true, }, "public_network_access_enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: true, }, "enable_automatic_failover": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: false, }, "key_vault_key_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, DiffSuppressFunc: keyVaultSuppress.DiffSuppressIgnoreKeyVaultKeyVersion, @@ -141,13 +139,13 @@ func resourceCosmosDbAccount() *schema.Resource { }, "consistency_policy": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Required: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "consistency_level": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, DiffSuppressFunc: suppress.CaseDifference, ValidateFunc: validation.StringInSlice([]string{ @@ -160,7 +158,7 @@ func resourceCosmosDbAccount() *schema.Resource { }, "max_interval_in_seconds": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, 
Computed: true, DiffSuppressFunc: suppressConsistencyPolicyStalenessConfiguration, @@ -168,7 +166,7 @@ func resourceCosmosDbAccount() *schema.Resource { }, "max_staleness_prefix": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Computed: true, DiffSuppressFunc: suppressConsistencyPolicyStalenessConfiguration, @@ -179,12 +177,12 @@ func resourceCosmosDbAccount() *schema.Resource { }, "geo_location": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "prefix": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ValidateFunc: validation.StringMatch( regexp.MustCompile("^[-a-z0-9]{3,50}$"), @@ -194,20 +192,20 @@ func resourceCosmosDbAccount() *schema.Resource { }, "id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "location": location.SchemaWithoutForceNew(), "failover_priority": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Required: true, ValidateFunc: validation.IntAtLeast(0), }, "zone_redundant": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: false, }, @@ -217,13 +215,13 @@ func resourceCosmosDbAccount() *schema.Resource { }, "capabilities": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, DiffSuppressFunc: suppress.CaseDifference, ValidateFunc: validation.StringInSlice([]string{ @@ -247,23 +245,23 @@ func resourceCosmosDbAccount() *schema.Resource { }, "is_virtual_network_filter_enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: false, }, "virtual_network_rule": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, 
Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: azure.ValidateResourceID, }, "ignore_missing_vnet_service_endpoint": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: false, }, @@ -273,19 +271,19 @@ func resourceCosmosDbAccount() *schema.Resource { }, "enable_multiple_write_locations": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: false, }, "access_key_metadata_writes_enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: true, }, "mongo_server_version": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, Computed: true, @@ -297,28 +295,28 @@ func resourceCosmosDbAccount() *schema.Resource { }, "network_acl_bypass_for_azure_services": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: false, }, "network_acl_bypass_ids": { - Type: schema.TypeList, - Optional: true, Elem: &schema.Schema{ - Type: schema.TypeString, + Type: pluginsdk.TypeList, + Optional: true, Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: azure.ValidateResourceID, }, }, "backup": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, Computed: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validation.StringInSlice([]string{ @@ -328,14 +326,14 @@ func resourceCosmosDbAccount() *schema.Resource { }, "interval_in_minutes": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Computed: true, ValidateFunc: validation.IntBetween(60, 1440), }, "retention_in_hours": { - Type: 
schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Computed: true, ValidateFunc: validation.IntBetween(8, 720), @@ -345,14 +343,14 @@ func resourceCosmosDbAccount() *schema.Resource { }, "identity": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ // only system assigned identity is supported "type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ string(documentdb.ResourceIdentityTypeSystemAssigned), @@ -360,12 +358,12 @@ func resourceCosmosDbAccount() *schema.Resource { }, "principal_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "tenant_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, }, @@ -376,84 +374,84 @@ func resourceCosmosDbAccount() *schema.Resource { // computed "endpoint": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "read_endpoints": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, "write_endpoints": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, "primary_key": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, }, "secondary_key": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, }, "primary_readonly_key": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, }, "secondary_readonly_key": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, }, "primary_master_key": { - Type: 
schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, Deprecated: "This property has been renamed to `primary_key` and will be removed in v3.0 of the provider in support of HashiCorp's inclusive language policy which can be found here: https://discuss.hashicorp.com/t/inclusive-language-changes", }, "secondary_master_key": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, Deprecated: "This property has been renamed to `secondary_key` and will be removed in v3.0 of the provider in support of HashiCorp's inclusive language policy which can be found here: https://discuss.hashicorp.com/t/inclusive-language-changes", }, "primary_readonly_master_key": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, Deprecated: "This property has been renamed to `primary_readonly_key` and will be removed in v3.0 of the provider in support of HashiCorp's inclusive language policy which can be found here: https://discuss.hashicorp.com/t/inclusive-language-changes", }, "secondary_readonly_master_key": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, Deprecated: "This property has been renamed to `secondary_readonly_key` and will be removed in v3.0 of the provider in support of HashiCorp's inclusive language policy which can be found here: https://discuss.hashicorp.com/t/inclusive-language-changes", }, "connection_strings": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, Sensitive: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, Sensitive: true, }, }, @@ -463,7 +461,7 @@ func resourceCosmosDbAccount() *schema.Resource { } } -func resourceCosmosDbAccountCreate(d *schema.ResourceData, meta interface{}) error { +func resourceCosmosDbAccountCreate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Cosmos.DatabaseClient 
ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -595,7 +593,7 @@ func resourceCosmosDbAccountCreate(d *schema.ResourceData, meta interface{}) err return resourceCosmosDbAccountRead(d, meta) } -func resourceCosmosDbAccountUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceCosmosDbAccountUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Cosmos.DatabaseClient ctx, cancel := timeouts.ForUpdate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -750,7 +748,7 @@ func resourceCosmosDbAccountUpdate(d *schema.ResourceData, meta interface{}) err return resourceCosmosDbAccountRead(d, meta) } -func resourceCosmosDbAccountRead(d *schema.ResourceData, meta interface{}) error { +func resourceCosmosDbAccountRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Cosmos.DatabaseClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() @@ -926,7 +924,7 @@ func resourceCosmosDbAccountRead(d *schema.ResourceData, meta interface{}) error return tags.FlattenAndSet(d, resp.Tags) } -func resourceCosmosDbAccountDelete(d *schema.ResourceData, meta interface{}) error { +func resourceCosmosDbAccountDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Cosmos.DatabaseClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() @@ -941,11 +939,11 @@ func resourceCosmosDbAccountDelete(d *schema.ResourceData, meta interface{}) err } // the SDK now will return a `WasNotFound` response even when still deleting - stateConf := &resource.StateChangeConf{ + stateConf := &pluginsdk.StateChangeConf{ Pending: []string{"Deleting"}, Target: []string{"NotFound"}, MinTimeout: 30 * time.Second, - Timeout: d.Timeout(schema.TimeoutDelete), + Timeout: d.Timeout(pluginsdk.TimeoutDelete), Refresh: func() (interface{}, string, error) { resp, err2 := client.Get(ctx, 
id.ResourceGroup, id.Name) if err2 != nil { @@ -959,14 +957,14 @@ func resourceCosmosDbAccountDelete(d *schema.ResourceData, meta interface{}) err }, } - if _, err = stateConf.WaitForState(); err != nil { + if _, err = stateConf.WaitForStateContext(ctx); err != nil { return fmt.Errorf("waiting for CosmosDB Account %q (Resource Group %q) to be deleted: %+v", id.Name, id.ResourceGroup, err) } return nil } -func resourceCosmosDbAccountApiUpsert(client *documentdb.DatabaseAccountsClient, ctx context.Context, resourceGroup string, name string, account documentdb.DatabaseAccountCreateUpdateParameters, d *schema.ResourceData) (*documentdb.DatabaseAccountGetResults, error) { +func resourceCosmosDbAccountApiUpsert(client *documentdb.DatabaseAccountsClient, ctx context.Context, resourceGroup string, name string, account documentdb.DatabaseAccountCreateUpdateParameters, d *pluginsdk.ResourceData) (*documentdb.DatabaseAccountGetResults, error) { future, err := client.CreateOrUpdate(ctx, resourceGroup, name, account) if err != nil { return nil, fmt.Errorf("Error creating/updating CosmosDB Account %q (Resource Group %q): %+v", name, resourceGroup, err) @@ -977,7 +975,7 @@ func resourceCosmosDbAccountApiUpsert(client *documentdb.DatabaseAccountsClient, } // if a replication location is added or removed it can take some time to provision - stateConf := &resource.StateChangeConf{ + stateConf := &pluginsdk.StateChangeConf{ Pending: []string{"Creating", "Updating", "Deleting", "Initializing"}, Target: []string{"Succeeded"}, MinTimeout: 30 * time.Second, @@ -1014,12 +1012,12 @@ func resourceCosmosDbAccountApiUpsert(client *documentdb.DatabaseAccountsClient, } if d.IsNewResource() { - stateConf.Timeout = d.Timeout(schema.TimeoutCreate) + stateConf.Timeout = d.Timeout(pluginsdk.TimeoutCreate) } else { - stateConf.Timeout = d.Timeout(schema.TimeoutUpdate) + stateConf.Timeout = d.Timeout(pluginsdk.TimeoutUpdate) } - resp, err := stateConf.WaitForState() + resp, err := 
stateConf.WaitForStateContext(ctx) if err != nil { return nil, fmt.Errorf("Error waiting for the CosmosDB Account %q (Resource Group %q) to provision: %+v", name, resourceGroup, err) } @@ -1028,7 +1026,7 @@ func resourceCosmosDbAccountApiUpsert(client *documentdb.DatabaseAccountsClient, return &r, nil } -func expandAzureRmCosmosDBAccountConsistencyPolicy(d *schema.ResourceData) *documentdb.ConsistencyPolicy { +func expandAzureRmCosmosDBAccountConsistencyPolicy(d *pluginsdk.ResourceData) *documentdb.ConsistencyPolicy { i := d.Get("consistency_policy").([]interface{}) if len(i) == 0 || i[0] == nil { return nil @@ -1056,9 +1054,9 @@ func expandAzureRmCosmosDBAccountConsistencyPolicy(d *schema.ResourceData) *docu return &policy } -func expandAzureRmCosmosDBAccountGeoLocations(d *schema.ResourceData) ([]documentdb.Location, error) { +func expandAzureRmCosmosDBAccountGeoLocations(d *pluginsdk.ResourceData) ([]documentdb.Location, error) { locations := make([]documentdb.Location, 0) - for _, l := range d.Get("geo_location").(*schema.Set).List() { + for _, l := range d.Get("geo_location").(*pluginsdk.Set).List() { data := l.(map[string]interface{}) location := documentdb.Location{ @@ -1098,8 +1096,8 @@ func expandAzureRmCosmosDBAccountGeoLocations(d *schema.ResourceData) ([]documen return locations, nil } -func expandAzureRmCosmosDBAccountCapabilities(d *schema.ResourceData) *[]documentdb.Capability { - capabilities := d.Get("capabilities").(*schema.Set).List() +func expandAzureRmCosmosDBAccountCapabilities(d *pluginsdk.ResourceData) *[]documentdb.Capability { + capabilities := d.Get("capabilities").(*pluginsdk.Set).List() s := make([]documentdb.Capability, 0) for _, c := range capabilities { @@ -1110,8 +1108,8 @@ func expandAzureRmCosmosDBAccountCapabilities(d *schema.ResourceData) *[]documen return &s } -func expandAzureRmCosmosDBAccountVirtualNetworkRules(d *schema.ResourceData) *[]documentdb.VirtualNetworkRule { - virtualNetworkRules := 
d.Get("virtual_network_rule").(*schema.Set).List() +func expandAzureRmCosmosDBAccountVirtualNetworkRules(d *pluginsdk.ResourceData) *[]documentdb.VirtualNetworkRule { + virtualNetworkRules := d.Get("virtual_network_rule").(*pluginsdk.Set).List() s := make([]documentdb.VirtualNetworkRule, len(virtualNetworkRules)) for i, r := range virtualNetworkRules { @@ -1137,8 +1135,8 @@ func flattenAzureRmCosmosDBAccountConsistencyPolicy(policy *documentdb.Consisten return []interface{}{result} } -func flattenAzureRmCosmosDBAccountGeoLocations(account *documentdb.DatabaseAccountGetProperties) *schema.Set { - locationSet := schema.Set{ +func flattenAzureRmCosmosDBAccountGeoLocations(account *documentdb.DatabaseAccountGetProperties) *pluginsdk.Set { + locationSet := pluginsdk.Set{ F: resourceAzureRMCosmosDBAccountGeoLocationHash, } if account == nil { @@ -1186,8 +1184,8 @@ func isServerlessCapacityMode(accResp documentdb.DatabaseAccountGetResults) bool return false } -func flattenAzureRmCosmosDBAccountCapabilities(capabilities *[]documentdb.Capability) *schema.Set { - s := schema.Set{ +func flattenAzureRmCosmosDBAccountCapabilities(capabilities *[]documentdb.Capability) *pluginsdk.Set { + s := pluginsdk.Set{ F: resourceAzureRMCosmosDBAccountCapabilitiesHash, } @@ -1203,8 +1201,8 @@ func flattenAzureRmCosmosDBAccountCapabilities(capabilities *[]documentdb.Capabi return &s } -func flattenAzureRmCosmosDBAccountVirtualNetworkRules(rules *[]documentdb.VirtualNetworkRule) *schema.Set { - results := schema.Set{ +func flattenAzureRmCosmosDBAccountVirtualNetworkRules(rules *[]documentdb.VirtualNetworkRule) *pluginsdk.Set { + results := pluginsdk.Set{ F: resourceAzureRMCosmosDBAccountVirtualNetworkRuleHash, } @@ -1231,7 +1229,7 @@ func resourceAzureRMCosmosDBAccountGeoLocationHash(v interface{}) int { buf.WriteString(fmt.Sprintf("%s-%d", location, priority)) } - return schema.HashString(buf.String()) + return pluginsdk.HashString(buf.String()) } func 
resourceAzureRMCosmosDBAccountCapabilitiesHash(v interface{}) int { @@ -1241,7 +1239,7 @@ func resourceAzureRMCosmosDBAccountCapabilitiesHash(v interface{}) int { buf.WriteString(fmt.Sprintf("%s-", m["name"].(string))) } - return schema.HashString(buf.String()) + return pluginsdk.HashString(buf.String()) } func resourceAzureRMCosmosDBAccountVirtualNetworkRuleHash(v interface{}) int { @@ -1251,7 +1249,7 @@ func resourceAzureRMCosmosDBAccountVirtualNetworkRuleHash(v interface{}) int { buf.WriteString(strings.ToLower(m["id"].(string))) } - return schema.HashString(buf.String()) + return pluginsdk.HashString(buf.String()) } func expandCosmosdbAccountBackup(input []interface{}) (documentdb.BasicBackupPolicy, error) { diff --git a/azurerm/internal/services/cosmos/cosmosdb_account_resource_failover_test.go b/azurerm/internal/services/cosmos/cosmosdb_account_resource_failover_test.go index df2b2a684df4..84308b3a57a1 100644 --- a/azurerm/internal/services/cosmos/cosmosdb_account_resource_failover_test.go +++ b/azurerm/internal/services/cosmos/cosmosdb_account_resource_failover_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -13,10 +12,10 @@ func TestAccCosmosDBAccount_failover_boundedStaleness(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_account", "test") r := CosmosDBAccountResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.failover_boundedStaleness(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("kind").HasValue("GlobalDocumentDB"), ), @@ -28,10 +27,10 @@ func 
TestAccCosmosDBAccount_failover_boundedStalenessComplete(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_account", "test") r := CosmosDBAccountResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.failover_boundedStalenessComplete(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -42,10 +41,10 @@ func TestAccCosmosDBAccount_failover_eventualConsistency(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_account", "test") r := CosmosDBAccountResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.failover_eventualConsistency(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -56,10 +55,10 @@ func TestAccCosmosDBAccount_failover_mongoDB(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_account", "test") r := CosmosDBAccountResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.failover_mongoDB(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("kind").HasValue("MongoDB"), ), @@ -71,10 +70,10 @@ func TestAccCosmosDBAccount_failover_session(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_account", "test") r := CosmosDBAccountResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.failover_session(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -85,10 +84,10 @@ func TestAccCosmosDBAccount_failover_strong(t *testing.T) { data := 
acceptance.BuildTestData(t, "azurerm_cosmosdb_account", "test") r := CosmosDBAccountResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.failover_strong(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -99,10 +98,10 @@ func TestAccCosmosDBAccount_failover_geoReplicated(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_account", "test") r := CosmosDBAccountResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.failover_geoReplicated(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, diff --git a/azurerm/internal/services/cosmos/cosmosdb_account_resource_test.go b/azurerm/internal/services/cosmos/cosmosdb_account_resource_test.go index 440d82ae6b38..4c2103dbb651 100644 --- a/azurerm/internal/services/cosmos/cosmosdb_account_resource_test.go +++ b/azurerm/internal/services/cosmos/cosmosdb_account_resource_test.go @@ -7,13 +7,12 @@ import ( "testing" "github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2021-01-15/documentdb" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -88,10 +87,10 @@ func testAccCosmosDBAccount_public_network_access_enabled(t *testing.T, kind doc data := acceptance.BuildTestData(t, "azurerm_cosmosdb_account", "test") r := CosmosDBAccountResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.network_access_enabled(data, kind, consistency), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( checkAccCosmosDBAccount_basic(data, consistency, 1), ), }, @@ -103,10 +102,10 @@ func TestAccCosmosDBAccount_keyVaultUri(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_account", "test") r := CosmosDBAccountResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.key_vault_uri(data, documentdb.MongoDB, documentdb.Strong), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( checkAccCosmosDBAccount_basic(data, documentdb.Strong, 1), ), }, @@ -118,17 +117,17 @@ func TestAccCosmosDBAccount_keyVaultUriUpdateConsistancy(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_account", "test") r := CosmosDBAccountResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.key_vault_uri(data, documentdb.MongoDB, documentdb.Strong), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( checkAccCosmosDBAccount_basic(data, documentdb.Strong, 1), ), }, data.ImportStep(), { Config: r.key_vault_uri(data, documentdb.MongoDB, documentdb.Session), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( checkAccCosmosDBAccount_basic(data, documentdb.Session, 1), ), }, @@ -140,10 +139,10 @@ func testAccCosmosDBAccount_basicWith(t *testing.T, kind 
documentdb.DatabaseAcco data := acceptance.BuildTestData(t, "azurerm_cosmosdb_account", "test") r := CosmosDBAccountResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data, kind, consistency), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( checkAccCosmosDBAccount_basic(data, consistency, 1), ), }, @@ -155,10 +154,10 @@ func testAccCosmosDBAccount_basicMongoDBWith(t *testing.T, consistency documentd data := acceptance.BuildTestData(t, "azurerm_cosmosdb_account", "test") r := CosmosDBAccountResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicMongoDB(data, consistency), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( checkAccCosmosDBAccount_basic(data, consistency, 1), ), }, @@ -170,10 +169,10 @@ func TestAccCosmosDBAccount_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_account", "test") r := CosmosDBAccountResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data, "GlobalDocumentDB", documentdb.Eventual), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( checkAccCosmosDBAccount_basic(data, documentdb.Eventual, 1), ), }, @@ -192,7 +191,7 @@ func TestAccCosmosDBAccount_updateConsistency_mongo(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_account", "test") r := CosmosDBAccountResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicMongoDB(data, documentdb.Strong), Check: checkAccCosmosDBAccount_basic(data, documentdb.Strong, 1), @@ -230,7 +229,7 @@ func testAccCosmosDBAccount_updateConsistency(t *testing.T, kind documentdb.Data data := acceptance.BuildTestData(t, 
"azurerm_cosmosdb_account", "test") r := CosmosDBAccountResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data, kind, documentdb.Strong), Check: checkAccCosmosDBAccount_basic(data, documentdb.Strong, 1), @@ -268,10 +267,10 @@ func TestAccCosmosDBAccount_complete_mongo(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_account", "test") r := CosmosDBAccountResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.completeMongoDB(data, documentdb.Eventual), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( checkAccCosmosDBAccount_basic(data, documentdb.Eventual, 3), ), }, @@ -291,10 +290,10 @@ func testAccCosmosDBAccount_completeWith(t *testing.T, kind documentdb.DatabaseA data := acceptance.BuildTestData(t, "azurerm_cosmosdb_account", "test") r := CosmosDBAccountResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.complete(data, kind, documentdb.Eventual), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( checkAccCosmosDBAccount_basic(data, documentdb.Eventual, 3), ), }, @@ -306,10 +305,10 @@ func TestAccCosmosDBAccount_completeZoneRedundant_mongo(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_account", "test") r := CosmosDBAccountResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.zoneRedundantMongoDB(data), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -329,10 +328,10 @@ func testAccCosmosDBAccount_zoneRedundantWith(t *testing.T, kind documentdb.Data data := acceptance.BuildTestData(t, "azurerm_cosmosdb_account", "test") r := CosmosDBAccountResource{} 
- data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.zoneRedundant(data, kind), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -344,17 +343,17 @@ func TestAccCosmosDBAccount_zoneRedundant_update_mongo(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_account", "test") r := CosmosDBAccountResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicMongoDB(data, documentdb.Eventual), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( checkAccCosmosDBAccount_basic(data, documentdb.Eventual, 1), ), }, data.ImportStep(), { Config: r.zoneRedundantMongoDBUpdate(data, documentdb.Eventual), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( checkAccCosmosDBAccount_basic(data, documentdb.Eventual, 2), ), }, @@ -366,31 +365,31 @@ func TestAccCosmosDBAccount_update_mongo(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_account", "test") r := CosmosDBAccountResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicMongoDB(data, documentdb.Eventual), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( checkAccCosmosDBAccount_basic(data, documentdb.Eventual, 1), ), }, data.ImportStep(), { Config: r.completeMongoDB(data, documentdb.Eventual), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( checkAccCosmosDBAccount_basic(data, documentdb.Eventual, 3), ), }, data.ImportStep(), { Config: r.completeUpdatedMongoDB(data, documentdb.Eventual), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( 
checkAccCosmosDBAccount_basic(data, documentdb.Eventual, 3), ), }, data.ImportStep(), { Config: r.basicWithResourcesMongoDB(data, documentdb.Eventual), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( // checkAccCosmosDBAccount_basic(data, documentdb.Eventual, 1), ), }, @@ -410,31 +409,31 @@ func testAccCosmosDBAccount_updateWith(t *testing.T, kind documentdb.DatabaseAcc data := acceptance.BuildTestData(t, "azurerm_cosmosdb_account", "test") r := CosmosDBAccountResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data, kind, documentdb.Eventual), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( checkAccCosmosDBAccount_basic(data, documentdb.Eventual, 1), ), }, data.ImportStep(), { Config: r.complete(data, kind, documentdb.Eventual), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( checkAccCosmosDBAccount_basic(data, documentdb.Eventual, 3), ), }, data.ImportStep(), { Config: r.completeUpdated(data, kind, documentdb.Eventual), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( checkAccCosmosDBAccount_basic(data, documentdb.Eventual, 3), ), }, data.ImportStep(), { Config: r.basicWithResources(data, kind, documentdb.Eventual), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( // checkAccCosmosDBAccount_basic(data, documentdb.Eventual, 1), ), }, @@ -486,10 +485,10 @@ func testAccCosmosDBAccount_capabilitiesWith(t *testing.T, kind documentdb.Datab data := acceptance.BuildTestData(t, "azurerm_cosmosdb_account", "test") r := CosmosDBAccountResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.capabilities(data, kind, capabilities), - Check: resource.ComposeAggregateTestCheckFunc( + Check: 
acceptance.ComposeAggregateTestCheckFunc( checkAccCosmosDBAccount_basic(data, documentdb.Strong, 1), ), }, @@ -501,17 +500,17 @@ func TestAccCosmosDBAccount_capabilitiesAdd(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_account", "test") r := CosmosDBAccountResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.capabilities(data, documentdb.GlobalDocumentDB, []string{"EnableCassandra"}), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( checkAccCosmosDBAccount_basic(data, documentdb.Strong, 1), ), }, data.ImportStep(), { Config: r.capabilities(data, documentdb.GlobalDocumentDB, []string{"EnableCassandra", "EnableAggregationPipeline"}), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( checkAccCosmosDBAccount_basic(data, documentdb.Strong, 1), ), }, @@ -523,17 +522,17 @@ func TestAccCosmosDBAccount_capabilitiesUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_account", "test") r := CosmosDBAccountResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.capabilities(data, documentdb.GlobalDocumentDB, []string{"EnableCassandra"}), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( checkAccCosmosDBAccount_basic(data, documentdb.Strong, 1), ), }, data.ImportStep(), { Config: r.capabilities(data, documentdb.GlobalDocumentDB, []string{"EnableTable", "EnableAggregationPipeline"}), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( checkAccCosmosDBAccount_basic(data, documentdb.Strong, 1), ), }, @@ -545,24 +544,24 @@ func TestAccCosmosDBAccount_geoLocationsUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_account", "test") r := CosmosDBAccountResource{} - data.ResourceTest(t, r, 
[]resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data, "GlobalDocumentDB", documentdb.Eventual), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( checkAccCosmosDBAccount_basic(data, documentdb.Eventual, 1), ), }, data.ImportStep(), { Config: r.geoLocationUpdate(data, "GlobalDocumentDB", documentdb.Eventual), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( checkAccCosmosDBAccount_basic(data, documentdb.Eventual, 2), ), }, data.ImportStep(), { Config: r.basic(data, "GlobalDocumentDB", documentdb.Eventual), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( checkAccCosmosDBAccount_basic(data, documentdb.Eventual, 1), ), }, @@ -574,10 +573,10 @@ func TestAccCosmosDBAccount_freeTier(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_account", "test") r := CosmosDBAccountResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.freeTier(data, "GlobalDocumentDB", documentdb.Eventual), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( checkAccCosmosDBAccount_basic(data, documentdb.Eventual, 1), check.That(data.ResourceName).Key("enable_free_tier").HasValue("true"), ), @@ -590,10 +589,10 @@ func TestAccCosmosDBAccount_analyticalStorage(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_account", "test") r := CosmosDBAccountResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.analyticalStorage(data, "GlobalDocumentDB", documentdb.Eventual), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( checkAccCosmosDBAccount_basic(data, documentdb.Eventual, 1), 
check.That(data.ResourceName).Key("analytical_storage_enabled").HasValue("true"), ), @@ -606,10 +605,10 @@ func TestAccCosmosDBAccount_vNetFilters(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_account", "test") r := CosmosDBAccountResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.vNetFilters(data), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("is_virtual_network_filter_enabled").HasValue("true"), check.That(data.ResourceName).Key("virtual_network_rule.#").HasValue("2"), @@ -623,17 +622,17 @@ func TestAccCosmosDBAccount_identity(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_account", "test") r := CosmosDBAccountResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicMongoDB(data, documentdb.Session), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.systemAssignedIdentity(data, documentdb.Session), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("identity.0.principal_id").Exists(), check.That(data.ResourceName).Key("identity.0.tenant_id").Exists(), @@ -642,7 +641,7 @@ func TestAccCosmosDBAccount_identity(t *testing.T) { data.ImportStep(), { Config: r.basicMongoDB(data, documentdb.Session), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -654,10 +653,10 @@ func TestAccCosmosDBAccount_backup(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_account", "test") r 
:= CosmosDBAccountResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data, documentdb.GlobalDocumentDB, documentdb.Eventual), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("backup.0.type").HasValue("Periodic"), check.That(data.ResourceName).Key("backup.0.interval_in_minutes").HasValue("240"), @@ -667,14 +666,14 @@ func TestAccCosmosDBAccount_backup(t *testing.T) { data.ImportStep(), { Config: r.basicWithBackupPeriodic(data, documentdb.GlobalDocumentDB, documentdb.Eventual), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.basicWithBackupPeriodicUpdate(data, documentdb.GlobalDocumentDB, documentdb.Eventual), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("backup.0.type").HasValue("Periodic"), ), @@ -687,10 +686,10 @@ func TestAccCosmosDBAccount_backupContinuous(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_account", "test") r := CosmosDBAccountResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicWithBackupContinuous(data, documentdb.GlobalDocumentDB, documentdb.Eventual), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -702,24 +701,24 @@ func TestAccCosmosDBAccount_networkBypass(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_account", "test") r := CosmosDBAccountResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { 
Config: r.basic(data, documentdb.GlobalDocumentDB, documentdb.Eventual), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.basicWithNetworkBypass(data, documentdb.GlobalDocumentDB, documentdb.Eventual), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.basicWithoutNetworkBypass(data, documentdb.GlobalDocumentDB, documentdb.Eventual), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -731,10 +730,10 @@ func TestAccCosmosDBAccount_mongoVersion32(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_account", "test") r := CosmosDBAccountResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicMongoDBVersion32(data, documentdb.Session), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -746,10 +745,10 @@ func TestAccCosmosDBAccount_mongoVersion40(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_account", "test") r := CosmosDBAccountResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicMongoDBVersion40(data, documentdb.Session), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -757,7 +756,7 @@ func TestAccCosmosDBAccount_mongoVersion40(t *testing.T) { }) } -func (t CosmosDBAccountResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (t 
CosmosDBAccountResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := parse.DatabaseAccountID(state.ID) if err != nil { return nil, err @@ -1639,8 +1638,8 @@ resource "azurerm_cosmosdb_account" "test" { `, data.RandomInteger, data.Locations.Primary, data.RandomInteger, string(consistency)) } -func checkAccCosmosDBAccount_basic(data acceptance.TestData, consistency documentdb.DefaultConsistencyLevel, locationCount int) resource.TestCheckFunc { - return resource.ComposeTestCheckFunc( +func checkAccCosmosDBAccount_basic(data acceptance.TestData, consistency documentdb.DefaultConsistencyLevel, locationCount int) acceptance.TestCheckFunc { + return acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("name").Exists(), check.That(data.ResourceName).Key("resource_group_name").Exists(), check.That(data.ResourceName).Key("location").HasValue(azure.NormalizeLocation(data.Locations.Primary)), diff --git a/azurerm/internal/services/cosmos/cosmosdb_cassandra_keyspace_resource.go b/azurerm/internal/services/cosmos/cosmosdb_cassandra_keyspace_resource.go index ac85525df665..2216fe030240 100644 --- a/azurerm/internal/services/cosmos/cosmosdb_cassandra_keyspace_resource.go +++ b/azurerm/internal/services/cosmos/cosmosdb_cassandra_keyspace_resource.go @@ -5,11 +5,8 @@ import ( "log" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" - "github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2021-01-15/documentdb" "github.com/hashicorp/go-azure-helpers/response" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" @@ -17,12 +14,13 @@ import ( 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/migration" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func resourceCosmosDbCassandraKeyspace() *schema.Resource { - return &schema.Resource{ +func resourceCosmosDbCassandraKeyspace() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceCosmosDbCassandraKeyspaceCreate, Read: resourceCosmosDbCassandraKeyspaceRead, Update: resourceCosmosDbCassandraKeyspaceUpdate, @@ -36,16 +34,16 @@ func resourceCosmosDbCassandraKeyspace() *schema.Resource { 0: migration.CassandraKeyspaceV0ToV1{}, }), - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(30 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.CosmosEntityName, @@ -54,14 +52,14 @@ func resourceCosmosDbCassandraKeyspace() *schema.Resource { "resource_group_name": azure.SchemaResourceGroupName(), "account_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: 
validate.CosmosAccountName, }, "throughput": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Computed: true, ValidateFunc: validate.CosmosThroughput, @@ -72,7 +70,7 @@ func resourceCosmosDbCassandraKeyspace() *schema.Resource { } } -func resourceCosmosDbCassandraKeyspaceCreate(d *schema.ResourceData, meta interface{}) error { +func resourceCosmosDbCassandraKeyspaceCreate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Cosmos.CassandraClient ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -136,7 +134,7 @@ func resourceCosmosDbCassandraKeyspaceCreate(d *schema.ResourceData, meta interf return resourceCosmosDbCassandraKeyspaceRead(d, meta) } -func resourceCosmosDbCassandraKeyspaceUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceCosmosDbCassandraKeyspaceUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Cosmos.CassandraClient ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -187,7 +185,7 @@ func resourceCosmosDbCassandraKeyspaceUpdate(d *schema.ResourceData, meta interf return resourceCosmosDbCassandraKeyspaceRead(d, meta) } -func resourceCosmosDbCassandraKeyspaceRead(d *schema.ResourceData, meta interface{}) error { +func resourceCosmosDbCassandraKeyspaceRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Cosmos.CassandraClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() @@ -231,7 +229,7 @@ func resourceCosmosDbCassandraKeyspaceRead(d *schema.ResourceData, meta interfac return nil } -func resourceCosmosDbCassandraKeyspaceDelete(d *schema.ResourceData, meta interface{}) error { +func resourceCosmosDbCassandraKeyspaceDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Cosmos.CassandraClient ctx, cancel := 
timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() diff --git a/azurerm/internal/services/cosmos/cosmosdb_cassandra_keyspace_resource_test.go b/azurerm/internal/services/cosmos/cosmosdb_cassandra_keyspace_resource_test.go index 8dc9177cf20f..ac0a6d92e2f2 100644 --- a/azurerm/internal/services/cosmos/cosmosdb_cassandra_keyspace_resource_test.go +++ b/azurerm/internal/services/cosmos/cosmosdb_cassandra_keyspace_resource_test.go @@ -6,12 +6,11 @@ import ( "testing" "github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2021-01-15/documentdb" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -22,10 +21,10 @@ func TestAccCosmosDbCassandraKeyspace_basic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_cassandra_keyspace", "test") r := CosmosDbCassandraKeyspaceResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -37,10 +36,10 @@ func TestAccCosmosDbCassandraKeyspace_complete(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_cassandra_keyspace", "test") r := CosmosDbCassandraKeyspaceResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, 
[]acceptance.TestStep{ { Config: r.throughput(data, 700), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("throughput").HasValue("700"), ), @@ -53,10 +52,10 @@ func TestAccCosmosDbCassandraKeyspace_update(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_cassandra_keyspace", "test") r := CosmosDbCassandraKeyspaceResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.throughput(data, 700), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("throughput").HasValue("700"), ), @@ -64,7 +63,7 @@ func TestAccCosmosDbCassandraKeyspace_update(t *testing.T) { data.ImportStep(), { Config: r.throughput(data, 1700), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("throughput").HasValue("1700"), ), @@ -77,10 +76,10 @@ func TestAccCosmosDbCassandraKeyspace_autoscale(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_cassandra_keyspace", "test") r := CosmosDbCassandraKeyspaceResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.autoscale(data, 4000), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("autoscale_settings.0.max_throughput").HasValue("4000"), ), @@ -88,7 +87,7 @@ func TestAccCosmosDbCassandraKeyspace_autoscale(t *testing.T) { data.ImportStep(), { Config: r.autoscale(data, 5000), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( 
check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("autoscale_settings.0.max_throughput").HasValue("5000"), ), @@ -96,7 +95,7 @@ func TestAccCosmosDbCassandraKeyspace_autoscale(t *testing.T) { data.ImportStep(), { Config: r.autoscale(data, 4000), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("autoscale_settings.0.max_throughput").HasValue("4000"), ), @@ -105,7 +104,7 @@ func TestAccCosmosDbCassandraKeyspace_autoscale(t *testing.T) { }) } -func (t CosmosDbCassandraKeyspaceResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (t CosmosDbCassandraKeyspaceResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := parse.CassandraKeyspaceID(state.ID) if err != nil { return nil, err diff --git a/azurerm/internal/services/cosmos/cosmosdb_cassandra_table_resource.go b/azurerm/internal/services/cosmos/cosmosdb_cassandra_table_resource.go index 23982be70082..8a2d9140578d 100644 --- a/azurerm/internal/services/cosmos/cosmosdb_cassandra_table_resource.go +++ b/azurerm/internal/services/cosmos/cosmosdb_cassandra_table_resource.go @@ -5,23 +5,21 @@ import ( "log" "time" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" - "github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2021-01-15/documentdb" "github.com/hashicorp/go-azure-helpers/response" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/common" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/parse" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func resourceCosmosDbCassandraTable() *schema.Resource { - return &schema.Resource{ +func resourceCosmosDbCassandraTable() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceCosmosDbCassandraTableCreate, Read: resourceCosmosDbCassandraTableRead, Update: resourceCosmosDbCassandraTableUpdate, @@ -30,37 +28,37 @@ func resourceCosmosDbCassandraTable() *schema.Resource { // TODO: replace this with an importer which validates the ID during import Importer: pluginsdk.DefaultImporter(), - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(30 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.CosmosEntityName, }, "cassandra_keyspace_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.CassandraKeyspaceID, }, "default_ttl": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Computed: true, ValidateFunc: validation.IntAtLeast(-1), }, "analytical_storage_ttl": { - 
Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, ForceNew: true, Default: -2, @@ -70,7 +68,7 @@ func resourceCosmosDbCassandraTable() *schema.Resource { "schema": common.CassandraTableSchemaPropertySchema(), "throughput": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Computed: true, ValidateFunc: validate.CosmosThroughput, @@ -81,7 +79,7 @@ func resourceCosmosDbCassandraTable() *schema.Resource { } } -func resourceCosmosDbCassandraTableCreate(d *schema.ResourceData, meta interface{}) error { +func resourceCosmosDbCassandraTableCreate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Cosmos.CassandraClient ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -151,7 +149,7 @@ func resourceCosmosDbCassandraTableCreate(d *schema.ResourceData, meta interface return resourceCosmosDbCassandraTableRead(d, meta) } -func resourceCosmosDbCassandraTableUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceCosmosDbCassandraTableUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Cosmos.CassandraClient ctx, cancel := timeouts.ForUpdate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -208,7 +206,7 @@ func resourceCosmosDbCassandraTableUpdate(d *schema.ResourceData, meta interface return resourceCosmosDbCassandraTableRead(d, meta) } -func resourceCosmosDbCassandraTableRead(d *schema.ResourceData, meta interface{}) error { +func resourceCosmosDbCassandraTableRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Cosmos.CassandraClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) subscriptionId := meta.(*clients.Client).Account.SubscriptionId @@ -268,7 +266,7 @@ func resourceCosmosDbCassandraTableRead(d *schema.ResourceData, meta interface{} return nil } -func resourceCosmosDbCassandraTableDelete(d *schema.ResourceData, meta interface{}) error { 
+func resourceCosmosDbCassandraTableDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Cosmos.CassandraClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() @@ -293,7 +291,7 @@ func resourceCosmosDbCassandraTableDelete(d *schema.ResourceData, meta interface return nil } -func expandTableSchema(d *schema.ResourceData) *documentdb.CassandraSchema { +func expandTableSchema(d *pluginsdk.ResourceData) *documentdb.CassandraSchema { i := d.Get("schema").([]interface{}) if len(i) == 0 || i[0] == nil { diff --git a/azurerm/internal/services/cosmos/cosmosdb_cassandra_table_resource_test.go b/azurerm/internal/services/cosmos/cosmosdb_cassandra_table_resource_test.go index d206da66dd33..ffcc4b268b79 100644 --- a/azurerm/internal/services/cosmos/cosmosdb_cassandra_table_resource_test.go +++ b/azurerm/internal/services/cosmos/cosmosdb_cassandra_table_resource_test.go @@ -5,19 +5,18 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) type CosmosDBCassandraTableResource struct { } -func (t CosmosDBCassandraTableResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (t CosmosDBCassandraTableResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := 
parse.CassandraTableID(state.ID) if err != nil { return nil, err @@ -35,11 +34,11 @@ func TestAccCosmosDbCassandraTable_basic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_cassandra_table", "test") r := CosmosDBCassandraTableResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -51,11 +50,11 @@ func TestAccCosmosDbCassandraTable_analyticalStorageTTL(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_cassandra_table", "test") r := CosmosDBCassandraTableResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.analyticalStorageTTL(data), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, diff --git a/azurerm/internal/services/cosmos/cosmosdb_gremlin_database_resource.go b/azurerm/internal/services/cosmos/cosmosdb_gremlin_database_resource.go index c09da80a8cd6..635902f3ad11 100644 --- a/azurerm/internal/services/cosmos/cosmosdb_gremlin_database_resource.go +++ b/azurerm/internal/services/cosmos/cosmosdb_gremlin_database_resource.go @@ -5,11 +5,8 @@ import ( "log" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" - "github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2021-01-15/documentdb" "github.com/hashicorp/go-azure-helpers/response" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" @@ -17,12 +14,13 @@ import ( 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/migration" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func resourceCosmosGremlinDatabase() *schema.Resource { - return &schema.Resource{ +func resourceCosmosGremlinDatabase() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceCosmosGremlinDatabaseCreate, Update: resourceCosmosGremlinDatabaseUpdate, Read: resourceCosmosGremlinDatabaseRead, @@ -36,16 +34,16 @@ func resourceCosmosGremlinDatabase() *schema.Resource { 0: migration.GremlinDatabaseV0ToV1{}, }), - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(30 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.CosmosEntityName, @@ -54,14 +52,14 @@ func resourceCosmosGremlinDatabase() *schema.Resource { "resource_group_name": azure.SchemaResourceGroupName(), "account_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.CosmosAccountName, }, "throughput": { - Type: 
schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Computed: true, ValidateFunc: validate.CosmosThroughput, @@ -72,7 +70,7 @@ func resourceCosmosGremlinDatabase() *schema.Resource { } } -func resourceCosmosGremlinDatabaseCreate(d *schema.ResourceData, meta interface{}) error { +func resourceCosmosGremlinDatabaseCreate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Cosmos.GremlinClient ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -136,7 +134,7 @@ func resourceCosmosGremlinDatabaseCreate(d *schema.ResourceData, meta interface{ return resourceCosmosGremlinDatabaseRead(d, meta) } -func resourceCosmosGremlinDatabaseUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceCosmosGremlinDatabaseUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Cosmos.GremlinClient ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -191,7 +189,7 @@ func resourceCosmosGremlinDatabaseUpdate(d *schema.ResourceData, meta interface{ return resourceCosmosGremlinDatabaseRead(d, meta) } -func resourceCosmosGremlinDatabaseRead(d *schema.ResourceData, meta interface{}) error { +func resourceCosmosGremlinDatabaseRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Cosmos.GremlinClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() @@ -235,7 +233,7 @@ func resourceCosmosGremlinDatabaseRead(d *schema.ResourceData, meta interface{}) return nil } -func resourceCosmosGremlinDatabaseDelete(d *schema.ResourceData, meta interface{}) error { +func resourceCosmosGremlinDatabaseDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Cosmos.GremlinClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() diff --git 
a/azurerm/internal/services/cosmos/cosmosdb_gremlin_database_resource_test.go b/azurerm/internal/services/cosmos/cosmosdb_gremlin_database_resource_test.go index 8b4315807b4c..53aa0774e4df 100644 --- a/azurerm/internal/services/cosmos/cosmosdb_gremlin_database_resource_test.go +++ b/azurerm/internal/services/cosmos/cosmosdb_gremlin_database_resource_test.go @@ -6,12 +6,11 @@ import ( "testing" "github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2021-01-15/documentdb" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -22,10 +21,10 @@ func TestAccCosmosGremlinDatabase_basic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_gremlin_database", "test") r := CosmosGremlinDatabaseResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -37,10 +36,10 @@ func TestAccCosmosGremlinDatabase_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_gremlin_database", "test") r := CosmosGremlinDatabaseResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: 
acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -55,10 +54,10 @@ func TestAccCosmosGremlinDatabase_complete(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_gremlin_database", "test") r := CosmosGremlinDatabaseResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.complete(data, 700), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("throughput").HasValue("700"), ), @@ -66,7 +65,7 @@ func TestAccCosmosGremlinDatabase_complete(t *testing.T) { data.ImportStep(), { Config: r.complete(data, 1700), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("throughput").HasValue("1700"), ), @@ -79,10 +78,10 @@ func TestAccCosmosGremlinDatabase_autoscale(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_gremlin_database", "test") r := CosmosGremlinDatabaseResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.autoscale(data, 4000), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("autoscale_settings.0.max_throughput").HasValue("4000"), ), @@ -90,7 +89,7 @@ func TestAccCosmosGremlinDatabase_autoscale(t *testing.T) { data.ImportStep(), { Config: r.autoscale(data, 5000), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("autoscale_settings.0.max_throughput").HasValue("5000"), ), @@ -98,7 +97,7 @@ func TestAccCosmosGremlinDatabase_autoscale(t *testing.T) { 
data.ImportStep(), { Config: r.autoscale(data, 4000), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("autoscale_settings.0.max_throughput").HasValue("4000"), ), @@ -107,7 +106,7 @@ func TestAccCosmosGremlinDatabase_autoscale(t *testing.T) { }) } -func (t CosmosGremlinDatabaseResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (t CosmosGremlinDatabaseResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := parse.GremlinDatabaseID(state.ID) if err != nil { return nil, err diff --git a/azurerm/internal/services/cosmos/cosmosdb_gremlin_graph_resource.go b/azurerm/internal/services/cosmos/cosmosdb_gremlin_graph_resource.go index b3f9fbe547bc..708546607e45 100644 --- a/azurerm/internal/services/cosmos/cosmosdb_gremlin_graph_resource.go +++ b/azurerm/internal/services/cosmos/cosmosdb_gremlin_graph_resource.go @@ -6,12 +6,8 @@ import ( "strings" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" - "github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2021-01-15/documentdb" "github.com/hashicorp/go-azure-helpers/response" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" @@ -19,13 +15,15 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/migration" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/parse" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/suppress" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func resourceCosmosDbGremlinGraph() *schema.Resource { - return &schema.Resource{ +func resourceCosmosDbGremlinGraph() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceCosmosDbGremlinGraphCreate, Read: resourceCosmosDbGremlinGraphRead, Update: resourceCosmosDbGremlinGraphUpdate, @@ -39,16 +37,16 @@ func resourceCosmosDbGremlinGraph() *schema.Resource { 0: migration.GremlinGraphV0ToV1{}, }), - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(30 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.CosmosEntityName, @@ -57,27 +55,27 @@ func resourceCosmosDbGremlinGraph() *schema.Resource { "resource_group_name": azure.SchemaResourceGroupName(), "account_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.CosmosAccountName, }, "database_name": { - Type: schema.TypeString, + Type: 
pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.CosmosEntityName, }, "default_ttl": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Computed: true, }, "throughput": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Computed: true, ValidateFunc: validate.CosmosThroughput, @@ -86,20 +84,28 @@ func resourceCosmosDbGremlinGraph() *schema.Resource { "autoscale_settings": common.DatabaseAutoscaleSettingsSchema(), "partition_key_path": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validation.StringIsNotEmpty, }, + "partition_key_version": { + Type: pluginsdk.TypeInt, + Optional: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(1, 2), + }, + "index_policy": { - Type: schema.TypeList, - Required: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Type: pluginsdk.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "automatic": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: true, }, @@ -107,7 +113,7 @@ func resourceCosmosDbGremlinGraph() *schema.Resource { // case change in 2021-01-15, issue https://github.com/Azure/azure-rest-api-specs/issues/14051 // todo: change to SDK constants and remove translation code in 3.0 "indexing_mode": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, DiffSuppressFunc: suppress.CaseDifference, // Open issue https://github.com/Azure/azure-sdk-for-go/issues/6603 ValidateFunc: validation.StringInSlice([]string{ @@ -118,24 +124,30 @@ func resourceCosmosDbGremlinGraph() *schema.Resource { }, "included_paths": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Computed: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: 
validation.StringIsNotEmpty, }, - Set: schema.HashString, + Set: pluginsdk.HashString, }, "excluded_paths": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Computed: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: validation.StringIsNotEmpty, }, - Set: schema.HashString, + Set: pluginsdk.HashString, }, + + "composite_index": common.CosmosDbIndexingPolicyCompositeIndexSchema(), + + "spatial_index": common.CosmosDbIndexingPolicySpatialIndexSchema(), }, }, }, @@ -143,17 +155,17 @@ func resourceCosmosDbGremlinGraph() *schema.Resource { "conflict_resolution_policy": common.ConflictResolutionPolicy(), "unique_key": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "paths": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Required: true, ForceNew: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: validation.StringIsNotEmpty, }, }, @@ -164,7 +176,7 @@ func resourceCosmosDbGremlinGraph() *schema.Resource { } } -func resourceCosmosDbGremlinGraphCreate(d *schema.ResourceData, meta interface{}) error { +func resourceCosmosDbGremlinGraphCreate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Cosmos.GremlinClient ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -202,10 +214,14 @@ func resourceCosmosDbGremlinGraphCreate(d *schema.ResourceData, meta interface{} if partitionkeypaths != "" { db.GremlinGraphCreateUpdateProperties.Resource.PartitionKey = &documentdb.ContainerPartitionKey{ Paths: &[]string{partitionkeypaths}, + Kind: documentdb.PartitionKindHash, + } + if partitionKeyVersion, ok := d.GetOk("partition_key_version"); ok { + 
db.GremlinGraphCreateUpdateProperties.Resource.PartitionKey.Version = utils.Int32(int32(partitionKeyVersion.(int))) } } - if keys := expandAzureRmCosmosDbGremlinGraphUniqueKeys(d.Get("unique_key").(*schema.Set)); keys != nil { + if keys := expandAzureRmCosmosDbGremlinGraphUniqueKeys(d.Get("unique_key").(*pluginsdk.Set)); keys != nil { db.GremlinGraphCreateUpdateProperties.Resource.UniqueKeyPolicy = &documentdb.UniqueKeyPolicy{ UniqueKeys: keys, } @@ -250,7 +266,7 @@ func resourceCosmosDbGremlinGraphCreate(d *schema.ResourceData, meta interface{} return resourceCosmosDbGremlinGraphRead(d, meta) } -func resourceCosmosDbGremlinGraphUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceCosmosDbGremlinGraphUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Cosmos.GremlinClient ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -280,10 +296,15 @@ func resourceCosmosDbGremlinGraphUpdate(d *schema.ResourceData, meta interface{} if partitionkeypaths != "" { db.GremlinGraphCreateUpdateProperties.Resource.PartitionKey = &documentdb.ContainerPartitionKey{ Paths: &[]string{partitionkeypaths}, + Kind: documentdb.PartitionKindHash, + } + + if partitionKeyVersion, ok := d.GetOk("partition_key_version"); ok { + db.GremlinGraphCreateUpdateProperties.Resource.PartitionKey.Version = utils.Int32(int32(partitionKeyVersion.(int))) } } - if keys := expandAzureRmCosmosDbGremlinGraphUniqueKeys(d.Get("unique_key").(*schema.Set)); keys != nil { + if keys := expandAzureRmCosmosDbGremlinGraphUniqueKeys(d.Get("unique_key").(*pluginsdk.Set)); keys != nil { db.GremlinGraphCreateUpdateProperties.Resource.UniqueKeyPolicy = &documentdb.UniqueKeyPolicy{ UniqueKeys: keys, } @@ -320,7 +341,7 @@ func resourceCosmosDbGremlinGraphUpdate(d *schema.ResourceData, meta interface{} return resourceCosmosDbGremlinGraphRead(d, meta) } -func resourceCosmosDbGremlinGraphRead(d *schema.ResourceData, meta interface{}) 
error { +func resourceCosmosDbGremlinGraphRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Cosmos.GremlinClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() @@ -356,6 +377,10 @@ func resourceCosmosDbGremlinGraphRead(d *schema.ResourceData, meta interface{}) d.Set("partition_key_path", (*paths)[0]) } } + + if version := pk.Version; version != nil { + d.Set("partition_key_version", version) + } } if ip := props.IndexingPolicy; ip != nil { @@ -397,7 +422,7 @@ func resourceCosmosDbGremlinGraphRead(d *schema.ResourceData, meta interface{}) return nil } -func resourceCosmosDbGremlinGraphDelete(d *schema.ResourceData, meta interface{}) error { +func resourceCosmosDbGremlinGraphDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Cosmos.GremlinClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() @@ -421,7 +446,7 @@ func resourceCosmosDbGremlinGraphDelete(d *schema.ResourceData, meta interface{} return nil } -func expandAzureRmCosmosDbGrelinGraphIndexingPolicy(d *schema.ResourceData) *documentdb.IndexingPolicy { +func expandAzureRmCosmosDbGrelinGraphIndexingPolicy(d *pluginsdk.ResourceData) *documentdb.IndexingPolicy { i := d.Get("index_policy").([]interface{}) if len(i) == 0 || i[0] == nil { return nil @@ -434,6 +459,11 @@ func expandAzureRmCosmosDbGrelinGraphIndexingPolicy(d *schema.ResourceData) *doc IncludedPaths: expandAzureRmCosmosDbGrelimGraphIncludedPath(input), ExcludedPaths: expandAzureRmCosmosDbGremlinGraphExcludedPath(input), } + if v, ok := input["composite_index"].([]interface{}); ok { + policy.CompositeIndexes = common.ExpandAzureRmCosmosDBIndexingPolicyCompositeIndexes(v) + } + + policy.SpatialIndexes = common.ExpandAzureRmCosmosDBIndexingPolicySpatialIndexes(input["spatial_index"].([]interface{})) if automatic, ok := input["automatic"].(bool); ok { policy.Automatic = utils.Bool(automatic) @@ 
-443,7 +473,7 @@ func expandAzureRmCosmosDbGrelinGraphIndexingPolicy(d *schema.ResourceData) *doc } func expandAzureRmCosmosDbGrelimGraphIncludedPath(input map[string]interface{}) *[]documentdb.IncludedPath { - includedPath := input["included_paths"].(*schema.Set).List() + includedPath := input["included_paths"].(*pluginsdk.Set).List() paths := make([]documentdb.IncludedPath, len(includedPath)) for i, pathConfig := range includedPath { @@ -458,7 +488,7 @@ func expandAzureRmCosmosDbGrelimGraphIncludedPath(input map[string]interface{}) } func expandAzureRmCosmosDbGremlinGraphExcludedPath(input map[string]interface{}) *[]documentdb.ExcludedPath { - excludedPath := input["excluded_paths"].(*schema.Set).List() + excludedPath := input["excluded_paths"].(*pluginsdk.Set).List() paths := make([]documentdb.ExcludedPath, len(excludedPath)) for i, pathConfig := range excludedPath { @@ -472,7 +502,7 @@ func expandAzureRmCosmosDbGremlinGraphExcludedPath(input map[string]interface{}) return &paths } -func expandAzureRmCosmosDbGremlinGraphUniqueKeys(s *schema.Set) *[]documentdb.UniqueKey { +func expandAzureRmCosmosDbGremlinGraphUniqueKeys(s *pluginsdk.Set) *[]documentdb.UniqueKey { i := s.List() if len(i) == 0 || i[0] == nil { return nil @@ -482,7 +512,7 @@ func expandAzureRmCosmosDbGremlinGraphUniqueKeys(s *schema.Set) *[]documentdb.Un for _, k := range i { key := k.(map[string]interface{}) - paths := key["paths"].(*schema.Set).List() + paths := key["paths"].(*pluginsdk.Set).List() if len(paths) == 0 { continue } @@ -503,8 +533,10 @@ func flattenAzureRmCosmosDBGremlinGraphIndexingPolicy(input *documentdb.Indexing indexPolicy["automatic"] = input.Automatic indexPolicy["indexing_mode"] = strings.Title(string(input.IndexingMode)) - indexPolicy["included_paths"] = schema.NewSet(schema.HashString, flattenAzureRmCosmosDBGremlinGraphIncludedPaths(input.IncludedPaths)) - indexPolicy["excluded_paths"] = schema.NewSet(schema.HashString, 
flattenAzureRmCosmosDBGremlinGraphExcludedPaths(input.ExcludedPaths)) + indexPolicy["included_paths"] = pluginsdk.NewSet(pluginsdk.HashString, flattenAzureRmCosmosDBGremlinGraphIncludedPaths(input.IncludedPaths)) + indexPolicy["excluded_paths"] = pluginsdk.NewSet(pluginsdk.HashString, flattenAzureRmCosmosDBGremlinGraphExcludedPaths(input.ExcludedPaths)) + indexPolicy["composite_index"] = common.FlattenCosmosDBIndexingPolicyCompositeIndexes(input.CompositeIndexes) + indexPolicy["spatial_index"] = common.FlattenCosmosDBIndexingPolicySpatialIndexes(input.SpatialIndexes) return []interface{}{indexPolicy} } diff --git a/azurerm/internal/services/cosmos/cosmosdb_gremlin_graph_resource_test.go b/azurerm/internal/services/cosmos/cosmosdb_gremlin_graph_resource_test.go index b0cda4e60133..6661c4e050b1 100644 --- a/azurerm/internal/services/cosmos/cosmosdb_gremlin_graph_resource_test.go +++ b/azurerm/internal/services/cosmos/cosmosdb_gremlin_graph_resource_test.go @@ -5,12 +5,11 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -21,10 +20,10 @@ func TestAccCosmosDbGremlinGraph_basic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_gremlin_graph", "test") r := CosmosGremlinGraphResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: 
resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -36,10 +35,10 @@ func TestAccCosmosDbGremlinGraph_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_gremlin_graph", "test") r := CosmosGremlinGraphResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -54,10 +53,10 @@ func TestAccCosmosDbGremlinGraph_customConflictResolutionPolicy(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_gremlin_graph", "test") r := CosmosGremlinGraphResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.customConflictResolutionPolicy(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -69,10 +68,31 @@ func TestAccCosmosDbGremlinGraph_indexPolicy(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_gremlin_graph", "test") r := CosmosGremlinGraphResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.indexPolicy(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.indexPolicyCompositeIndex(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.indexPolicySpatialIndex(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( 
check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -84,10 +104,10 @@ func TestAccCosmosDbGremlinGraph_update(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_gremlin_graph", "test") r := CosmosGremlinGraphResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.update(data, 700, 900), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("throughput").HasValue("700"), check.That(data.ResourceName).Key("default_ttl").HasValue("900"), @@ -96,7 +116,7 @@ func TestAccCosmosDbGremlinGraph_update(t *testing.T) { data.ImportStep(), { Config: r.update(data, 1700, 1900), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("throughput").HasValue("1700"), check.That(data.ResourceName).Key("default_ttl").HasValue("1900"), @@ -110,10 +130,10 @@ func TestAccCosmosDbGremlinGraph_autoscale(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_gremlin_graph", "test") r := CosmosGremlinGraphResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.autoscale(data, 4000), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("autoscale_settings.0.max_throughput").HasValue("4000"), ), @@ -121,7 +141,7 @@ func TestAccCosmosDbGremlinGraph_autoscale(t *testing.T) { data.ImportStep(), { Config: r.autoscale(data, 5000), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), 
check.That(data.ResourceName).Key("autoscale_settings.0.max_throughput").HasValue("5000"), ), @@ -129,7 +149,7 @@ func TestAccCosmosDbGremlinGraph_autoscale(t *testing.T) { data.ImportStep(), { Config: r.autoscale(data, 4000), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("autoscale_settings.0.max_throughput").HasValue("4000"), ), @@ -138,7 +158,24 @@ func TestAccCosmosDbGremlinGraph_autoscale(t *testing.T) { }) } -func (t CosmosGremlinGraphResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { +func TestAccCosmosDbGremlinGraph_partition_key_version(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_cosmosdb_gremlin_graph", "test") + r := CosmosGremlinGraphResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + + Config: r.partition_key_version(data, 2), + Check: acceptance.ComposeAggregateTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("partition_key_version").HasValue("2"), + ), + }, + data.ImportStep(), + }) +} + +func (t CosmosGremlinGraphResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := parse.GremlinGraphID(state.ID) if err != nil { return nil, err @@ -163,13 +200,6 @@ resource "azurerm_cosmosdb_gremlin_graph" "test" { database_name = azurerm_cosmosdb_gremlin_database.test.name partition_key_path = "/test" throughput = 400 - - index_policy { - automatic = true - indexing_mode = "Consistent" - included_paths = ["/*"] - excluded_paths = ["/\"_etag\"/?"] - } } `, CosmosGremlinDatabaseResource{}.basic(data), data.RandomInteger) } @@ -184,13 +214,6 @@ resource "azurerm_cosmosdb_gremlin_graph" "import" { account_name = azurerm_cosmosdb_account.test.name database_name = azurerm_cosmosdb_gremlin_database.test.name 
partition_key_path = azurerm_cosmosdb_gremlin_graph.test.partition_key_path - - index_policy { - automatic = true - indexing_mode = "Consistent" - included_paths = ["/*"] - excluded_paths = ["/\"_etag\"/?"] - } } `, r.basic(data)) } @@ -247,6 +270,108 @@ resource "azurerm_cosmosdb_gremlin_graph" "test" { `, CosmosGremlinDatabaseResource{}.basic(data), data.RandomInteger) } +func (CosmosGremlinGraphResource) indexPolicyCompositeIndex(data acceptance.TestData) string { + return fmt.Sprintf(` +%[1]s + +resource "azurerm_cosmosdb_gremlin_graph" "test" { + name = "acctest-CGRPC-%[2]d" + resource_group_name = azurerm_cosmosdb_account.test.resource_group_name + account_name = azurerm_cosmosdb_account.test.name + database_name = azurerm_cosmosdb_gremlin_database.test.name + partition_key_path = "/test" + throughput = 400 + + index_policy { + automatic = true + indexing_mode = "Consistent" + + composite_index { + index { + path = "/path1" + order = "Ascending" + } + index { + path = "/path2" + order = "Descending" + } + } + + composite_index { + index { + path = "/path3" + order = "Ascending" + } + index { + path = "/path4" + order = "Descending" + } + } + + spatial_index { + path = "/path/*" + } + + spatial_index { + path = "/test/to/all/?" 
+ } + } + + conflict_resolution_policy { + mode = "LastWriterWins" + conflict_resolution_path = "/_ts" + } +} +`, CosmosGremlinDatabaseResource{}.basic(data), data.RandomInteger) +} + +func (CosmosGremlinGraphResource) indexPolicySpatialIndex(data acceptance.TestData) string { + return fmt.Sprintf(` +%[1]s + +resource "azurerm_cosmosdb_gremlin_graph" "test" { + name = "acctest-CGRPC-%[2]d" + resource_group_name = azurerm_cosmosdb_account.test.resource_group_name + account_name = azurerm_cosmosdb_account.test.name + database_name = azurerm_cosmosdb_gremlin_database.test.name + partition_key_path = "/test" + throughput = 400 + + index_policy { + automatic = true + indexing_mode = "Consistent" + + composite_index { + index { + path = "/path1" + order = "Ascending" + } + index { + path = "/path2" + order = "Descending" + } + } + + composite_index { + index { + path = "/path3" + order = "Ascending" + } + index { + path = "/path4" + order = "Descending" + } + } + } + + conflict_resolution_policy { + mode = "LastWriterWins" + conflict_resolution_path = "/_ts" + } +} +`, CosmosGremlinDatabaseResource{}.basic(data), data.RandomInteger) +} + func (CosmosGremlinGraphResource) update(data acceptance.TestData, throughput int, defaultTTL int) string { return fmt.Sprintf(` %[1]s @@ -298,3 +423,18 @@ resource "azurerm_cosmosdb_gremlin_graph" "test" { } `, CosmosGremlinDatabaseResource{}.basic(data), data.RandomInteger, maxThroughput) } + +func (CosmosGremlinGraphResource) partition_key_version(data acceptance.TestData, version int) string { + return fmt.Sprintf(` +%[1]s + +resource "azurerm_cosmosdb_gremlin_graph" "test" { + name = "acctest-CGRPC-%[2]d" + resource_group_name = azurerm_cosmosdb_account.test.resource_group_name + account_name = azurerm_cosmosdb_account.test.name + database_name = azurerm_cosmosdb_gremlin_database.test.name + partition_key_path = "/test" + partition_key_version = %[3]d +} +`, CosmosGremlinDatabaseResource{}.basic(data), data.RandomInteger, version) 
+} diff --git a/azurerm/internal/services/cosmos/cosmosdb_mongo_collection_resource.go b/azurerm/internal/services/cosmos/cosmosdb_mongo_collection_resource.go index e0786a511b90..20ce05f77a53 100644 --- a/azurerm/internal/services/cosmos/cosmosdb_mongo_collection_resource.go +++ b/azurerm/internal/services/cosmos/cosmosdb_mongo_collection_resource.go @@ -5,12 +5,8 @@ import ( "log" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" - "github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2021-01-15/documentdb" "github.com/hashicorp/go-azure-helpers/response" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" @@ -18,12 +14,14 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/migration" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func resourceCosmosDbMongoCollection() *schema.Resource { - return &schema.Resource{ +func resourceCosmosDbMongoCollection() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceCosmosDbMongoCollectionCreate, Read: resourceCosmosDbMongoCollectionRead, Update: resourceCosmosDbMongoCollectionUpdate, @@ -37,16 +35,16 
@@ func resourceCosmosDbMongoCollection() *schema.Resource { 0: migration.MongoCollectionV0ToV1{}, }), - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(30 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.CosmosEntityName, @@ -55,14 +53,14 @@ func resourceCosmosDbMongoCollection() *schema.Resource { "resource_group_name": azure.SchemaResourceGroupName(), "account_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.CosmosAccountName, }, "database_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.CosmosEntityName, @@ -70,7 +68,7 @@ func resourceCosmosDbMongoCollection() *schema.Resource { // SDK/api accepts an array.. 
but only one is allowed "shard_key": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, ValidateFunc: validation.StringIsNotEmpty, @@ -78,19 +76,19 @@ func resourceCosmosDbMongoCollection() *schema.Resource { // default TTL is simply an index on _ts with expireAfterOption, given we can't seem to set TTLs on a given index lets expose this to match the portal "default_ttl_seconds": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, ValidateFunc: validation.IntAtLeast(-1), }, "analytical_storage_ttl": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, ValidateFunc: validation.IntAtLeast(-1), }, "throughput": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Computed: true, ValidateFunc: validate.CosmosThroughput, @@ -99,18 +97,18 @@ func resourceCosmosDbMongoCollection() *schema.Resource { "autoscale_settings": common.MongoCollectionAutoscaleSettingsSchema(), "index": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "keys": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, + Elem: &pluginsdk.Schema{Type: pluginsdk.TypeString}, }, "unique": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: false, }, @@ -119,18 +117,18 @@ func resourceCosmosDbMongoCollection() *schema.Resource { }, "system_indexes": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "keys": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, + Elem: &pluginsdk.Schema{Type: pluginsdk.TypeString}, }, "unique": { - Type: schema.TypeBool, + Type: 
pluginsdk.TypeBool, Computed: true, }, }, @@ -140,7 +138,7 @@ func resourceCosmosDbMongoCollection() *schema.Resource { } } -func resourceCosmosDbMongoCollectionCreate(d *schema.ResourceData, meta interface{}) error { +func resourceCosmosDbMongoCollectionCreate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Cosmos.MongoDbClient ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -172,7 +170,7 @@ func resourceCosmosDbMongoCollectionCreate(d *schema.ResourceData, meta interfac MongoDBCollectionCreateUpdateProperties: &documentdb.MongoDBCollectionCreateUpdateProperties{ Resource: &documentdb.MongoDBCollectionResource{ ID: &name, - Indexes: expandCosmosMongoCollectionIndex(d.Get("index").(*schema.Set).List(), ttl), + Indexes: expandCosmosMongoCollectionIndex(d.Get("index").(*pluginsdk.Set).List(), ttl), }, Options: &documentdb.CreateUpdateOptions{}, }, @@ -221,7 +219,7 @@ func resourceCosmosDbMongoCollectionCreate(d *schema.ResourceData, meta interfac return resourceCosmosDbMongoCollectionRead(d, meta) } -func resourceCosmosDbMongoCollectionUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceCosmosDbMongoCollectionUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Cosmos.MongoDbClient ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -245,7 +243,7 @@ func resourceCosmosDbMongoCollectionUpdate(d *schema.ResourceData, meta interfac MongoDBCollectionCreateUpdateProperties: &documentdb.MongoDBCollectionCreateUpdateProperties{ Resource: &documentdb.MongoDBCollectionResource{ ID: &id.CollectionName, - Indexes: expandCosmosMongoCollectionIndex(d.Get("index").(*schema.Set).List(), ttl), + Indexes: expandCosmosMongoCollectionIndex(d.Get("index").(*pluginsdk.Set).List(), ttl), }, Options: &documentdb.CreateUpdateOptions{}, }, @@ -288,7 +286,7 @@ func resourceCosmosDbMongoCollectionUpdate(d 
*schema.ResourceData, meta interfac return resourceCosmosDbMongoCollectionRead(d, meta) } -func resourceCosmosDbMongoCollectionRead(d *schema.ResourceData, meta interface{}) error { +func resourceCosmosDbMongoCollectionRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Cosmos.MongoDbClient accClient := meta.(*clients.Client).Cosmos.DatabaseClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) @@ -374,7 +372,7 @@ func resourceCosmosDbMongoCollectionRead(d *schema.ResourceData, meta interface{ return nil } -func resourceCosmosDbMongoCollectionDelete(d *schema.ResourceData, meta interface{}) error { +func resourceCosmosDbMongoCollectionDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Cosmos.MongoDbClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() diff --git a/azurerm/internal/services/cosmos/cosmosdb_mongo_collection_resource_test.go b/azurerm/internal/services/cosmos/cosmosdb_mongo_collection_resource_test.go index eca61c321792..af7de9afba74 100644 --- a/azurerm/internal/services/cosmos/cosmosdb_mongo_collection_resource_test.go +++ b/azurerm/internal/services/cosmos/cosmosdb_mongo_collection_resource_test.go @@ -6,12 +6,11 @@ import ( "testing" "github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2021-01-15/documentdb" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -22,10 +21,10 @@ func TestAccCosmosDbMongoCollection_basic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_mongo_collection", "test") r := CosmosMongoCollectionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("throughput").HasValue("400"), ), @@ -38,10 +37,10 @@ func TestAccCosmosDbMongoCollection_complete(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_mongo_collection", "test") r := CosmosMongoCollectionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.complete(data), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("shard_key").HasValue("seven"), check.That(data.ResourceName).Key("default_ttl_seconds").HasValue("707"), @@ -55,16 +54,16 @@ func TestAccCosmosDbMongoCollection_update(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_mongo_collection", "test") r := CosmosMongoCollectionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, { Config: r.complete(data), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("shard_key").HasValue("seven"), 
check.That(data.ResourceName).Key("default_ttl_seconds").HasValue("707"), @@ -73,7 +72,7 @@ func TestAccCosmosDbMongoCollection_update(t *testing.T) { data.ImportStep(), { Config: r.updated(data), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("default_ttl_seconds").HasValue("70707"), ), @@ -86,24 +85,24 @@ func TestAccCosmosDbMongoCollection_throughput(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_mongo_collection", "test") r := CosmosMongoCollectionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.throughput(data, 700), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.throughput(data, 1400), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.basic(data), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -115,10 +114,10 @@ func TestAccCosmosDbMongoCollection_withIndex(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_mongo_collection", "test") r := CosmosMongoCollectionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.withIndex(data), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("default_ttl_seconds").HasValue("707"), check.That(data.ResourceName).Key("index.#").HasValue("4"), @@ -133,10 +132,10 @@ func 
TestAccCosmosDbMongoCollection_analyticalStorageTTL(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_mongo_collection", "test") r := CosmosMongoCollectionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.analyticalStorageTTL(data), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("analytical_storage_ttl").HasValue("600"), ), @@ -149,10 +148,10 @@ func TestAccCosmosDbMongoCollection_autoscale(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_mongo_collection", "test") r := CosmosMongoCollectionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.autoscale(data, 4000), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("autoscale_settings.0.max_throughput").HasValue("4000"), ), @@ -160,7 +159,7 @@ func TestAccCosmosDbMongoCollection_autoscale(t *testing.T) { data.ImportStep(), { Config: r.autoscale(data, 5000), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("autoscale_settings.0.max_throughput").HasValue("5000"), ), @@ -168,7 +167,7 @@ func TestAccCosmosDbMongoCollection_autoscale(t *testing.T) { data.ImportStep(), { Config: r.autoscale(data, 4000), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("autoscale_settings.0.max_throughput").HasValue("4000"), ), @@ -181,10 +180,10 @@ func TestAccCosmosDbMongoCollection_ver36(t *testing.T) { data := acceptance.BuildTestData(t, 
"azurerm_cosmosdb_mongo_collection", "test") r := CosmosMongoCollectionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.ver36(data), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -196,10 +195,10 @@ func TestAccCosmosDbMongoCollection_serverless(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_mongo_collection", "test") r := CosmosMongoCollectionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.serverless(data), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -207,7 +206,7 @@ func TestAccCosmosDbMongoCollection_serverless(t *testing.T) { }) } -func (t CosmosMongoCollectionResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (t CosmosMongoCollectionResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := parse.MongodbCollectionID(state.ID) if err != nil { return nil, err diff --git a/azurerm/internal/services/cosmos/cosmosdb_mongo_database_resource.go b/azurerm/internal/services/cosmos/cosmosdb_mongo_database_resource.go index ec22bf960463..055790107fd2 100644 --- a/azurerm/internal/services/cosmos/cosmosdb_mongo_database_resource.go +++ b/azurerm/internal/services/cosmos/cosmosdb_mongo_database_resource.go @@ -5,11 +5,8 @@ import ( "log" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" - "github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2021-01-15/documentdb" "github.com/hashicorp/go-azure-helpers/response" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" @@ -17,12 +14,13 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/migration" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func resourceCosmosDbMongoDatabase() *schema.Resource { - return &schema.Resource{ +func resourceCosmosDbMongoDatabase() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceCosmosDbMongoDatabaseCreate, Update: resourceCosmosDbMongoDatabaseUpdate, Read: resourceCosmosDbMongoDatabaseRead, @@ -36,16 +34,16 @@ func resourceCosmosDbMongoDatabase() *schema.Resource { 0: migration.MongoDatabaseV0ToV1{}, }), - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(30 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.CosmosEntityName, @@ -54,14 +52,14 @@ func 
resourceCosmosDbMongoDatabase() *schema.Resource { "resource_group_name": azure.SchemaResourceGroupName(), "account_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.CosmosAccountName, }, "throughput": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Computed: true, ValidateFunc: validate.CosmosThroughput, @@ -72,7 +70,7 @@ func resourceCosmosDbMongoDatabase() *schema.Resource { } } -func resourceCosmosDbMongoDatabaseCreate(d *schema.ResourceData, meta interface{}) error { +func resourceCosmosDbMongoDatabaseCreate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Cosmos.MongoDbClient ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -136,7 +134,7 @@ func resourceCosmosDbMongoDatabaseCreate(d *schema.ResourceData, meta interface{ return resourceCosmosDbMongoDatabaseRead(d, meta) } -func resourceCosmosDbMongoDatabaseUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceCosmosDbMongoDatabaseUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Cosmos.MongoDbClient ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -191,7 +189,7 @@ func resourceCosmosDbMongoDatabaseUpdate(d *schema.ResourceData, meta interface{ return resourceCosmosDbMongoDatabaseRead(d, meta) } -func resourceCosmosDbMongoDatabaseRead(d *schema.ResourceData, meta interface{}) error { +func resourceCosmosDbMongoDatabaseRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Cosmos.MongoDbClient accountClient := meta.(*clients.Client).Cosmos.DatabaseClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) @@ -248,7 +246,7 @@ func resourceCosmosDbMongoDatabaseRead(d *schema.ResourceData, meta interface{}) return nil } -func resourceCosmosDbMongoDatabaseDelete(d *schema.ResourceData, meta 
interface{}) error { +func resourceCosmosDbMongoDatabaseDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Cosmos.MongoDbClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() diff --git a/azurerm/internal/services/cosmos/cosmosdb_mongo_database_resource_test.go b/azurerm/internal/services/cosmos/cosmosdb_mongo_database_resource_test.go index 54ca45b0abec..544dfdea3bb2 100644 --- a/azurerm/internal/services/cosmos/cosmosdb_mongo_database_resource_test.go +++ b/azurerm/internal/services/cosmos/cosmosdb_mongo_database_resource_test.go @@ -6,12 +6,11 @@ import ( "testing" "github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2021-01-15/documentdb" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -22,10 +21,10 @@ func TestAccCosmosDbMongoDatabase_basic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_mongo_database", "test") r := CosmosMongoDatabaseResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -37,10 +36,10 @@ func TestAccCosmosDbMongoDatabase_complete(t *testing.T) { data := acceptance.BuildTestData(t, 
"azurerm_cosmosdb_mongo_database", "test") r := CosmosMongoDatabaseResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.complete(data), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -52,10 +51,10 @@ func TestAccCosmosDbMongoDatabase_autoscale(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_mongo_database", "test") r := CosmosMongoDatabaseResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.autoscale(data, 4000), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("autoscale_settings.0.max_throughput").HasValue("4000"), ), @@ -63,7 +62,7 @@ func TestAccCosmosDbMongoDatabase_autoscale(t *testing.T) { data.ImportStep(), { Config: r.autoscale(data, 5000), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("autoscale_settings.0.max_throughput").HasValue("5000"), ), @@ -71,7 +70,7 @@ func TestAccCosmosDbMongoDatabase_autoscale(t *testing.T) { data.ImportStep(), { Config: r.autoscale(data, 4000), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("autoscale_settings.0.max_throughput").HasValue("4000"), ), @@ -84,10 +83,10 @@ func TestAccCosmosDbMongoDatabase_serverless(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_mongo_database", "test") r := CosmosMongoDatabaseResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.serverless(data), - 
Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -95,7 +94,7 @@ func TestAccCosmosDbMongoDatabase_serverless(t *testing.T) { }) } -func (t CosmosMongoDatabaseResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (t CosmosMongoDatabaseResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := parse.MongodbDatabaseID(state.ID) if err != nil { return nil, err diff --git a/azurerm/internal/services/cosmos/cosmosdb_notebook_workspace_resource.go b/azurerm/internal/services/cosmos/cosmosdb_notebook_workspace_resource.go index 7e210968ea25..4348e39fa257 100644 --- a/azurerm/internal/services/cosmos/cosmosdb_notebook_workspace_resource.go +++ b/azurerm/internal/services/cosmos/cosmosdb_notebook_workspace_resource.go @@ -6,38 +6,37 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2021-01-15/documentdb" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/validate" - azSchema "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/schema" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func resourceCosmosDbNotebookWorkspace() *schema.Resource { - return &schema.Resource{ +func resourceCosmosDbNotebookWorkspace() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceCosmosDbNotebookWorkspaceCreate, Read: resourceCosmosDbNotebookWorkspaceRead, Delete: resourceCosmosDbNotebookWorkspaceDelete, - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), }, - Importer: azSchema.ValidateResourceIDPriorToImport(func(id string) error { + Importer: pluginsdk.ImporterValidatingResourceId(func(id string) error { _, err := parse.NotebookWorkspaceID(id) return err }), - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validation.StringInSlice([]string{ @@ -48,20 +47,20 @@ func resourceCosmosDbNotebookWorkspace() *schema.Resource { "resource_group_name": azure.SchemaResourceGroupName(), "account_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.CosmosAccountName, }, "server_endpoint": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, }, } } -func resourceCosmosDbNotebookWorkspaceCreate(d *schema.ResourceData, meta interface{}) error { +func resourceCosmosDbNotebookWorkspaceCreate(d *pluginsdk.ResourceData, meta interface{}) error { subscriptionId := meta.(*clients.Client).Account.SubscriptionId client := 
meta.(*clients.Client).Cosmos.NotebookWorkspaceClient ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) @@ -97,7 +96,7 @@ func resourceCosmosDbNotebookWorkspaceCreate(d *schema.ResourceData, meta interf return resourceCosmosDbNotebookWorkspaceRead(d, meta) } -func resourceCosmosDbNotebookWorkspaceRead(d *schema.ResourceData, meta interface{}) error { +func resourceCosmosDbNotebookWorkspaceRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Cosmos.NotebookWorkspaceClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() @@ -125,7 +124,7 @@ func resourceCosmosDbNotebookWorkspaceRead(d *schema.ResourceData, meta interfac return nil } -func resourceCosmosDbNotebookWorkspaceDelete(d *schema.ResourceData, meta interface{}) error { +func resourceCosmosDbNotebookWorkspaceDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Cosmos.NotebookWorkspaceClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() diff --git a/azurerm/internal/services/cosmos/cosmosdb_notebook_workspace_resource_test.go b/azurerm/internal/services/cosmos/cosmosdb_notebook_workspace_resource_test.go index efcb6c3cb31c..2f436bfb7c79 100644 --- a/azurerm/internal/services/cosmos/cosmosdb_notebook_workspace_resource_test.go +++ b/azurerm/internal/services/cosmos/cosmosdb_notebook_workspace_resource_test.go @@ -5,12 +5,11 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/parse" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -19,10 +18,10 @@ type CosmosDbNotebookWorkspaceResource struct{} func TestAccCosmosDbNotebookWorkspace_basic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_notebook_workspace", "test") r := CosmosDbNotebookWorkspaceResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -33,10 +32,10 @@ func TestAccCosmosDbNotebookWorkspace_basic(t *testing.T) { func TestAccCosmosDbNotebookWorkspace_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_notebook_workspace", "test") r := CosmosDbNotebookWorkspaceResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -44,7 +43,7 @@ func TestAccCosmosDbNotebookWorkspace_requiresImport(t *testing.T) { }) } -func (r CosmosDbNotebookWorkspaceResource) Exists(ctx context.Context, client *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (r CosmosDbNotebookWorkspaceResource) Exists(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := parse.NotebookWorkspaceID(state.ID) if err != nil { return nil, err diff --git a/azurerm/internal/services/cosmos/cosmosdb_sql_container_resource.go b/azurerm/internal/services/cosmos/cosmosdb_sql_container_resource.go index 82c5fb14902d..a12618b37709 100644 --- a/azurerm/internal/services/cosmos/cosmosdb_sql_container_resource.go +++ 
b/azurerm/internal/services/cosmos/cosmosdb_sql_container_resource.go @@ -5,12 +5,8 @@ import ( "log" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" - "github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2021-01-15/documentdb" "github.com/hashicorp/go-azure-helpers/response" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" @@ -18,12 +14,14 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/migration" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func resourceCosmosDbSQLContainer() *schema.Resource { - return &schema.Resource{ +func resourceCosmosDbSQLContainer() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceCosmosDbSQLContainerCreate, Read: resourceCosmosDbSQLContainerRead, Update: resourceCosmosDbSQLContainerUpdate, @@ -37,16 +35,16 @@ func resourceCosmosDbSQLContainer() *schema.Resource { 0: migration.SqlContainerV0ToV1{}, }), - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(30 * time.Minute), - Delete: 
schema.DefaultTimeout(30 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.CosmosEntityName, @@ -55,28 +53,28 @@ func resourceCosmosDbSQLContainer() *schema.Resource { "resource_group_name": azure.SchemaResourceGroupName(), "account_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.CosmosAccountName, }, "database_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.CosmosEntityName, }, "partition_key_path": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validation.StringIsNotEmpty, }, "partition_key_version": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, ForceNew: true, ValidateFunc: validation.IntBetween(1, 2), @@ -85,7 +83,7 @@ func resourceCosmosDbSQLContainer() *schema.Resource { "conflict_resolution_policy": common.ConflictResolutionPolicy(), "throughput": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Computed: true, ValidateFunc: validate.CosmosThroughput, @@ -94,30 +92,30 @@ func resourceCosmosDbSQLContainer() *schema.Resource { "autoscale_settings": common.DatabaseAutoscaleSettingsSchema(), "analytical_storage_ttl": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, ValidateFunc: validation.IntAtLeast(-1), }, "default_ttl": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Computed: true, ValidateFunc: validation.IntAtLeast(-1), }, "unique_key": { - Type: 
schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "paths": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Required: true, ForceNew: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: validation.StringIsNotEmpty, }, }, @@ -129,7 +127,7 @@ func resourceCosmosDbSQLContainer() *schema.Resource { } } -func resourceCosmosDbSQLContainerCreate(d *schema.ResourceData, meta interface{}) error { +func resourceCosmosDbSQLContainerCreate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Cosmos.SqlClient ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -181,7 +179,7 @@ func resourceCosmosDbSQLContainerCreate(d *schema.ResourceData, meta interface{} } } - if keys := expandCosmosSQLContainerUniqueKeys(d.Get("unique_key").(*schema.Set)); keys != nil { + if keys := expandCosmosSQLContainerUniqueKeys(d.Get("unique_key").(*pluginsdk.Set)); keys != nil { db.SQLContainerCreateUpdateProperties.Resource.UniqueKeyPolicy = &documentdb.UniqueKeyPolicy{ UniqueKeys: keys, } @@ -228,7 +226,7 @@ func resourceCosmosDbSQLContainerCreate(d *schema.ResourceData, meta interface{} return resourceCosmosDbSQLContainerRead(d, meta) } -func resourceCosmosDbSQLContainerUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceCosmosDbSQLContainerUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Cosmos.SqlClient ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -272,7 +270,7 @@ func resourceCosmosDbSQLContainerUpdate(d *schema.ResourceData, meta interface{} } } - if keys := expandCosmosSQLContainerUniqueKeys(d.Get("unique_key").(*schema.Set)); keys != nil { + if keys := 
expandCosmosSQLContainerUniqueKeys(d.Get("unique_key").(*pluginsdk.Set)); keys != nil { db.SQLContainerCreateUpdateProperties.Resource.UniqueKeyPolicy = &documentdb.UniqueKeyPolicy{ UniqueKeys: keys, } @@ -313,7 +311,7 @@ func resourceCosmosDbSQLContainerUpdate(d *schema.ResourceData, meta interface{} return resourceCosmosDbSQLContainerRead(d, meta) } -func resourceCosmosDbSQLContainerRead(d *schema.ResourceData, meta interface{}) error { +func resourceCosmosDbSQLContainerRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Cosmos.SqlClient accountClient := meta.(*clients.Client).Cosmos.DatabaseClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) @@ -406,7 +404,7 @@ func resourceCosmosDbSQLContainerRead(d *schema.ResourceData, meta interface{}) return nil } -func resourceCosmosDbSQLContainerDelete(d *schema.ResourceData, meta interface{}) error { +func resourceCosmosDbSQLContainerDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Cosmos.SqlClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() @@ -431,7 +429,7 @@ func resourceCosmosDbSQLContainerDelete(d *schema.ResourceData, meta interface{} return nil } -func expandCosmosSQLContainerUniqueKeys(s *schema.Set) *[]documentdb.UniqueKey { +func expandCosmosSQLContainerUniqueKeys(s *pluginsdk.Set) *[]documentdb.UniqueKey { i := s.List() if len(i) == 0 || i[0] == nil { return nil @@ -441,7 +439,7 @@ func expandCosmosSQLContainerUniqueKeys(s *schema.Set) *[]documentdb.UniqueKey { for _, k := range i { key := k.(map[string]interface{}) - paths := key["paths"].(*schema.Set).List() + paths := key["paths"].(*pluginsdk.Set).List() if len(paths) == 0 { continue } diff --git a/azurerm/internal/services/cosmos/cosmosdb_sql_container_resource_test.go b/azurerm/internal/services/cosmos/cosmosdb_sql_container_resource_test.go index 78ed182d7af7..2660667269b5 100644 --- 
a/azurerm/internal/services/cosmos/cosmosdb_sql_container_resource_test.go +++ b/azurerm/internal/services/cosmos/cosmosdb_sql_container_resource_test.go @@ -6,12 +6,11 @@ import ( "testing" "github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2021-01-15/documentdb" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -22,11 +21,11 @@ func TestAccCosmosDbSqlContainer_basic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_sql_container", "test") r := CosmosSqlContainerResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -38,11 +37,11 @@ func TestAccCosmosDbSqlContainer_basic_serverless(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_sql_container", "test") r := CosmosSqlContainerResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic_serverless(data), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -54,11 +53,11 @@ func TestAccCosmosDbSqlContainer_complete(t *testing.T) { data := acceptance.BuildTestData(t, 
"azurerm_cosmosdb_sql_container", "test") r := CosmosSqlContainerResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.complete(data), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -70,11 +69,11 @@ func TestAccCosmosDbSqlContainer_analyticalStorageTTL(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_sql_container", "test") r := CosmosSqlContainerResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.analyticalStorageTTL(data), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -86,11 +85,11 @@ func TestAccCosmosDbSqlContainer_update(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_sql_container", "test") r := CosmosSqlContainerResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.complete(data), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("default_ttl").HasValue("500"), check.That(data.ResourceName).Key("throughput").HasValue("600"), @@ -100,7 +99,7 @@ func TestAccCosmosDbSqlContainer_update(t *testing.T) { { Config: r.update(data), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("default_ttl").HasValue("1000"), check.That(data.ResourceName).Key("throughput").HasValue("400"), @@ -114,11 +113,11 @@ func TestAccCosmosDbSqlContainer_autoscale(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_sql_container", "test") r := 
CosmosSqlContainerResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.autoscale(data, 4000), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("autoscale_settings.0.max_throughput").HasValue("4000"), ), @@ -127,7 +126,7 @@ func TestAccCosmosDbSqlContainer_autoscale(t *testing.T) { { Config: r.autoscale(data, 5000), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("autoscale_settings.0.max_throughput").HasValue("5000"), ), @@ -136,7 +135,7 @@ func TestAccCosmosDbSqlContainer_autoscale(t *testing.T) { { Config: r.autoscale(data, 4000), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("autoscale_settings.0.max_throughput").HasValue("4000"), ), @@ -149,11 +148,11 @@ func TestAccCosmosDbSqlContainer_indexing_policy(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_sql_container", "test") r := CosmosSqlContainerResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -161,7 +160,7 @@ func TestAccCosmosDbSqlContainer_indexing_policy(t *testing.T) { { Config: r.indexing_policy(data, "/includedPath01/*", "/excludedPath01/?"), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -169,7 +168,22 @@ func TestAccCosmosDbSqlContainer_indexing_policy(t *testing.T) { { Config: 
r.indexing_policy(data, "/includedPath02/*", "/excludedPath02/?"), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + { + + Config: r.indexing_policy_update_spatialIndex(data, "/includedPath02/*", "/excludedPath02/?"), + Check: acceptance.ComposeAggregateTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + + Config: r.basic(data), + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -181,11 +195,11 @@ func TestAccCosmosDbSqlContainer_partition_key_version(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_sql_container", "test") r := CosmosSqlContainerResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.partition_key_version(data, 2), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("partition_key_version").HasValue("2"), ), @@ -198,10 +212,10 @@ func TestAccCosmosDbSqlContainer_customConflictResolutionPolicy(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_sql_container", "test") r := CosmosSqlContainerResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.conflictResolutionPolicy(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -209,7 +223,7 @@ func TestAccCosmosDbSqlContainer_customConflictResolutionPolicy(t *testing.T) { }) } -func (t CosmosSqlContainerResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (t CosmosSqlContainerResource) Exists(ctx context.Context, clients *clients.Client, state 
*pluginsdk.InstanceState) (*bool, error) { id, err := parse.SqlContainerID(state.ID) if err != nil { return nil, err @@ -451,6 +465,66 @@ resource "azurerm_cosmosdb_sql_container" "test" { `, CosmosSqlDatabaseResource{}.basic(data), data.RandomInteger, includedPath, excludedPath) } +func (CosmosSqlContainerResource) indexing_policy_update_spatialIndex(data acceptance.TestData, includedPath, excludedPath string) string { + return fmt.Sprintf(` +%[1]s + +resource "azurerm_cosmosdb_sql_container" "test" { + name = "acctest-CSQLC-%[2]d" + resource_group_name = azurerm_cosmosdb_account.test.resource_group_name + account_name = azurerm_cosmosdb_account.test.name + database_name = azurerm_cosmosdb_sql_database.test.name + partition_key_path = "/definition/id" + + indexing_policy { + indexing_mode = "Consistent" + + included_path { + path = "/*" + } + + included_path { + path = "%s" + } + + excluded_path { + path = "%s" + } + + composite_index { + index { + path = "/path1" + order = "Ascending" + } + index { + path = "/path2" + order = "Descending" + } + } + + composite_index { + index { + path = "/path3" + order = "Ascending" + } + index { + path = "/path4" + order = "Descending" + } + } + + spatial_index { + path = "/path/*" + } + + spatial_index { + path = "/test/to/all/?" 
+ } + } +} +`, CosmosSqlDatabaseResource{}.basic(data), data.RandomInteger, includedPath, excludedPath) +} + func (CosmosSqlContainerResource) partition_key_version(data acceptance.TestData, version int) string { return fmt.Sprintf(` %[1]s diff --git a/azurerm/internal/services/cosmos/cosmosdb_sql_database_resource.go b/azurerm/internal/services/cosmos/cosmosdb_sql_database_resource.go index a80dc3a25b22..1f6b73bc50c7 100644 --- a/azurerm/internal/services/cosmos/cosmosdb_sql_database_resource.go +++ b/azurerm/internal/services/cosmos/cosmosdb_sql_database_resource.go @@ -5,11 +5,8 @@ import ( "log" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" - "github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2021-01-15/documentdb" "github.com/hashicorp/go-azure-helpers/response" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" @@ -17,12 +14,13 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/migration" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func resourceCosmosDbSQLDatabase() *schema.Resource { - return &schema.Resource{ +func resourceCosmosDbSQLDatabase() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceCosmosDbSQLDatabaseCreate, Read: resourceCosmosDbSQLDatabaseRead, Update: 
resourceCosmosDbSQLDatabaseUpdate, @@ -36,16 +34,16 @@ func resourceCosmosDbSQLDatabase() *schema.Resource { 0: migration.SqlDatabaseV0ToV1{}, }), - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(30 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.CosmosEntityName, @@ -54,14 +52,14 @@ func resourceCosmosDbSQLDatabase() *schema.Resource { "resource_group_name": azure.SchemaResourceGroupName(), "account_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.CosmosAccountName, }, "throughput": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Computed: true, ValidateFunc: validate.CosmosThroughput, @@ -72,7 +70,7 @@ func resourceCosmosDbSQLDatabase() *schema.Resource { } } -func resourceCosmosDbSQLDatabaseCreate(d *schema.ResourceData, meta interface{}) error { +func resourceCosmosDbSQLDatabaseCreate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Cosmos.SqlClient ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -136,7 +134,7 @@ func resourceCosmosDbSQLDatabaseCreate(d *schema.ResourceData, meta interface{}) return resourceCosmosDbSQLDatabaseRead(d, meta) } -func resourceCosmosDbSQLDatabaseUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceCosmosDbSQLDatabaseUpdate(d *pluginsdk.ResourceData, meta 
interface{}) error { client := meta.(*clients.Client).Cosmos.SqlClient ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -187,7 +185,7 @@ func resourceCosmosDbSQLDatabaseUpdate(d *schema.ResourceData, meta interface{}) return resourceCosmosDbSQLDatabaseRead(d, meta) } -func resourceCosmosDbSQLDatabaseRead(d *schema.ResourceData, meta interface{}) error { +func resourceCosmosDbSQLDatabaseRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Cosmos.SqlClient accountClient := meta.(*clients.Client).Cosmos.DatabaseClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) @@ -244,7 +242,7 @@ func resourceCosmosDbSQLDatabaseRead(d *schema.ResourceData, meta interface{}) e return nil } -func resourceCosmosDbSQLDatabaseDelete(d *schema.ResourceData, meta interface{}) error { +func resourceCosmosDbSQLDatabaseDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Cosmos.SqlClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() diff --git a/azurerm/internal/services/cosmos/cosmosdb_sql_database_resource_test.go b/azurerm/internal/services/cosmos/cosmosdb_sql_database_resource_test.go index b2d9f1dc1ee1..e9ee745e397b 100644 --- a/azurerm/internal/services/cosmos/cosmosdb_sql_database_resource_test.go +++ b/azurerm/internal/services/cosmos/cosmosdb_sql_database_resource_test.go @@ -6,12 +6,11 @@ import ( "testing" "github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2021-01-15/documentdb" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -22,10 +21,10 @@ func TestAccCosmosDbSqlDatabase_basic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_sql_database", "test") r := CosmosSqlDatabaseResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -37,11 +36,11 @@ func TestAccCosmosDbSqlDatabase_update(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_sql_database", "test") r := CosmosSqlDatabaseResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.throughput(data, 700), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("throughput").HasValue("700"), ), @@ -50,7 +49,7 @@ func TestAccCosmosDbSqlDatabase_update(t *testing.T) { { Config: r.throughput(data, 1700), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("throughput").HasValue("1700"), ), @@ -63,10 +62,10 @@ func TestAccCosmosDbSqlDatabase_autoscale(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_sql_database", "test") r := CosmosSqlDatabaseResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.autoscale(data, 4000), - Check: resource.ComposeAggregateTestCheckFunc( + Check: 
acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("autoscale_settings.0.max_throughput").HasValue("4000"), ), @@ -75,7 +74,7 @@ func TestAccCosmosDbSqlDatabase_autoscale(t *testing.T) { { Config: r.autoscale(data, 5000), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("autoscale_settings.0.max_throughput").HasValue("5000"), ), @@ -84,7 +83,7 @@ func TestAccCosmosDbSqlDatabase_autoscale(t *testing.T) { { Config: r.autoscale(data, 4000), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("autoscale_settings.0.max_throughput").HasValue("4000"), ), @@ -97,10 +96,10 @@ func TestAccCosmosDbSqlDatabase_serverless(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_sql_database", "test") r := CosmosSqlDatabaseResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.serverless(data), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -108,7 +107,7 @@ func TestAccCosmosDbSqlDatabase_serverless(t *testing.T) { }) } -func (t CosmosSqlDatabaseResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (t CosmosSqlDatabaseResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := parse.SqlDatabaseID(state.ID) if err != nil { return nil, err diff --git a/azurerm/internal/services/cosmos/cosmosdb_sql_function_resource.go b/azurerm/internal/services/cosmos/cosmosdb_sql_function_resource.go index a1b6d30fed3a..b6e495bdedab 100644 --- 
a/azurerm/internal/services/cosmos/cosmosdb_sql_function_resource.go +++ b/azurerm/internal/services/cosmos/cosmosdb_sql_function_resource.go @@ -6,59 +6,58 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2021-01-15/documentdb" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/validate" - azSchema "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/schema" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func resourceCosmosDbSQLFunction() *schema.Resource { - return &schema.Resource{ +func resourceCosmosDbSQLFunction() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceCosmosDbSQLFunctionCreateUpdate, Read: resourceCosmosDbSQLFunctionRead, Update: resourceCosmosDbSQLFunctionCreateUpdate, Delete: resourceCosmosDbSQLFunctionDelete, - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(30 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: 
pluginsdk.DefaultTimeout(30 * time.Minute), }, - Importer: azSchema.ValidateResourceIDPriorToImport(func(id string) error { + Importer: pluginsdk.ImporterValidatingResourceId(func(id string) error { _, err := parse.SqlFunctionID(id) return err }), - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, }, "container_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.SqlContainerID, }, "body": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringIsNotEmpty, }, }, } } -func resourceCosmosDbSQLFunctionCreateUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceCosmosDbSQLFunctionCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { subscriptionId := meta.(*clients.Client).Account.SubscriptionId client := meta.(*clients.Client).Cosmos.SqlResourceClient ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) @@ -104,7 +103,7 @@ func resourceCosmosDbSQLFunctionCreateUpdate(d *schema.ResourceData, meta interf return resourceCosmosDbSQLFunctionRead(d, meta) } -func resourceCosmosDbSQLFunctionRead(d *schema.ResourceData, meta interface{}) error { +func resourceCosmosDbSQLFunctionRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Cosmos.SqlResourceClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() @@ -134,7 +133,7 @@ func resourceCosmosDbSQLFunctionRead(d *schema.ResourceData, meta interface{}) e return nil } -func resourceCosmosDbSQLFunctionDelete(d *schema.ResourceData, meta interface{}) error { +func resourceCosmosDbSQLFunctionDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Cosmos.SqlResourceClient ctx, cancel := 
timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() diff --git a/azurerm/internal/services/cosmos/cosmosdb_sql_function_resource_test.go b/azurerm/internal/services/cosmos/cosmosdb_sql_function_resource_test.go index 7bb9c6334ae0..2888501fd46c 100644 --- a/azurerm/internal/services/cosmos/cosmosdb_sql_function_resource_test.go +++ b/azurerm/internal/services/cosmos/cosmosdb_sql_function_resource_test.go @@ -6,12 +6,11 @@ import ( "testing" "github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2021-01-15/documentdb" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -20,10 +19,10 @@ type CosmosDbSQLFunctionResource struct{} func TestAccCosmosDbSQLFunction_basic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_sql_function", "test") r := CosmosDbSQLFunctionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -34,10 +33,10 @@ func TestAccCosmosDbSQLFunction_basic(t *testing.T) { func TestAccCosmosDbSQLFunction_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_sql_function", "test") r := CosmosDbSQLFunctionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + 
data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -48,24 +47,24 @@ func TestAccCosmosDbSQLFunction_requiresImport(t *testing.T) { func TestAccCosmosDbSQLFunction_update(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_sql_function", "test") r := CosmosDbSQLFunctionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.update(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -73,7 +72,7 @@ func TestAccCosmosDbSQLFunction_update(t *testing.T) { }) } -func (r CosmosDbSQLFunctionResource) Exists(ctx context.Context, client *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (r CosmosDbSQLFunctionResource) Exists(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := parse.SqlFunctionID(state.ID) if err != nil { return nil, err diff --git a/azurerm/internal/services/cosmos/cosmosdb_sql_stored_procedure_resource.go b/azurerm/internal/services/cosmos/cosmosdb_sql_stored_procedure_resource.go index 5f9d34231337..c23868db1f80 100644 --- a/azurerm/internal/services/cosmos/cosmosdb_sql_stored_procedure_resource.go +++ b/azurerm/internal/services/cosmos/cosmosdb_sql_stored_procedure_resource.go @@ -6,24 +6,20 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2021-01-15/documentdb" - 
"github.com/hashicorp/terraform-plugin-sdk/helper/validation" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/validate" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" - "github.com/hashicorp/go-azure-helpers/response" - - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func resourceCosmosDbSQLStoredProcedure() *schema.Resource { - return &schema.Resource{ +func resourceCosmosDbSQLStoredProcedure() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceCosmosDbSQLStoredProcedureCreate, Read: resourceCosmosDbSQLStoredProcedureRead, Update: resourceCosmosDbSQLStoredProcedureUpdate, @@ -32,16 +28,16 @@ func resourceCosmosDbSQLStoredProcedure() *schema.Resource { // TODO: replace this with an importer which validates the ID during import Importer: pluginsdk.DefaultImporter(), - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(30 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: 
pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validation.StringIsNotEmpty, @@ -50,27 +46,27 @@ func resourceCosmosDbSQLStoredProcedure() *schema.Resource { "resource_group_name": azure.SchemaResourceGroupName(), "account_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.CosmosAccountName, }, "body": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringIsNotEmpty, }, "container_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.CosmosEntityName, }, "database_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.CosmosEntityName, @@ -79,7 +75,7 @@ func resourceCosmosDbSQLStoredProcedure() *schema.Resource { } } -func resourceCosmosDbSQLStoredProcedureCreate(d *schema.ResourceData, meta interface{}) error { +func resourceCosmosDbSQLStoredProcedureCreate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Cosmos.SqlClient ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -133,7 +129,7 @@ func resourceCosmosDbSQLStoredProcedureCreate(d *schema.ResourceData, meta inter return resourceCosmosDbSQLStoredProcedureRead(d, meta) } -func resourceCosmosDbSQLStoredProcedureUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceCosmosDbSQLStoredProcedureUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Cosmos.SqlClient ctx, cancel := 
timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -170,7 +166,7 @@ func resourceCosmosDbSQLStoredProcedureUpdate(d *schema.ResourceData, meta inter return resourceCosmosDbSQLStoredProcedureRead(d, meta) } -func resourceCosmosDbSQLStoredProcedureRead(d *schema.ResourceData, meta interface{}) error { +func resourceCosmosDbSQLStoredProcedureRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Cosmos.SqlClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() @@ -206,7 +202,7 @@ func resourceCosmosDbSQLStoredProcedureRead(d *schema.ResourceData, meta interfa return nil } -func resourceCosmosDbSQLStoredProcedureDelete(d *schema.ResourceData, meta interface{}) error { +func resourceCosmosDbSQLStoredProcedureDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Cosmos.SqlClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() diff --git a/azurerm/internal/services/cosmos/cosmosdb_sql_stored_procedure_resource_test.go b/azurerm/internal/services/cosmos/cosmosdb_sql_stored_procedure_resource_test.go index e532bd7bc04d..47253415a209 100644 --- a/azurerm/internal/services/cosmos/cosmosdb_sql_stored_procedure_resource_test.go +++ b/azurerm/internal/services/cosmos/cosmosdb_sql_stored_procedure_resource_test.go @@ -5,16 +5,12 @@ import ( "fmt" "testing" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/parse" - "github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2021-01-15/documentdb" - - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -25,10 +21,10 @@ func TestAccCosmosDbSqlStoredProcedure_basic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_sql_stored_procedure", "test") r := CosmosSqlStoredProcedureResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -40,11 +36,11 @@ func TestAccCosmosDbSqlStoredProcedure_update(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_sql_stored_procedure", "test") r := CosmosSqlStoredProcedureResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -52,7 +48,7 @@ func TestAccCosmosDbSqlStoredProcedure_update(t *testing.T) { { Config: r.update(data), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -60,7 +56,7 @@ func TestAccCosmosDbSqlStoredProcedure_update(t *testing.T) { }) } -func (t CosmosSqlStoredProcedureResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (t CosmosSqlStoredProcedureResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := parse.SqlStoredProcedureID(state.ID) 
if err != nil { return nil, err diff --git a/azurerm/internal/services/cosmos/cosmosdb_sql_trigger_resource.go b/azurerm/internal/services/cosmos/cosmosdb_sql_trigger_resource.go index eddcf57aa332..55164cb14061 100644 --- a/azurerm/internal/services/cosmos/cosmosdb_sql_trigger_resource.go +++ b/azurerm/internal/services/cosmos/cosmosdb_sql_trigger_resource.go @@ -6,59 +6,58 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2021-01-15/documentdb" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/validate" - azSchema "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/schema" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func resourceCosmosDbSQLTrigger() *schema.Resource { - return &schema.Resource{ +func resourceCosmosDbSQLTrigger() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceCosmosDbSQLTriggerCreateUpdate, Read: resourceCosmosDbSQLTriggerRead, Update: resourceCosmosDbSQLTriggerCreateUpdate, Delete: resourceCosmosDbSQLTriggerDelete, - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(30 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), + 
Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), }, - Importer: azSchema.ValidateResourceIDPriorToImport(func(id string) error { + Importer: pluginsdk.ImporterValidatingResourceId(func(id string) error { _, err := parse.SqlTriggerID(id) return err }), - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.CosmosEntityName, }, "container_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.SqlContainerID, }, "body": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringIsNotEmpty, }, "operation": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ string(documentdb.All), @@ -70,7 +69,7 @@ func resourceCosmosDbSQLTrigger() *schema.Resource { }, "type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ string(documentdb.Pre), @@ -80,7 +79,7 @@ func resourceCosmosDbSQLTrigger() *schema.Resource { }, } } -func resourceCosmosDbSQLTriggerCreateUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceCosmosDbSQLTriggerCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { subscriptionId := meta.(*clients.Client).Account.SubscriptionId client := meta.(*clients.Client).Cosmos.SqlResourceClient ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) @@ -130,7 +129,7 @@ func resourceCosmosDbSQLTriggerCreateUpdate(d *schema.ResourceData, meta interfa return resourceCosmosDbSQLTriggerRead(d, meta) } -func 
resourceCosmosDbSQLTriggerRead(d *schema.ResourceData, meta interface{}) error { +func resourceCosmosDbSQLTriggerRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Cosmos.SqlResourceClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() @@ -162,7 +161,7 @@ func resourceCosmosDbSQLTriggerRead(d *schema.ResourceData, meta interface{}) er return nil } -func resourceCosmosDbSQLTriggerDelete(d *schema.ResourceData, meta interface{}) error { +func resourceCosmosDbSQLTriggerDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Cosmos.SqlResourceClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() diff --git a/azurerm/internal/services/cosmos/cosmosdb_sql_trigger_resource_test.go b/azurerm/internal/services/cosmos/cosmosdb_sql_trigger_resource_test.go index 63978db320f7..469ecb82d179 100644 --- a/azurerm/internal/services/cosmos/cosmosdb_sql_trigger_resource_test.go +++ b/azurerm/internal/services/cosmos/cosmosdb_sql_trigger_resource_test.go @@ -6,12 +6,11 @@ import ( "testing" "github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2021-01-15/documentdb" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -20,10 +19,10 @@ type CosmosDbSQLTriggerResource struct{} func TestAccCosmosDbSQLTrigger_basic(t *testing.T) { data := 
acceptance.BuildTestData(t, "azurerm_cosmosdb_sql_trigger", "test") r := CosmosDbSQLTriggerResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -34,10 +33,10 @@ func TestAccCosmosDbSQLTrigger_basic(t *testing.T) { func TestAccCosmosDbSQLTrigger_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_sql_trigger", "test") r := CosmosDbSQLTriggerResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -48,24 +47,24 @@ func TestAccCosmosDbSQLTrigger_requiresImport(t *testing.T) { func TestAccCosmosDbSQLTrigger_update(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_sql_trigger", "test") r := CosmosDbSQLTriggerResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.update(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -73,7 +72,7 @@ func TestAccCosmosDbSQLTrigger_update(t *testing.T) { }) } -func (r CosmosDbSQLTriggerResource) Exists(ctx context.Context, client *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (r CosmosDbSQLTriggerResource) Exists(ctx 
context.Context, client *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := parse.SqlTriggerID(state.ID) if err != nil { return nil, err diff --git a/azurerm/internal/services/cosmos/cosmosdb_table_resource.go b/azurerm/internal/services/cosmos/cosmosdb_table_resource.go index f46fd863942a..0baff6dbe35f 100644 --- a/azurerm/internal/services/cosmos/cosmosdb_table_resource.go +++ b/azurerm/internal/services/cosmos/cosmosdb_table_resource.go @@ -5,11 +5,8 @@ import ( "log" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" - "github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2021-01-15/documentdb" "github.com/hashicorp/go-azure-helpers/response" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" @@ -17,12 +14,13 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/migration" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func resourceCosmosDbTable() *schema.Resource { - return &schema.Resource{ +func resourceCosmosDbTable() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceCosmosDbTableCreate, Read: resourceCosmosDbTableRead, Update: resourceCosmosDbTableUpdate, @@ -36,16 +34,16 @@ func resourceCosmosDbTable() *schema.Resource { 0: migration.TableV0ToV1{}, }), - 
Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(30 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.CosmosEntityName, @@ -54,14 +52,14 @@ func resourceCosmosDbTable() *schema.Resource { "resource_group_name": azure.SchemaResourceGroupName(), "account_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.CosmosAccountName, }, "throughput": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Computed: true, ValidateFunc: validate.CosmosThroughput, @@ -72,7 +70,7 @@ func resourceCosmosDbTable() *schema.Resource { } } -func resourceCosmosDbTableCreate(d *schema.ResourceData, meta interface{}) error { +func resourceCosmosDbTableCreate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Cosmos.TableClient ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -136,7 +134,7 @@ func resourceCosmosDbTableCreate(d *schema.ResourceData, meta interface{}) error return resourceCosmosDbTableRead(d, meta) } -func resourceCosmosDbTableUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceCosmosDbTableUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Cosmos.TableClient ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -187,7 +185,7 @@ func 
resourceCosmosDbTableUpdate(d *schema.ResourceData, meta interface{}) error return resourceCosmosDbTableRead(d, meta) } -func resourceCosmosDbTableRead(d *schema.ResourceData, meta interface{}) error { +func resourceCosmosDbTableRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Cosmos.TableClient accountClient := meta.(*clients.Client).Cosmos.DatabaseClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) @@ -244,7 +242,7 @@ func resourceCosmosDbTableRead(d *schema.ResourceData, meta interface{}) error { return nil } -func resourceCosmosDbTableDelete(d *schema.ResourceData, meta interface{}) error { +func resourceCosmosDbTableDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Cosmos.TableClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() diff --git a/azurerm/internal/services/cosmos/cosmosdb_table_resource_test.go b/azurerm/internal/services/cosmos/cosmosdb_table_resource_test.go index c4e988742b1f..7cd650305850 100644 --- a/azurerm/internal/services/cosmos/cosmosdb_table_resource_test.go +++ b/azurerm/internal/services/cosmos/cosmosdb_table_resource_test.go @@ -6,12 +6,11 @@ import ( "testing" "github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2021-01-15/documentdb" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/cosmos/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ 
-22,10 +21,10 @@ func TestAccCosmosDbTable_basic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_table", "test") r := CosmosTableResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -37,11 +36,11 @@ func TestAccCosmosDbTable_update(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_table", "test") r := CosmosTableResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.throughput(data, 700), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("throughput").HasValue("700"), ), @@ -50,7 +49,7 @@ func TestAccCosmosDbTable_update(t *testing.T) { { Config: r.throughput(data, 1700), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("throughput").HasValue("1700"), ), @@ -63,10 +62,10 @@ func TestAccCosmosDbTable_autoscale(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_table", "test") r := CosmosTableResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.autoscale(data, 4000), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("autoscale_settings.0.max_throughput").HasValue("4000"), ), @@ -74,7 +73,7 @@ func TestAccCosmosDbTable_autoscale(t *testing.T) { data.ImportStep(), { Config: r.autoscale(data, 5000), - Check: resource.ComposeAggregateTestCheckFunc( + Check: 
acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("autoscale_settings.0.max_throughput").HasValue("5000"), ), @@ -82,7 +81,7 @@ func TestAccCosmosDbTable_autoscale(t *testing.T) { data.ImportStep(), { Config: r.autoscale(data, 4000), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("autoscale_settings.0.max_throughput").HasValue("4000"), ), @@ -95,10 +94,10 @@ func TestAccCosmosDbTable_serverless(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_cosmosdb_table", "test") r := CosmosTableResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.serverless(data), - Check: resource.ComposeAggregateTestCheckFunc( + Check: acceptance.ComposeAggregateTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -106,7 +105,7 @@ func TestAccCosmosDbTable_serverless(t *testing.T) { }) } -func (t CosmosTableResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (t CosmosTableResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := parse.TableID(state.ID) if err != nil { return nil, err diff --git a/azurerm/internal/services/cosmos/migration/cassandra_keyspace.go b/azurerm/internal/services/cosmos/migration/cassandra_keyspace.go index 2302c912a50e..b85048d28b71 100644 --- a/azurerm/internal/services/cosmos/migration/cassandra_keyspace.go +++ b/azurerm/internal/services/cosmos/migration/cassandra_keyspace.go @@ -5,7 +5,6 @@ import ( "log" "strings" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" ) @@ -14,27 +13,27 @@ var _ pluginsdk.StateUpgrade = 
CassandraKeyspaceV0ToV1{} type CassandraKeyspaceV0ToV1 struct{} func (CassandraKeyspaceV0ToV1) Schema() map[string]*pluginsdk.Schema { - return map[string]*schema.Schema{ + return map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, }, "resource_group_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, }, "account_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, }, "throughput": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Computed: true, }, diff --git a/azurerm/internal/services/cosmos/migration/gremlin_database.go b/azurerm/internal/services/cosmos/migration/gremlin_database.go index b78770487134..70fa0d5b2508 100644 --- a/azurerm/internal/services/cosmos/migration/gremlin_database.go +++ b/azurerm/internal/services/cosmos/migration/gremlin_database.go @@ -5,7 +5,6 @@ import ( "log" "strings" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" ) @@ -14,27 +13,27 @@ var _ pluginsdk.StateUpgrade = GremlinDatabaseV0ToV1{} type GremlinDatabaseV0ToV1 struct{} func (GremlinDatabaseV0ToV1) Schema() map[string]*pluginsdk.Schema { - return map[string]*schema.Schema{ + return map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, }, "resource_group_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, }, "account_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, }, "throughput": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Computed: true, }, diff --git a/azurerm/internal/services/cosmos/migration/gremlin_graph.go b/azurerm/internal/services/cosmos/migration/gremlin_graph.go index 
39c8fa522aee..38c827df4d1c 100644 --- a/azurerm/internal/services/cosmos/migration/gremlin_graph.go +++ b/azurerm/internal/services/cosmos/migration/gremlin_graph.go @@ -5,7 +5,6 @@ import ( "log" "strings" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" ) @@ -14,99 +13,99 @@ var _ pluginsdk.StateUpgrade = GremlinGraphV0ToV1{} type GremlinGraphV0ToV1 struct{} func (GremlinGraphV0ToV1) Schema() map[string]*pluginsdk.Schema { - return map[string]*schema.Schema{ + return map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, }, "resource_group_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, }, "account_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, }, "database_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, }, "throughput": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Computed: true, }, "partition_key_path": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, }, "index_policy": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Required: true, ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "automatic": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: true, }, "indexing_mode": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "included_paths": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, - Set: schema.HashString, + Set: pluginsdk.HashString, }, "excluded_paths": { - Type: schema.TypeSet, + 
Type: pluginsdk.TypeSet, Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, - Set: schema.HashString, + Set: pluginsdk.HashString, }, }, }, }, "conflict_resolution_policy": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Required: true, ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "mode": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "conflict_resolution_path": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, }, "conflict_resolution_procedure": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, }, }, @@ -114,17 +113,17 @@ func (GremlinGraphV0ToV1) Schema() map[string]*pluginsdk.Schema { }, "unique_key": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "paths": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Required: true, ForceNew: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, }, diff --git a/azurerm/internal/services/cosmos/migration/mongo_collection.go b/azurerm/internal/services/cosmos/migration/mongo_collection.go index 319e90128bcf..cdcc9e5019ec 100644 --- a/azurerm/internal/services/cosmos/migration/mongo_collection.go +++ b/azurerm/internal/services/cosmos/migration/mongo_collection.go @@ -5,7 +5,6 @@ import ( "log" "strings" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" ) @@ -14,44 +13,44 @@ var _ pluginsdk.StateUpgrade = MongoCollectionV0ToV1{} type MongoCollectionV0ToV1 struct{} func (MongoCollectionV0ToV1) Schema() map[string]*pluginsdk.Schema { 
- return map[string]*schema.Schema{ + return map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, }, "resource_group_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, }, "account_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, }, "database_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, }, "shard_key": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, }, "default_ttl_seconds": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, }, "throughput": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Computed: true, }, diff --git a/azurerm/internal/services/cosmos/migration/mongo_database.go b/azurerm/internal/services/cosmos/migration/mongo_database.go index 315ef0c4d711..f702ac264b76 100644 --- a/azurerm/internal/services/cosmos/migration/mongo_database.go +++ b/azurerm/internal/services/cosmos/migration/mongo_database.go @@ -5,7 +5,6 @@ import ( "log" "strings" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" ) @@ -14,27 +13,27 @@ var _ pluginsdk.StateUpgrade = MongoDatabaseV0ToV1{} type MongoDatabaseV0ToV1 struct{} func (MongoDatabaseV0ToV1) Schema() map[string]*pluginsdk.Schema { - return map[string]*schema.Schema{ + return map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, }, "resource_group_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, }, "account_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, }, "throughput": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Computed: true, 
}, diff --git a/azurerm/internal/services/cosmos/migration/sql_container.go b/azurerm/internal/services/cosmos/migration/sql_container.go index 3b47dd971e93..765ff68909bc 100644 --- a/azurerm/internal/services/cosmos/migration/sql_container.go +++ b/azurerm/internal/services/cosmos/migration/sql_container.go @@ -5,7 +5,6 @@ import ( "log" "strings" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" ) @@ -14,61 +13,61 @@ var _ pluginsdk.StateUpgrade = SqlContainerV0ToV1{} type SqlContainerV0ToV1 struct{} func (SqlContainerV0ToV1) Schema() map[string]*pluginsdk.Schema { - return map[string]*schema.Schema{ + return map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, }, "resource_group_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, }, "account_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, }, "database_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, }, "partition_key_path": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, }, "throughput": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Computed: true, }, "default_ttl": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Computed: true, }, "unique_key": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "paths": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Required: true, ForceNew: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, }, diff --git 
a/azurerm/internal/services/cosmos/migration/sql_database.go b/azurerm/internal/services/cosmos/migration/sql_database.go index e92f59b6497f..ea2e604d5ca5 100644 --- a/azurerm/internal/services/cosmos/migration/sql_database.go +++ b/azurerm/internal/services/cosmos/migration/sql_database.go @@ -5,7 +5,6 @@ import ( "log" "strings" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" ) @@ -14,27 +13,27 @@ var _ pluginsdk.StateUpgrade = SqlDatabaseV0ToV1{} type SqlDatabaseV0ToV1 struct{} func (SqlDatabaseV0ToV1) Schema() map[string]*pluginsdk.Schema { - return map[string]*schema.Schema{ + return map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, }, "resource_group_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, }, "account_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, }, "throughput": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Computed: true, }, diff --git a/azurerm/internal/services/cosmos/migration/table.go b/azurerm/internal/services/cosmos/migration/table.go index b4f157eed4e4..5a0986b56c88 100644 --- a/azurerm/internal/services/cosmos/migration/table.go +++ b/azurerm/internal/services/cosmos/migration/table.go @@ -5,7 +5,6 @@ import ( "log" "strings" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" ) @@ -14,27 +13,27 @@ var _ pluginsdk.StateUpgrade = TableV0ToV1{} type TableV0ToV1 struct{} func (TableV0ToV1) Schema() map[string]*pluginsdk.Schema { - return map[string]*schema.Schema{ + return map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, }, "resource_group_name": { - Type: 
schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, }, "account_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, }, "throughput": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Computed: true, }, diff --git a/azurerm/internal/services/cosmos/registration.go b/azurerm/internal/services/cosmos/registration.go index 88bc4365abb5..0b793b77aff8 100644 --- a/azurerm/internal/services/cosmos/registration.go +++ b/azurerm/internal/services/cosmos/registration.go @@ -1,7 +1,7 @@ package cosmos import ( - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" ) type Registration struct{} @@ -19,15 +19,15 @@ func (r Registration) WebsiteCategories() []string { } // SupportedDataSources returns the supported Data Sources supported by this Service -func (r Registration) SupportedDataSources() map[string]*schema.Resource { - return map[string]*schema.Resource{ +func (r Registration) SupportedDataSources() map[string]*pluginsdk.Resource { + return map[string]*pluginsdk.Resource{ "azurerm_cosmosdb_account": dataSourceCosmosDbAccount(), } } // SupportedResources returns the supported Resources supported by this Service -func (r Registration) SupportedResources() map[string]*schema.Resource { - return map[string]*schema.Resource{ +func (r Registration) SupportedResources() map[string]*pluginsdk.Resource { + return map[string]*pluginsdk.Resource{ "azurerm_cosmosdb_account": resourceCosmosDbAccount(), "azurerm_cosmosdb_cassandra_keyspace": resourceCosmosDbCassandraKeyspace(), "azurerm_cosmosdb_cassandra_table": resourceCosmosDbCassandraTable(), diff --git a/azurerm/internal/services/costmanagement/cost_management_export_resource_group_resource.go b/azurerm/internal/services/costmanagement/cost_management_export_resource_group_resource.go index 24f50db26b78..298ec84f444f 100644 --- 
a/azurerm/internal/services/costmanagement/cost_management_export_resource_group_resource.go +++ b/azurerm/internal/services/costmanagement/cost_management_export_resource_group_resource.go @@ -5,8 +5,6 @@ import ( "strings" "time" - resourceValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/validate" - "github.com/Azure/azure-sdk-for-go/services/costmanagement/mgmt/2019-10-01/costmanagement" "github.com/Azure/go-autorest/autorest/date" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" @@ -14,6 +12,7 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/costmanagement/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/costmanagement/validate" + resourceValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/resource/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" diff --git a/azurerm/internal/services/databasemigration/database_migration_project_data_source.go b/azurerm/internal/services/databasemigration/database_migration_project_data_source.go index de3574e21b9a..6d0429b5548e 100644 --- a/azurerm/internal/services/databasemigration/database_migration_project_data_source.go +++ b/azurerm/internal/services/databasemigration/database_migration_project_data_source.go @@ -4,14 +4,11 @@ import ( "fmt" "time" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/location" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/databasemigration/parse" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/databasemigration/validate" - - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" diff --git a/azurerm/internal/services/databasemigration/database_migration_project_resource_test.go b/azurerm/internal/services/databasemigration/database_migration_project_resource_test.go index e5a5c76ab875..39465285915c 100644 --- a/azurerm/internal/services/databasemigration/database_migration_project_resource_test.go +++ b/azurerm/internal/services/databasemigration/database_migration_project_resource_test.go @@ -5,13 +5,11 @@ import ( "fmt" "testing" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/databasemigration/parse" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/databasemigration/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) diff --git a/azurerm/internal/services/databricks/databricks_customer_managed_key_resource.go 
b/azurerm/internal/services/databricks/databricks_customer_managed_key_resource.go new file mode 100644 index 000000000000..a8b40aa4813b --- /dev/null +++ b/azurerm/internal/services/databricks/databricks_customer_managed_key_resource.go @@ -0,0 +1,309 @@ +package databricks + +import ( + "context" + "fmt" + "log" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/services/databricks/mgmt/2018-04-01/databricks" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/locks" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/databricks/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/databricks/validate" + keyVaultParse "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/keyvault/parse" + keyVaultValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/keyvault/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func resourceDatabricksWorkspaceCustomerManagedKey() *pluginsdk.Resource { + return &pluginsdk.Resource{ + Create: DatabricksWorkspaceCustomerManagedKeyCreateUpdate, + Read: DatabricksWorkspaceCustomerManagedKeyRead, + Update: DatabricksWorkspaceCustomerManagedKeyCreateUpdate, + Delete: DatabricksWorkspaceCustomerManagedKeyDelete, + + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), + }, + + Importer: 
pluginsdk.ImporterValidatingResourceIdThen(func(id string) error { + _, err := parse.CustomerManagedKeyID(id) + return err + }, func(ctx context.Context, d *pluginsdk.ResourceData, meta interface{}) ([]*pluginsdk.ResourceData, error) { + client := meta.(*clients.Client).DataBricks.WorkspacesClient + + // validate that the passed ID is a valid CMK configuration ID + customManagedKey, err := parse.CustomerManagedKeyID(d.Id()) + if err != nil { + return []*pluginsdk.ResourceData{d}, fmt.Errorf("parsing Databricks workspace customer managed key ID %q for import: %v", d.Id(), err) + } + + // convert the passed custom Managed Key ID to a valid workspace ID + workspace := parse.NewWorkspaceID(customManagedKey.SubscriptionId, customManagedKey.ResourceGroup, customManagedKey.CustomerMangagedKeyName) + + // validate that the workspace exists + if _, err = client.Get(ctx, workspace.ResourceGroup, workspace.Name); err != nil { + return []*pluginsdk.ResourceData{d}, fmt.Errorf("retrieving the Databricks workspace customer managed key configuration(ID: %q) for workspace (ID: %q): %s", customManagedKey.ID(), workspace.ID(), err) + } + + // set the new values for the CMK resource + d.SetId(customManagedKey.ID()) + d.Set("workspace_id", workspace.ID()) + + return []*pluginsdk.ResourceData{d}, nil + }), + + Schema: map[string]*pluginsdk.Schema{ + "workspace_id": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validate.WorkspaceID, + }, + + // Make this key vault key id and abstract everything from the string... 
+ "key_vault_key_id": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: keyVaultValidate.KeyVaultChildID, + }, + }, + } +} + +func DatabricksWorkspaceCustomerManagedKeyCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { + workspaceClient := meta.(*clients.Client).DataBricks.WorkspacesClient + keyVaultsClient := meta.(*clients.Client).KeyVault + subscriptionId := meta.(*clients.Client).Account.SubscriptionId + ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) + defer cancel() + + workspaceIDRaw := d.Get("workspace_id").(string) + workspaceID, err := parse.WorkspaceID(workspaceIDRaw) + if err != nil { + return err + } + + keyIdRaw := d.Get("key_vault_key_id").(string) + key, err := keyVaultParse.ParseNestedItemID(keyIdRaw) + if err != nil { + return err + } + + // Not sure if I should also lock the key vault here too + // or at the very least the key? + locks.ByName(workspaceID.Name, "azurerm_databricks_workspace") + defer locks.UnlockByName(workspaceID.Name, "azurerm_databricks_workspace") + var encryptionEnabled, infrastructureEnabled bool + + workspace, err := workspaceClient.Get(ctx, workspaceID.ResourceGroup, workspaceID.Name) + if err != nil { + return fmt.Errorf("retrieving Databricks Workspace %q (Resource Group %q): %+v", workspaceID.Name, workspaceID.ResourceGroup, err) + } + if workspace.Parameters != nil { + if workspace.Parameters.RequireInfrastructureEncryption != nil { + infrastructureEnabled = *workspace.Parameters.RequireInfrastructureEncryption.Value + } + if workspace.Parameters.PrepareEncryption != nil { + encryptionEnabled = *workspace.Parameters.PrepareEncryption.Value + } + } else { + return fmt.Errorf("retrieving Databricks Workspace %q (Resource Group %q): `WorkspaceCustomParameters` was nil", workspaceID.Name, workspaceID.ResourceGroup) + } + + if infrastructureEnabled { + return fmt.Errorf("Databricks Workspace %q (Resource Group %q): `infrastructure_encryption_enabled` must be set 
to `false`", workspaceID.Name, workspaceID.ResourceGroup) + } + if !encryptionEnabled { + return fmt.Errorf("Databricks Workspace %q (Resource Group %q): `customer_managed_key_enabled` must be set to `true`", workspaceID.Name, workspaceID.ResourceGroup) + } + + // make sure the key vault exists + keyVaultIdRaw, err := keyVaultsClient.KeyVaultIDFromBaseUrl(ctx, meta.(*clients.Client).Resource, key.KeyVaultBaseUrl) + if err != nil || keyVaultIdRaw == nil { + return fmt.Errorf("retrieving the Resource ID for the Key Vault at URL %q: %+v", key.KeyVaultBaseUrl, err) + } + + resourceID := parse.NewCustomerManagedKeyID(subscriptionId, workspaceID.ResourceGroup, workspaceID.Name) + + if d.IsNewResource() { + if workspace.Parameters.Encryption != nil { + return tf.ImportAsExistsError("azurerm_databricks_workspace_customer_managed_key", resourceID.ID()) + } + } + + // We need to pull all of the custom params from the parent + // workspace resource and then add our new encryption values into the + // structure, else the other values set in the parent workspace + // resource will be lost and overwritten as nil. ¯\_(ツ)_/¯ + // NOTE: 'workspace.Parameters' will never be nil as 'customer_managed_key_enabled' and 'infrastructure_encryption_enabled' + // fields have a default value in the parent workspace resource. 
+ params := workspace.Parameters + params.Encryption = &databricks.WorkspaceEncryptionParameter{ + Value: &databricks.Encryption{ + KeySource: databricks.MicrosoftKeyvault, + KeyName: &key.Name, + KeyVersion: &key.Version, + KeyVaultURI: &key.KeyVaultBaseUrl, + }, + } + + props := databricks.Workspace{ + Location: workspace.Location, + Sku: workspace.Sku, + WorkspaceProperties: &databricks.WorkspaceProperties{ + ManagedResourceGroupID: workspace.WorkspaceProperties.ManagedResourceGroupID, + Parameters: params, + }, + Tags: workspace.Tags, + } + + future, err := workspaceClient.CreateOrUpdate(ctx, props, resourceID.ResourceGroup, resourceID.CustomerMangagedKeyName) + if err != nil { + return fmt.Errorf("creating/updating %s: %+v", resourceID, err) + } + + if err = future.WaitForCompletionRef(ctx, workspaceClient.Client); err != nil { + return fmt.Errorf("waiting for create/update of %s: %+v", resourceID, err) + } + + d.SetId(resourceID.ID()) + return DatabricksWorkspaceCustomerManagedKeyRead(d, meta) +} + +func DatabricksWorkspaceCustomerManagedKeyRead(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).DataBricks.WorkspacesClient + ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.CustomerManagedKeyID(d.Id()) + if err != nil { + return err + } + + workspaceId := parse.NewWorkspaceID(id.SubscriptionId, id.ResourceGroup, id.CustomerMangagedKeyName) + + resp, err := client.Get(ctx, id.ResourceGroup, id.CustomerMangagedKeyName) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + log.Printf("[DEBUG] %s was not found - removing from state", *id) + d.SetId("") + return nil + } + + return fmt.Errorf("retrieving %s: %+v", *id, err) + } + + keySource := "" + keyName := "" + keyVersion := "" + keyVaultURI := "" + + if resp.WorkspaceProperties.Parameters != nil { + if props := resp.WorkspaceProperties.Parameters.Encryption; props != nil { + if props.Value.KeySource != 
"" { + keySource = string(props.Value.KeySource) + } + if props.Value.KeyName != nil { + keyName = *props.Value.KeyName + } + if props.Value.KeyVersion != nil { + keyVersion = *props.Value.KeyVersion + } + if props.Value.KeyVaultURI != nil { + keyVaultURI = *props.Value.KeyVaultURI + } + } + } + + // I have to get rid of this check due to import if you want to re-cmk your DBFS. + // This is because when you delete this it sets the key source to default + // if !strings.EqualFold(keySource, string(databricks.MicrosoftKeyvault)) { + // return fmt.Errorf("retrieving Databricks Workspace %q (Resource Group %q): `Workspace.WorkspaceProperties.Encryption.Value.KeySource` was expected to be %q, got %q", id.CustomerMangagedKeyName, id.ResourceGroup, string(databricks.MicrosoftKeyvault), keySource) + // } + + if strings.EqualFold(keySource, string(databricks.MicrosoftKeyvault)) && (keyName == "" || keyVersion == "" || keyVaultURI == "") { + return fmt.Errorf("Databricks Workspace %q (Resource Group %q): `Workspace.WorkspaceProperties.Encryption.Value(s)` were nil", id.CustomerMangagedKeyName, id.ResourceGroup) + } + + d.SetId(id.ID()) + d.Set("workspace_id", workspaceId.ID()) + if keyVaultURI != "" { + key, err := keyVaultParse.NewNestedItemID(keyVaultURI, "keys", keyName, keyVersion) + if err == nil { + d.Set("key_vault_key_id", key.ID()) + } + } + + return nil +} + +func DatabricksWorkspaceCustomerManagedKeyDelete(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).DataBricks.WorkspacesClient + ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.CustomerManagedKeyID(d.Id()) + if err != nil { + return err + } + + workspaceID := parse.NewWorkspaceID(id.SubscriptionId, id.ResourceGroup, id.CustomerMangagedKeyName) + + // Not sure if I should also lock the key vault here too + locks.ByName(workspaceID.Name, "azurerm_databricks_workspace") + defer locks.UnlockByName(workspaceID.Name, 
"azurerm_databricks_workspace") + + workspace, err := client.Get(ctx, id.ResourceGroup, id.CustomerMangagedKeyName) + if err != nil { + if utils.ResponseWasNotFound(workspace.Response) { + log.Printf("[DEBUG] %s was not found - removing from state", *id) + d.SetId("") + return nil + } + + return fmt.Errorf("retrieving %s: %+v", *id, err) + } + + // Since this isn't real and you cannot turn off CMK without destroying the + // workspace and recreating it the best I can do is to set the workspace + // back to using Microsoft managed keys and removing the CMK fields + // also need to pull all of the custom params from the parent + // workspace resource and then add our new encryption values into the + // structure, else the other values set in the parent workspace + // resource will be lost and overwritten as nil. ¯\_(ツ)_/¯ + params := workspace.Parameters + params.Encryption = &databricks.WorkspaceEncryptionParameter{ + Value: &databricks.Encryption{ + KeySource: databricks.Default, + }, + } + + props := databricks.Workspace{ + Location: workspace.Location, + Sku: workspace.Sku, + WorkspaceProperties: &databricks.WorkspaceProperties{ + ManagedResourceGroupID: workspace.WorkspaceProperties.ManagedResourceGroupID, + Parameters: params, + }, + Tags: workspace.Tags, + } + + future, err := client.CreateOrUpdate(ctx, props, workspaceID.ResourceGroup, workspaceID.Name) + if err != nil { + return fmt.Errorf("creating/updating %s: %+v", workspaceID, err) + } + + if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { + return fmt.Errorf("waiting for create/update of %s: %+v", workspaceID, err) + } + + return nil +} diff --git a/azurerm/internal/services/databricks/databricks_customer_managed_key_resource_test.go b/azurerm/internal/services/databricks/databricks_customer_managed_key_resource_test.go new file mode 100644 index 000000000000..3f0788d706a1 --- /dev/null +++ b/azurerm/internal/services/databricks/databricks_customer_managed_key_resource_test.go @@ 
-0,0 +1,283 @@ +package databricks_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/services/databricks/mgmt/2018-04-01/databricks" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/databricks/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +type DatabricksWorkspaceCustomerManagedKeyResource struct { +} + +func TestAccDatabricksWorkspaceCustomerManagedKey_basic(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_databricks_workspace_customer_managed_key", "test") + parent := acceptance.BuildTestData(t, "azurerm_databricks_workspace", "test") + r := DatabricksWorkspaceCustomerManagedKeyResource{} + cmkTemplate := r.cmkTemplate() + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data, cmkTemplate), + Check: acceptance.ComposeTestCheckFunc( + // You must look for the parent resource (e.g. Databricks Workspace) + // and then derive if the CMK object has been set or not... 
+ check.That(parent.ResourceName).ExistsInAzure(r), + ), + }, + parent.ImportStep(), + }) +} + +func TestAccDatabricksWorkspaceCustomerManagedKey_remove(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_databricks_workspace_customer_managed_key", "test") + parent := acceptance.BuildTestData(t, "azurerm_databricks_workspace", "test") + r := DatabricksWorkspaceCustomerManagedKeyResource{} + cmkTemplate := r.cmkTemplate() + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data, cmkTemplate), + Check: acceptance.ComposeTestCheckFunc( + check.That(parent.ResourceName).ExistsInAzure(r), + ), + }, + parent.ImportStep(), + { + Config: r.basic(data, ""), + Check: acceptance.ComposeTestCheckFunc( + // Then ensure the encryption settings on the Databricks Workspace + // have been reverted to their default state + check.That(parent.ResourceName).DoesNotExistInAzure(r), + ), + }, + parent.ImportStep(), + }) +} + +func TestAccDatabricksWorkspaceCustomerManagedKey_requiresImport(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_databricks_workspace_customer_managed_key", "test") + parent := acceptance.BuildTestData(t, "azurerm_databricks_workspace", "test") + r := DatabricksWorkspaceCustomerManagedKeyResource{} + cmkTemplate := r.cmkTemplate() + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data, cmkTemplate), + Check: acceptance.ComposeTestCheckFunc( + check.That(parent.ResourceName).ExistsInAzure(r), + ), + }, + data.RequiresImportErrorStep(r.requiresImport), + }) +} + +func TestAccDatabricksWorkspaceCustomerManagedKey_noIp(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_databricks_workspace_customer_managed_key", "test") + parent := acceptance.BuildTestData(t, "azurerm_databricks_workspace", "test") + r := DatabricksWorkspaceCustomerManagedKeyResource{} + cmkTemplate := r.cmkTemplate() + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.noip(data, cmkTemplate), + Check: 
acceptance.ComposeTestCheckFunc( + check.That(parent.ResourceName).ExistsInAzure(r), + ), + }, + parent.ImportStep(), + { + Config: r.noip(data, ""), + Check: acceptance.ComposeTestCheckFunc( + check.That(parent.ResourceName).DoesNotExistInAzure(r), + check.That(parent.ResourceName).Key("custom_parameters.0.no_public_ip").IsSet(), + ), + }, + parent.ImportStep(), + }) +} + +func (DatabricksWorkspaceCustomerManagedKeyResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { + id, err := parse.WorkspaceID(state.ID) + if err != nil { + return nil, err + } + + resp, err := clients.DataBricks.WorkspacesClient.Get(ctx, id.ResourceGroup, id.Name) + if err != nil { + return nil, fmt.Errorf("retrieving Databricks Workspace Customer Mangaged Key %q (resource group: %q): %+v", id.Name, id.ResourceGroup, err) + } + + // This is the only way we can tell if the CMK has actually been provisioned or not... + if resp.WorkspaceProperties.Parameters != nil && resp.WorkspaceProperties.Parameters.Encryption != nil { + if resp.WorkspaceProperties.Parameters.Encryption.Value.KeySource == databricks.MicrosoftKeyvault { + return utils.Bool(true), nil + } + } + + return utils.Bool(false), nil +} + +func (DatabricksWorkspaceCustomerManagedKeyResource) basic(data acceptance.TestData, cmk string) string { + keyVault := DatabricksWorkspaceCustomerManagedKeyResource{}.keyVaultTemplate(data) + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +data "azurerm_client_config" "current" {} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-db-%[1]d" + location = "%[2]s" +} + +%[3]s + +resource "azurerm_databricks_workspace" "test" { + name = "acctestDBW-%[1]d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + sku = "premium" + + customer_managed_key_enabled = true +} + +%[4]s +`, data.RandomInteger, data.Locations.Primary, keyVault, cmk) +} + +func 
(DatabricksWorkspaceCustomerManagedKeyResource) requiresImport(data acceptance.TestData) string { + cmkTemplate := DatabricksWorkspaceCustomerManagedKeyResource{}.cmkTemplate() + template := DatabricksWorkspaceCustomerManagedKeyResource{}.basic(data, cmkTemplate) + return fmt.Sprintf(` +%s + +resource "azurerm_databricks_workspace_customer_managed_key" "import" { + workspace_id = azurerm_databricks_workspace.test.id + key_vault_key_id = azurerm_key_vault_key.test.id +} +`, template) +} + +func (DatabricksWorkspaceCustomerManagedKeyResource) noip(data acceptance.TestData, cmk string) string { + keyVault := DatabricksWorkspaceCustomerManagedKeyResource{}.keyVaultTemplate(data) + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +data "azurerm_client_config" "current" {} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-db-%[1]d" + location = "%[2]s" +} + +%[3]s + +resource "azurerm_databricks_workspace" "test" { + name = "acctestDBW-%[1]d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + sku = "premium" + + customer_managed_key_enabled = true + + custom_parameters { + no_public_ip = true + } +} + +%[5]s +`, data.RandomInteger, data.Locations.Primary, keyVault, data.RandomString, cmk) +} + +func (DatabricksWorkspaceCustomerManagedKeyResource) cmkTemplate() string { + return ` +resource "azurerm_databricks_workspace_customer_managed_key" "test" { + depends_on = [azurerm_key_vault_access_policy.databricks] + + workspace_id = azurerm_databricks_workspace.test.id + key_vault_key_id = azurerm_key_vault_key.test.id +} +` +} + +func (DatabricksWorkspaceCustomerManagedKeyResource) keyVaultTemplate(data acceptance.TestData) string { + return fmt.Sprintf(` +resource "azurerm_key_vault" "test" { + name = "acctest-kv-%[3]s" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + tenant_id = data.azurerm_client_config.current.tenant_id + 
sku_name = "premium" + + soft_delete_retention_days = 7 +} + +resource "azurerm_key_vault_key" "test" { + depends_on = [azurerm_key_vault_access_policy.test] + + name = "acctest-key-%[1]d" + key_vault_id = azurerm_key_vault.test.id + key_type = "RSA" + key_size = 2048 + + key_opts = [ + "decrypt", + "encrypt", + "sign", + "unwrapKey", + "verify", + "wrapKey", + ] +} + +resource "azurerm_key_vault_access_policy" "test" { + key_vault_id = azurerm_key_vault.test.id + tenant_id = azurerm_key_vault.test.tenant_id + object_id = data.azurerm_client_config.current.object_id + + key_permissions = [ + "get", + "list", + "create", + "decrypt", + "encrypt", + "sign", + "unwrapKey", + "verify", + "wrapKey", + "delete", + "restore", + "recover", + "update", + "purge", + ] +} + +resource "azurerm_key_vault_access_policy" "databricks" { + depends_on = [azurerm_databricks_workspace.test] + + key_vault_id = azurerm_key_vault.test.id + tenant_id = azurerm_databricks_workspace.test.storage_account_identity.0.tenant_id + object_id = azurerm_databricks_workspace.test.storage_account_identity.0.principal_id + + key_permissions = [ + "get", + "unwrapKey", + "wrapKey", + "delete", + "purge", + ] +} +`, data.RandomInteger, data.Locations.Primary, data.RandomString) +} diff --git a/azurerm/internal/services/databricks/databricks_workspace_resource.go b/azurerm/internal/services/databricks/databricks_workspace_resource.go index 7fb928406bde..19a602d25d4c 100644 --- a/azurerm/internal/services/databricks/databricks_workspace_resource.go +++ b/azurerm/internal/services/databricks/databricks_workspace_resource.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "log" + "strings" "time" "github.com/Azure/azure-sdk-for-go/services/databricks/mgmt/2018-04-01/databricks" @@ -62,8 +63,6 @@ func resourceDatabricksWorkspace() *pluginsdk.Resource { }, false), }, - "tags": tags.Schema(), - "managed_resource_group_name": { Type: pluginsdk.TypeString, Optional: true, @@ -72,6 +71,20 @@ func 
resourceDatabricksWorkspace() *pluginsdk.Resource { ValidateFunc: validation.StringIsNotEmpty, }, + "customer_managed_key_enabled": { + Type: pluginsdk.TypeBool, + ForceNew: true, + Optional: true, + Default: false, + }, + + "infrastructure_encryption_enabled": { + Type: pluginsdk.TypeBool, + ForceNew: true, + Optional: true, + Default: false, + }, + "custom_parameters": { Type: pluginsdk.TypeList, Optional: true, @@ -79,31 +92,33 @@ func resourceDatabricksWorkspace() *pluginsdk.Resource { MaxItems: 1, Elem: &pluginsdk.Resource{ Schema: map[string]*pluginsdk.Schema{ + "machine_learning_workspace_id": { + Type: pluginsdk.TypeString, + ForceNew: true, + Optional: true, + ValidateFunc: azure.ValidateResourceID, + AtLeastOneOf: workspaceCustomParametersString(), + }, + "no_public_ip": { - Type: pluginsdk.TypeBool, - ForceNew: true, - Optional: true, - AtLeastOneOf: []string{"custom_parameters.0.no_public_ip", "custom_parameters.0.public_subnet_name", - "custom_parameters.0.private_subnet_name", "custom_parameters.0.virtual_network_id", - }, + Type: pluginsdk.TypeBool, + ForceNew: true, + Optional: true, + AtLeastOneOf: workspaceCustomParametersString(), }, "public_subnet_name": { - Type: pluginsdk.TypeString, - ForceNew: true, - Optional: true, - AtLeastOneOf: []string{"custom_parameters.0.no_public_ip", "custom_parameters.0.public_subnet_name", - "custom_parameters.0.private_subnet_name", "custom_parameters.0.virtual_network_id", - }, + Type: pluginsdk.TypeString, + ForceNew: true, + Optional: true, + AtLeastOneOf: workspaceCustomParametersString(), }, "private_subnet_name": { - Type: pluginsdk.TypeString, - ForceNew: true, - Optional: true, - AtLeastOneOf: []string{"custom_parameters.0.no_public_ip", "custom_parameters.0.public_subnet_name", - "custom_parameters.0.private_subnet_name", "custom_parameters.0.virtual_network_id", - }, + Type: pluginsdk.TypeString, + ForceNew: true, + Optional: true, + AtLeastOneOf: workspaceCustomParametersString(), }, 
"virtual_network_id": { @@ -111,9 +126,7 @@ func resourceDatabricksWorkspace() *pluginsdk.Resource { ForceNew: true, Optional: true, ValidateFunc: azure.ValidateResourceIDOrEmpty, - AtLeastOneOf: []string{"custom_parameters.0.no_public_ip", "custom_parameters.0.public_subnet_name", - "custom_parameters.0.private_subnet_name", "custom_parameters.0.virtual_network_id", - }, + AtLeastOneOf: workspaceCustomParametersString(), }, }, }, @@ -133,20 +146,51 @@ func resourceDatabricksWorkspace() *pluginsdk.Resource { Type: pluginsdk.TypeString, Computed: true, }, + + "storage_account_identity": { + Type: pluginsdk.TypeList, + Computed: true, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "principal_id": { + Type: pluginsdk.TypeString, + Sensitive: true, + Computed: true, + }, + "tenant_id": { + Type: pluginsdk.TypeString, + Sensitive: true, + Computed: true, + }, + "type": { + Type: pluginsdk.TypeString, + Computed: true, + }, + }, + }, + }, + + "tags": tags.Schema(), }, CustomizeDiff: pluginsdk.CustomizeDiffShim(func(ctx context.Context, d *pluginsdk.ResourceDiff, v interface{}) error { - if d.HasChange("sku") { - sku, changedSKU := d.GetChange("sku") + _, customerEncryptionEnabled := d.GetChange("customer_managed_key_enabled") + _, infrastructureEncryptionEnabled := d.GetChange("infrastructure_encryption_enabled") + oldSku, newSku := d.GetChange("sku") - if changedSKU == "trial" { - log.Printf("[DEBUG] recreate databricks workspace, could't be migrated to %s", changedSKU) + if d.HasChange("sku") { + if newSku == "trial" { + log.Printf("[DEBUG] recreate databricks workspace, cannot be migrated to %s", newSku) d.ForceNew("sku") } else { - log.Printf("[DEBUG] databricks workspace can be upgraded from %s to %s", sku, changedSKU) + log.Printf("[DEBUG] databricks workspace can be upgraded from %s to %s", oldSku, newSku) } } + if (customerEncryptionEnabled.(bool) || infrastructureEncryptionEnabled.(bool)) && !strings.EqualFold("premium", newSku.(string)) 
{ + return fmt.Errorf("'customer_managed_key_enabled' and 'infrastructure_encryption_enabled' are only available with a 'premium' workspace 'sku', got %q", newSku) + } + return nil }), } @@ -184,10 +228,13 @@ func resourceDatabricksWorkspaceCreateUpdate(d *pluginsdk.ResourceData, meta int log.Printf("[DEBUG][azurerm_databricks_workspace] no managed resource group id was provided, we use the default pattern.") managedResourceGroupName = fmt.Sprintf("databricks-rg-%s", id.ResourceGroup) } + managedResourceGroupID := resourcesParse.NewResourceGroupID(subscriptionId, managedResourceGroupName).ID() + customerEncryptionEnabled := d.Get("customer_managed_key_enabled").(bool) + infrastructureEncryptionEnabled := d.Get("infrastructure_encryption_enabled").(bool) customParamsRaw := d.Get("custom_parameters").([]interface{}) - customParams := expandWorkspaceCustomParameters(customParamsRaw) + customParams := expandWorkspaceCustomParameters(customParamsRaw, customerEncryptionEnabled, infrastructureEncryptionEnabled) // Including the Tags in the workspace parameters will update the tags on // the workspace only @@ -279,6 +326,18 @@ func resourceDatabricksWorkspaceRead(d *pluginsdk.ResourceData, meta interface{} return fmt.Errorf("setting `custom_parameters`: %+v", err) } + if err := d.Set("storage_account_identity", flattenWorkspaceStorageAccountIdentity(props.StorageAccountIdentity)); err != nil { + return fmt.Errorf("setting `storage_account_identity`: %+v", err) + } + + if props.Parameters != nil && props.Parameters.PrepareEncryption != nil { + d.Set("customer_managed_key_enabled", &props.Parameters.PrepareEncryption.Value) + } + + if props.Parameters != nil && props.Parameters.RequireInfrastructureEncryption != nil { + d.Set("infrastructure_encryption_enabled", &props.Parameters.RequireInfrastructureEncryption.Value) + } + d.Set("workspace_url", props.WorkspaceURL) d.Set("workspace_id", props.WorkspaceID) } @@ -310,32 +369,72 @@ func resourceDatabricksWorkspaceDelete(d 
*pluginsdk.ResourceData, meta interface return nil } -func flattenWorkspaceCustomParameters(p *databricks.WorkspaceCustomParameters) []interface{} { - if p == nil { +func flattenWorkspaceStorageAccountIdentity(input *databricks.ManagedIdentityConfiguration) []interface{} { + if input == nil { + return nil + } + + e := make(map[string]interface{}) + + if v := input; v != nil { + if t := v.PrincipalID; t != nil { + if t != nil { + e["principal_id"] = t.String() + } + } + + if t := v.TenantID; t != nil { + if t != nil { + e["tenant_id"] = t.String() + } + } + + if t := v.Type; t != nil { + if t != nil { + e["type"] = *t + } + } + + if len(e) != 0 { + return []interface{}{e} + } + } + + return []interface{}{e} +} + +func flattenWorkspaceCustomParameters(input *databricks.WorkspaceCustomParameters) []interface{} { + if input == nil { return nil } parameters := make(map[string]interface{}) - if v := p.EnableNoPublicIP; v != nil { + if v := input.AmlWorkspaceID; v != nil { + if v.Value != nil { + parameters["machine_learning_workspace_id"] = *v.Value + } + } + + if v := input.EnableNoPublicIP; v != nil { if v.Value != nil { parameters["no_public_ip"] = *v.Value } } - if v := p.CustomPrivateSubnetName; v != nil { + if v := input.CustomPrivateSubnetName; v != nil { if v.Value != nil { parameters["private_subnet_name"] = *v.Value } } - if v := p.CustomPublicSubnetName; v != nil { + if v := input.CustomPublicSubnetName; v != nil { if v.Value != nil { parameters["public_subnet_name"] = *v.Value } } - if v := p.CustomVirtualNetworkID; v != nil { + if v := input.CustomVirtualNetworkID; v != nil { if v.Value != nil { parameters["virtual_network_id"] = *v.Value } @@ -344,14 +443,33 @@ func flattenWorkspaceCustomParameters(p *databricks.WorkspaceCustomParameters) [ return []interface{}{parameters} } -func expandWorkspaceCustomParameters(input []interface{}) *databricks.WorkspaceCustomParameters { +func expandWorkspaceCustomParameters(input []interface{}, customerManagedKeyEnabled, 
infrastructureEncryptionEnabled bool) *databricks.WorkspaceCustomParameters { if len(input) == 0 || input[0] == nil { - return nil + // This will be hit when there are no custom params set but we still + // need to pass the customerManagedKeyEnabled and infrastructureEncryptionEnabled + // flags anyway... + parameters := databricks.WorkspaceCustomParameters{} + + parameters.PrepareEncryption = &databricks.WorkspaceCustomBooleanParameter{ + Value: &customerManagedKeyEnabled, + } + + parameters.RequireInfrastructureEncryption = &databricks.WorkspaceCustomBooleanParameter{ + Value: &infrastructureEncryptionEnabled, + } + + return ¶meters } config := input[0].(map[string]interface{}) parameters := databricks.WorkspaceCustomParameters{} + if v, ok := config["machine_learning_workspace_id"].(string); ok && v != "" { + parameters.AmlWorkspaceID = &databricks.WorkspaceCustomStringParameter{ + Value: &v, + } + } + if v, ok := config["no_public_ip"].(bool); ok { parameters.EnableNoPublicIP = &databricks.WorkspaceCustomBooleanParameter{ Value: &v, @@ -364,6 +482,14 @@ func expandWorkspaceCustomParameters(input []interface{}) *databricks.WorkspaceC } } + parameters.PrepareEncryption = &databricks.WorkspaceCustomBooleanParameter{ + Value: &customerManagedKeyEnabled, + } + + parameters.RequireInfrastructureEncryption = &databricks.WorkspaceCustomBooleanParameter{ + Value: &infrastructureEncryptionEnabled, + } + if v := config["private_subnet_name"].(string); v != "" { parameters.CustomPrivateSubnetName = &databricks.WorkspaceCustomStringParameter{ Value: &v, @@ -378,3 +504,8 @@ func expandWorkspaceCustomParameters(input []interface{}) *databricks.WorkspaceC return ¶meters } + +func workspaceCustomParametersString() []string { + return []string{"custom_parameters.0.machine_learning_workspace_id", "custom_parameters.0.no_public_ip", + "custom_parameters.0.public_subnet_name", "custom_parameters.0.private_subnet_name", "custom_parameters.0.virtual_network_id"} +} diff --git 
a/azurerm/internal/services/databricks/databricks_workspace_resource_test.go b/azurerm/internal/services/databricks/databricks_workspace_resource_test.go index 24168dcdc264..3a7caaf3beaf 100644 --- a/azurerm/internal/services/databricks/databricks_workspace_resource_test.go +++ b/azurerm/internal/services/databricks/databricks_workspace_resource_test.go @@ -3,7 +3,6 @@ package databricks_test import ( "context" "fmt" - "regexp" "testing" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" @@ -26,9 +25,21 @@ func TestAccDatabricksWorkspace_basic(t *testing.T) { Config: r.basic(data, "standard"), Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), - check.That(data.ResourceName).Key("managed_resource_group_id").Exists(), - acceptance.TestMatchResourceAttr(data.ResourceName, "workspace_url", regexp.MustCompile("azuredatabricks.net")), - check.That(data.ResourceName).Key("workspace_id").Exists(), + ), + }, + data.ImportStep(), + }) +} + +func TestAccDatabricksWorkspace_sameName(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_databricks_workspace", "test") + r := DatabricksWorkspaceResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.sameName(data, "standard"), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), @@ -50,6 +61,36 @@ func TestAccDatabricksWorkspace_requiresImport(t *testing.T) { }) } +func TestAccDatabricksWorkspace_machineLearning(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_databricks_workspace", "test") + r := DatabricksWorkspaceResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.machineLearning(data, "standard"), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccDatabricksWorkspace_infrastructureEncryption(t *testing.T) { + data := 
acceptance.BuildTestData(t, "azurerm_databricks_workspace", "test") + r := DatabricksWorkspaceResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.infrastructureEncryption(data, "premium"), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + func TestAccDatabricksWorkspace_complete(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_databricks_workspace", "test") r := DatabricksWorkspaceResource{} @@ -59,12 +100,6 @@ func TestAccDatabricksWorkspace_complete(t *testing.T) { Config: r.complete(data), Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), - check.That(data.ResourceName).Key("managed_resource_group_id").Exists(), - check.That(data.ResourceName).Key("managed_resource_group_name").Exists(), - check.That(data.ResourceName).Key("custom_parameters.0.virtual_network_id").Exists(), - check.That(data.ResourceName).Key("tags.%").HasValue("2"), - check.That(data.ResourceName).Key("tags.Environment").HasValue("Production"), - check.That(data.ResourceName).Key("tags.Pricing").HasValue("Standard"), ), }, data.ImportStep(), @@ -80,12 +115,6 @@ func TestAccDatabricksWorkspace_update(t *testing.T) { Config: r.complete(data), Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), - check.That(data.ResourceName).Key("managed_resource_group_id").Exists(), - check.That(data.ResourceName).Key("managed_resource_group_name").Exists(), - check.That(data.ResourceName).Key("custom_parameters.0.virtual_network_id").Exists(), - check.That(data.ResourceName).Key("tags.%").HasValue("2"), - check.That(data.ResourceName).Key("tags.Environment").HasValue("Production"), - check.That(data.ResourceName).Key("tags.Pricing").HasValue("Standard"), ), }, data.ImportStep(), @@ -93,10 +122,6 @@ func TestAccDatabricksWorkspace_update(t *testing.T) { Config: r.completeUpdate(data), Check: 
acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), - check.That(data.ResourceName).Key("managed_resource_group_id").Exists(), - check.That(data.ResourceName).Key("managed_resource_group_name").Exists(), - check.That(data.ResourceName).Key("tags.%").HasValue("1"), - check.That(data.ResourceName).Key("tags.Pricing").HasValue("Standard"), ), }, data.ImportStep(), @@ -166,6 +191,28 @@ resource "azurerm_databricks_workspace" "test" { `, data.RandomInteger, data.Locations.Primary, data.RandomInteger, sku) } +func (DatabricksWorkspaceResource) sameName(data acceptance.TestData, sku string) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-db-%d" + location = "%s" +} + +resource "azurerm_databricks_workspace" "test" { + name = "acctestDBW-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + sku = "%s" + + managed_resource_group_name = "acctestDBW-%d" +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, sku, data.RandomInteger) +} + func (DatabricksWorkspaceResource) requiresImport(data acceptance.TestData) string { template := DatabricksWorkspaceResource{}.basic(data, "standard") return fmt.Sprintf(` @@ -187,8 +234,7 @@ provider "azurerm" { } resource "azurerm_resource_group" "test" { - name = "acctestRG-db-%[1]d" - + name = "acctestRG-db-%[1]d" location = "%[2]s" } @@ -203,7 +249,7 @@ resource "azurerm_subnet" "public" { name = "acctest-sn-public-%[1]d" resource_group_name = azurerm_resource_group.test.name virtual_network_name = azurerm_virtual_network.test.name - address_prefix = "10.0.1.0/24" + address_prefixes = ["10.0.1.0/24"] delegation { name = "acctest" @@ -224,7 +270,7 @@ resource "azurerm_subnet" "private" { name = "acctest-sn-private-%[1]d" resource_group_name = azurerm_resource_group.test.name virtual_network_name = azurerm_virtual_network.test.name - address_prefix = 
"10.0.2.0/24" + address_prefixes = ["10.0.2.0/24"] delegation { name = "acctest" @@ -258,6 +304,8 @@ resource "azurerm_subnet_network_security_group_association" "private" { } resource "azurerm_databricks_workspace" "test" { + depends_on = [azurerm_subnet_network_security_group_association.public, azurerm_subnet_network_security_group_association.private] + name = "acctestDBW-%[1]d" resource_group_name = azurerm_resource_group.test.name location = azurerm_resource_group.test.location @@ -286,24 +334,185 @@ provider "azurerm" { } resource "azurerm_resource_group" "test" { - name = "acctestRG-db-%d" - location = "%s" + name = "acctestRG-db-%[1]d" + location = "%[2]s" +} + +resource "azurerm_virtual_network" "test" { + name = "acctest-vnet-%[1]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + address_space = ["10.0.0.0/16"] +} + +resource "azurerm_subnet" "public" { + name = "acctest-sn-public-%[1]d" + resource_group_name = azurerm_resource_group.test.name + virtual_network_name = azurerm_virtual_network.test.name + address_prefixes = ["10.0.1.0/24"] + + delegation { + name = "acctest" + + service_delegation { + name = "Microsoft.Databricks/workspaces" + + actions = [ + "Microsoft.Network/virtualNetworks/subnets/join/action", + "Microsoft.Network/virtualNetworks/subnets/prepareNetworkPolicies/action", + "Microsoft.Network/virtualNetworks/subnets/unprepareNetworkPolicies/action", + ] + } + } +} + +resource "azurerm_subnet" "private" { + name = "acctest-sn-private-%[1]d" + resource_group_name = azurerm_resource_group.test.name + virtual_network_name = azurerm_virtual_network.test.name + address_prefixes = ["10.0.2.0/24"] + + delegation { + name = "acctest" + + service_delegation { + name = "Microsoft.Databricks/workspaces" + + actions = [ + "Microsoft.Network/virtualNetworks/subnets/join/action", + "Microsoft.Network/virtualNetworks/subnets/prepareNetworkPolicies/action", + 
"Microsoft.Network/virtualNetworks/subnets/unprepareNetworkPolicies/action", + ] + } + } +} + +resource "azurerm_network_security_group" "nsg" { + name = "acctest-nsg-private-%[1]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name +} + +resource "azurerm_subnet_network_security_group_association" "public" { + subnet_id = azurerm_subnet.public.id + network_security_group_id = azurerm_network_security_group.nsg.id +} + +resource "azurerm_subnet_network_security_group_association" "private" { + subnet_id = azurerm_subnet.private.id + network_security_group_id = azurerm_network_security_group.nsg.id } resource "azurerm_databricks_workspace" "test" { - name = "acctestDBW-%d" + depends_on = [azurerm_subnet_network_security_group_association.public, azurerm_subnet_network_security_group_association.private] + + name = "acctestDBW-%[1]d" resource_group_name = azurerm_resource_group.test.name location = azurerm_resource_group.test.location sku = "standard" - managed_resource_group_name = "acctestRG-DBW-%d-managed" + managed_resource_group_name = "acctestRG-DBW-%[1]d-managed" + + custom_parameters { + no_public_ip = true + public_subnet_name = azurerm_subnet.public.name + private_subnet_name = azurerm_subnet.private.name + virtual_network_id = azurerm_virtual_network.test.id + } tags = { Pricing = "Standard" } +} +`, data.RandomInteger, data.Locations.Primary) +} + +func (DatabricksWorkspaceResource) machineLearning(data acceptance.TestData, sku string) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +data "azurerm_client_config" "current" {} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-db-%[1]d" + location = "%[2]s" +} + +resource "azurerm_application_insights" "test" { + name = "acctest-ai-%[1]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + application_type = "web" +} + +resource "azurerm_key_vault" "test" { 
+ name = "acctest-kv-%[4]s" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + tenant_id = data.azurerm_client_config.current.tenant_id + sku_name = "premium" + + purge_protection_enabled = true +} + +resource "azurerm_storage_account" "test" { + name = "acctestsa%[4]s" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + account_tier = "Standard" + account_replication_type = "LRS" + + static_website { + error_404_document = "error.html" + index_document = "index.html" + } +} + +resource "azurerm_machine_learning_workspace" "test" { + name = "acctest-mlws-%[1]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + application_insights_id = azurerm_application_insights.test.id + key_vault_id = azurerm_key_vault.test.id + storage_account_id = azurerm_storage_account.test.id + + identity { + type = "SystemAssigned" + } +} + +resource "azurerm_databricks_workspace" "test" { + name = "acctestDBW-%[1]d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + sku = "%[3]s" custom_parameters { - no_public_ip = false + machine_learning_workspace_id = azurerm_machine_learning_workspace.test.id } } -`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger) +`, data.RandomInteger, data.Locations.Primary, sku, data.RandomString) +} + +func (DatabricksWorkspaceResource) infrastructureEncryption(data acceptance.TestData, sku string) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-db-%[1]d" + location = "%[2]s" +} + +resource "azurerm_databricks_workspace" "test" { + name = "acctestDBW-%[1]d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + sku = "%[3]s" + + 
infrastructure_encryption_enabled = true +} +`, data.RandomInteger, data.Locations.Primary, sku, data.RandomString) } diff --git a/azurerm/internal/services/databricks/parse/customer_managed_key.go b/azurerm/internal/services/databricks/parse/customer_managed_key.go new file mode 100644 index 000000000000..4cd836e3acbc --- /dev/null +++ b/azurerm/internal/services/databricks/parse/customer_managed_key.go @@ -0,0 +1,69 @@ +package parse + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "fmt" + "strings" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" +) + +type CustomerManagedKeyId struct { + SubscriptionId string + ResourceGroup string + CustomerMangagedKeyName string +} + +func NewCustomerManagedKeyID(subscriptionId, resourceGroup, customerMangagedKeyName string) CustomerManagedKeyId { + return CustomerManagedKeyId{ + SubscriptionId: subscriptionId, + ResourceGroup: resourceGroup, + CustomerMangagedKeyName: customerMangagedKeyName, + } +} + +func (id CustomerManagedKeyId) String() string { + segments := []string{ + fmt.Sprintf("Customer Mangaged Key Name %q", id.CustomerMangagedKeyName), + fmt.Sprintf("Resource Group %q", id.ResourceGroup), + } + segmentsStr := strings.Join(segments, " / ") + return fmt.Sprintf("%s: (%s)", "Customer Managed Key", segmentsStr) +} + +func (id CustomerManagedKeyId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Databricks/customerMangagedKey/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.CustomerMangagedKeyName) +} + +// CustomerManagedKeyID parses a CustomerManagedKey ID into an CustomerManagedKeyId struct +func CustomerManagedKeyID(input string) (*CustomerManagedKeyId, error) { + id, err := azure.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := CustomerManagedKeyId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + 
} + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + if resourceId.CustomerMangagedKeyName, err = id.PopSegment("customerMangagedKey"); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} diff --git a/azurerm/internal/services/databricks/parse/customer_managed_key_test.go b/azurerm/internal/services/databricks/parse/customer_managed_key_test.go new file mode 100644 index 000000000000..7a212495fdfa --- /dev/null +++ b/azurerm/internal/services/databricks/parse/customer_managed_key_test.go @@ -0,0 +1,112 @@ +package parse + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "testing" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/resourceid" +) + +var _ resourceid.Formatter = CustomerManagedKeyId{} + +func TestCustomerManagedKeyIDFormatter(t *testing.T) { + actual := NewCustomerManagedKeyID("12345678-1234-9876-4563-123456789012", "resGroup1", "workspace1").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Databricks/customerMangagedKey/workspace1" + if actual != expected { + t.Fatalf("Expected %q but got %q", expected, actual) + } +} + +func TestCustomerManagedKeyID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *CustomerManagedKeyId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/", + Error: true, + }, + + { + // missing value for ResourceGroup + 
Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/", + Error: true, + }, + + { + // missing CustomerMangagedKeyName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Databricks/", + Error: true, + }, + + { + // missing value for CustomerMangagedKeyName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Databricks/customerMangagedKey/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Databricks/customerMangagedKey/workspace1", + Expected: &CustomerManagedKeyId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroup: "resGroup1", + CustomerMangagedKeyName: "workspace1", + }, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/RESGROUP1/PROVIDERS/MICROSOFT.DATABRICKS/CUSTOMERMANGAGEDKEY/WORKSPACE1", + Error: true, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := CustomerManagedKeyID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.CustomerMangagedKeyName != v.Expected.CustomerMangagedKeyName { + t.Fatalf("Expected %q but got %q for CustomerMangagedKeyName", v.Expected.CustomerMangagedKeyName, actual.CustomerMangagedKeyName) + } + } +} diff --git a/azurerm/internal/services/databricks/registration.go 
b/azurerm/internal/services/databricks/registration.go index 2e1e068e913d..f714056b2ab7 100644 --- a/azurerm/internal/services/databricks/registration.go +++ b/azurerm/internal/services/databricks/registration.go @@ -28,6 +28,7 @@ func (r Registration) SupportedDataSources() map[string]*pluginsdk.Resource { // SupportedResources returns the supported Resources supported by this Service func (r Registration) SupportedResources() map[string]*pluginsdk.Resource { return map[string]*pluginsdk.Resource{ - "azurerm_databricks_workspace": resourceDatabricksWorkspace(), + "azurerm_databricks_workspace": resourceDatabricksWorkspace(), + "azurerm_databricks_workspace_customer_managed_key": resourceDatabricksWorkspaceCustomerManagedKey(), } } diff --git a/azurerm/internal/services/databricks/resourceids.go b/azurerm/internal/services/databricks/resourceids.go index 86f381b624ce..4f7244e9f073 100644 --- a/azurerm/internal/services/databricks/resourceids.go +++ b/azurerm/internal/services/databricks/resourceids.go @@ -1,3 +1,4 @@ package databricks //go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=Workspace -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Databricks/workspaces/workspace1 +//go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=CustomerManagedKey -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Databricks/customerMangagedKey/workspace1 diff --git a/azurerm/internal/services/databricks/validate/customer_managed_key_id.go b/azurerm/internal/services/databricks/validate/customer_managed_key_id.go new file mode 100644 index 000000000000..11bd9a600555 --- /dev/null +++ b/azurerm/internal/services/databricks/validate/customer_managed_key_id.go @@ -0,0 +1,23 @@ +package validate + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "fmt" + + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/databricks/parse" +) + +func CustomerManagedKeyID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := parse.CustomerManagedKeyID(v); err != nil { + errors = append(errors, err) + } + + return +} diff --git a/azurerm/internal/services/databricks/validate/customer_managed_key_id_test.go b/azurerm/internal/services/databricks/validate/customer_managed_key_id_test.go new file mode 100644 index 000000000000..a713d69bfbad --- /dev/null +++ b/azurerm/internal/services/databricks/validate/customer_managed_key_id_test.go @@ -0,0 +1,76 @@ +package validate + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import "testing" + +func TestCustomerManagedKeyID(t *testing.T) { + cases := []struct { + Input string + Valid bool + }{ + + { + // empty + Input: "", + Valid: false, + }, + + { + // missing SubscriptionId + Input: "/", + Valid: false, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Valid: false, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/", + Valid: false, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/", + Valid: false, + }, + + { + // missing CustomerMangagedKeyName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Databricks/", + Valid: false, + }, + + { + // missing value for CustomerMangagedKeyName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Databricks/customerMangagedKey/", + Valid: false, + }, + + { + // valid + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Databricks/customerMangagedKey/workspace1", + Valid: true, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/RESGROUP1/PROVIDERS/MICROSOFT.DATABRICKS/CUSTOMERMANGAGEDKEY/WORKSPACE1", + Valid: false, + }, + } + for _, tc := range cases { + t.Logf("[DEBUG] Testing Value %s", tc.Input) + _, errors := CustomerManagedKeyID(tc.Input, "test") + valid := len(errors) == 0 + + if tc.Valid != valid { + t.Fatalf("Expected %t but got %t", tc.Valid, valid) + } + } +} diff --git a/azurerm/internal/services/datafactory/client/client.go b/azurerm/internal/services/datafactory/client/client.go index 2ad8d5e5bc5c..22121beb5f18 100644 --- a/azurerm/internal/services/datafactory/client/client.go +++ b/azurerm/internal/services/datafactory/client/client.go @@ -6,12 +6,14 @@ import ( ) type Client struct { - DatasetClient *datafactory.DatasetsClient - FactoriesClient *datafactory.FactoriesClient - IntegrationRuntimesClient *datafactory.IntegrationRuntimesClient - LinkedServiceClient *datafactory.LinkedServicesClient - PipelinesClient *datafactory.PipelinesClient - TriggersClient *datafactory.TriggersClient + DatasetClient *datafactory.DatasetsClient + FactoriesClient *datafactory.FactoriesClient + IntegrationRuntimesClient *datafactory.IntegrationRuntimesClient + LinkedServiceClient *datafactory.LinkedServicesClient + ManagedPrivateEndpointsClient *datafactory.ManagedPrivateEndpointsClient + ManagedVirtualNetworksClient *datafactory.ManagedVirtualNetworksClient + PipelinesClient *datafactory.PipelinesClient + TriggersClient *datafactory.TriggersClient } func NewClient(o *common.ClientOptions) *Client { @@ -27,6 +29,12 @@ func NewClient(o *common.ClientOptions) *Client { LinkedServiceClient := datafactory.NewLinkedServicesClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) o.ConfigureClient(&LinkedServiceClient.Client, 
o.ResourceManagerAuthorizer) + ManagedPrivateEndpointsClient := datafactory.NewManagedPrivateEndpointsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) + o.ConfigureClient(&ManagedPrivateEndpointsClient.Client, o.ResourceManagerAuthorizer) + + ManagedVirtualNetworksClient := datafactory.NewManagedVirtualNetworksClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) + o.ConfigureClient(&ManagedVirtualNetworksClient.Client, o.ResourceManagerAuthorizer) + PipelinesClient := datafactory.NewPipelinesClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) o.ConfigureClient(&PipelinesClient.Client, o.ResourceManagerAuthorizer) @@ -34,11 +42,13 @@ func NewClient(o *common.ClientOptions) *Client { o.ConfigureClient(&TriggersClient.Client, o.ResourceManagerAuthorizer) return &Client{ - DatasetClient: &DatasetClient, - FactoriesClient: &FactoriesClient, - IntegrationRuntimesClient: &IntegrationRuntimesClient, - LinkedServiceClient: &LinkedServiceClient, - PipelinesClient: &PipelinesClient, - TriggersClient: &TriggersClient, + DatasetClient: &DatasetClient, + FactoriesClient: &FactoriesClient, + IntegrationRuntimesClient: &IntegrationRuntimesClient, + LinkedServiceClient: &LinkedServiceClient, + ManagedPrivateEndpointsClient: &ManagedPrivateEndpointsClient, + ManagedVirtualNetworksClient: &ManagedVirtualNetworksClient, + PipelinesClient: &PipelinesClient, + TriggersClient: &TriggersClient, } } diff --git a/azurerm/internal/services/datafactory/data_factory.go b/azurerm/internal/services/datafactory/data_factory.go index 3a4acd2905ec..2401fd362d45 100644 --- a/azurerm/internal/services/datafactory/data_factory.go +++ b/azurerm/internal/services/datafactory/data_factory.go @@ -218,7 +218,7 @@ func suppressJsonOrderingDifference(_, old, new string, _ *pluginsdk.ResourceDat return utils.NormalizeJson(old) == utils.NormalizeJson(new) } -func expandAzureKeyVaultPassword(input []interface{}) *datafactory.AzureKeyVaultSecretReference { +func 
expandAzureKeyVaultSecretReference(input []interface{}) *datafactory.AzureKeyVaultSecretReference { if len(input) == 0 || input[0] == nil { return nil } @@ -234,7 +234,25 @@ func expandAzureKeyVaultPassword(input []interface{}) *datafactory.AzureKeyVault } } -func flattenAzureKeyVaultPassword(secretReference *datafactory.AzureKeyVaultSecretReference) []interface{} { +func flattenAzureKeyVaultConnectionString(input map[string]interface{}) []interface{} { + if input == nil { + return nil + } + + parameters := make(map[string]interface{}) + + if v, ok := input["store"].(map[string]interface{}); ok { + if v != nil { + parameters["linked_service_name"] = v["referenceName"].(string) + } + } + + parameters["secret_name"] = input["secretName"] + + return []interface{}{parameters} +} + +func flattenAzureKeyVaultSecretReference(secretReference *datafactory.AzureKeyVaultSecretReference) []interface{} { if secretReference == nil { return nil } @@ -261,34 +279,83 @@ func expandDataFactoryDatasetLocation(d *pluginsdk.ResourceData) datafactory.Bas return expandDataFactoryDatasetAzureBlobStorageLocation(d) } + if _, ok := d.GetOk("azure_blob_fs_location"); ok { + return expandDataFactoryDatasetAzureBlobFSLocation(d) + } + + if _, ok := d.GetOk("sftp_server_location"); ok { + return expandDataFactoryDatasetSFTPServerLocation(d) + } + return nil } +func expandDataFactoryDatasetSFTPServerLocation(d *pluginsdk.ResourceData) datafactory.BasicDatasetLocation { + sftpServerLocations := d.Get("sftp_server_location").([]interface{}) + if len(sftpServerLocations) == 0 || sftpServerLocations[0] == nil { + return nil + } + + props := sftpServerLocations[0].(map[string]interface{}) + + sftpServerLocation := datafactory.SftpLocation{ + FolderPath: props["path"].(string), + FileName: props["filename"].(string), + } + return sftpServerLocation +} + func expandDataFactoryDatasetHttpServerLocation(d *pluginsdk.ResourceData) datafactory.BasicDatasetLocation { - props := 
d.Get("http_server_location").([]interface{})[0].(map[string]interface{}) - relativeUrl := props["relative_url"].(string) - path := props["path"].(string) - filename := props["filename"].(string) + httpServerLocations := d.Get("http_server_location").([]interface{}) + if len(httpServerLocations) == 0 || httpServerLocations[0] == nil { + return nil + } + + props := httpServerLocations[0].(map[string]interface{}) httpServerLocation := datafactory.HTTPServerLocation{ - RelativeURL: relativeUrl, - FolderPath: path, - FileName: filename, + RelativeURL: props["relative_url"].(string), + FolderPath: props["path"].(string), + FileName: props["filename"].(string), } return httpServerLocation } func expandDataFactoryDatasetAzureBlobStorageLocation(d *pluginsdk.ResourceData) datafactory.BasicDatasetLocation { - props := d.Get("azure_blob_storage_location").([]interface{})[0].(map[string]interface{}) - container := props["container"].(string) - path := props["path"].(string) - filename := props["filename"].(string) + azureBlobStorageLocations := d.Get("azure_blob_storage_location").([]interface{}) + if len(azureBlobStorageLocations) == 0 || azureBlobStorageLocations[0] == nil { + return nil + } + + props := azureBlobStorageLocations[0].(map[string]interface{}) blobStorageLocation := datafactory.AzureBlobStorageLocation{ - Container: container, - FolderPath: path, - FileName: filename, + Container: props["container"].(string), + FolderPath: props["path"].(string), + FileName: props["filename"].(string), + } + return blobStorageLocation +} + +func expandDataFactoryDatasetAzureBlobFSLocation(d *pluginsdk.ResourceData) datafactory.BasicDatasetLocation { + azureBlobFsLocations := d.Get("azure_blob_fs_location").([]interface{}) + if len(azureBlobFsLocations) == 0 || azureBlobFsLocations[0] == nil { + return nil + } + + props := azureBlobFsLocations[0].(map[string]interface{}) + + blobStorageLocation := datafactory.AzureBlobFSLocation{ + FileSystem: props["file_system"].(string), + 
Type: datafactory.TypeBasicDatasetLocationTypeAzureBlobFSLocation, + } + if path := props["path"].(string); len(path) > 0 { + blobStorageLocation.FolderPath = path } + if filename := props["filename"].(string); len(filename) > 0 { + blobStorageLocation.FileName = filename + } + return blobStorageLocation } @@ -329,3 +396,126 @@ func flattenDataFactoryDatasetAzureBlobStorageLocation(input *datafactory.AzureB return []interface{}{result} } + +func flattenDataFactoryDatasetAzureBlobFSLocation(input *datafactory.AzureBlobFSLocation) []interface{} { + if input == nil { + return []interface{}{} + } + + fileSystem, path, fileName := "", "", "" + if input.FileSystem != nil { + if v, ok := input.FileSystem.(string); ok { + fileSystem = v + } + } + if input.FolderPath != nil { + if v, ok := input.FolderPath.(string); ok { + path = v + } + } + if input.FileName != nil { + if v, ok := input.FileName.(string); ok { + fileName = v + } + } + + return []interface{}{ + map[string]interface{}{ + "file_system": fileSystem, + "path": path, + "filename": fileName, + }, + } +} +func flattenDataFactoryDatasetSFTPLocation(input *datafactory.SftpLocation) []interface{} { + if input == nil { + return nil + } + result := make(map[string]interface{}) + + if input.FolderPath != nil { + result["path"] = input.FolderPath + } + if input.FileName != nil { + result["filename"] = input.FileName + } + + return []interface{}{result} +} + +func flattenDataFactoryDatasetCompression(input datafactory.BasicDatasetCompression) []interface{} { + if input == nil { + return nil + } + result := make(map[string]interface{}) + + if compression, ok := input.AsDatasetBZip2Compression(); ok { + result["type"] = compression.Type + } + if compression, ok := input.AsDatasetDeflateCompression(); ok { + result["type"] = compression.Type + } + if compression, ok := input.AsDatasetGZipCompression(); ok { + result["type"] = compression.Type + result["level"] = compression.Level + } + if compression, ok := 
input.AsDatasetTarCompression(); ok { + result["type"] = compression.Type + } + if compression, ok := input.AsDatasetTarGZipCompression(); ok { + result["type"] = compression.Type + result["level"] = compression.Level + } + if compression, ok := input.AsDatasetZipDeflateCompression(); ok { + result["type"] = compression.Type + result["level"] = compression.Level + } + + return []interface{}{result} +} + +func expandDataFactoryDatasetCompression(d *pluginsdk.ResourceData) datafactory.BasicDatasetCompression { + compression := d.Get("compression").([]interface{}) + if len(compression) == 0 || compression[0] == nil { + return nil + } + props := compression[0].(map[string]interface{}) + level := props["level"].(string) + compressionType := props["type"].(string) + + if datafactory.TypeBasicDatasetCompression(compressionType) == datafactory.TypeBasicDatasetCompressionTypeBZip2 { + return datafactory.DatasetBZip2Compression{ + Type: datafactory.TypeBasicDatasetCompression(compressionType), + } + } + if datafactory.TypeBasicDatasetCompression(compressionType) == datafactory.TypeBasicDatasetCompressionTypeDeflate { + return datafactory.DatasetDeflateCompression{ + Type: datafactory.TypeBasicDatasetCompression(compressionType), + } + } + if datafactory.TypeBasicDatasetCompression(compressionType) == datafactory.TypeBasicDatasetCompressionTypeGZip { + return datafactory.DatasetGZipCompression{ + Type: datafactory.TypeBasicDatasetCompression(compressionType), + Level: level, + } + } + if datafactory.TypeBasicDatasetCompression(compressionType) == datafactory.TypeBasicDatasetCompressionTypeTar { + return datafactory.DatasetTarCompression{ + Type: datafactory.TypeBasicDatasetCompression(compressionType), + } + } + if datafactory.TypeBasicDatasetCompression(compressionType) == datafactory.TypeBasicDatasetCompressionTypeTarGZip { + return datafactory.DatasetTarGZipCompression{ + Type: datafactory.TypeBasicDatasetCompression(compressionType), + Level: level, + } + } + if 
datafactory.TypeBasicDatasetCompression(compressionType) == datafactory.TypeBasicDatasetCompressionTypeZipDeflate { + return datafactory.DatasetZipDeflateCompression{ + Type: datafactory.TypeBasicDatasetCompression(compressionType), + Level: level, + } + } + + return nil +} diff --git a/azurerm/internal/services/datafactory/data_factory_custom_dataset_resource.go b/azurerm/internal/services/datafactory/data_factory_custom_dataset_resource.go new file mode 100644 index 000000000000..11e8f87a906e --- /dev/null +++ b/azurerm/internal/services/datafactory/data_factory_custom_dataset_resource.go @@ -0,0 +1,403 @@ +package datafactory + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/datafactory/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/datafactory/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func resourceDataFactoryCustomDataset() *pluginsdk.Resource { + return &pluginsdk.Resource{ + Create: resourceDataFactoryCustomDatasetCreateUpdate, + Read: resourceDataFactoryCustomDatasetRead, + Update: resourceDataFactoryCustomDatasetCreateUpdate, + Delete: resourceDataFactoryCustomDatasetDelete, + + Importer: pluginsdk.ImporterValidatingResourceId(func(id string) error { + _, err := parse.DataSetID(id) + return err + }), + + Timeouts: &pluginsdk.ResourceTimeout{ + Create: 
pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), + }, + + Schema: map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.LinkedServiceDatasetName, + }, + + "data_factory_id": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.DataFactoryID, + }, + + "linked_service": { + Type: pluginsdk.TypeList, + Required: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + + "parameters": { + Type: pluginsdk.TypeMap, + Optional: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + }, + }, + }, + }, + }, + + "type": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + }, + + "type_properties_json": { + Type: pluginsdk.TypeString, + Required: true, + StateFunc: utils.NormalizeJson, + DiffSuppressFunc: suppressJsonOrderingDifference, + }, + + "additional_properties": { + Type: pluginsdk.TypeMap, + Optional: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + }, + }, + + "annotations": { + Type: pluginsdk.TypeList, + Optional: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + }, + }, + + "description": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + + "folder": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + + "parameters": { + Type: pluginsdk.TypeMap, + Optional: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + }, + }, + + "schema_json": { + Type: pluginsdk.TypeString, + Optional: true, + StateFunc: utils.NormalizeJson, + DiffSuppressFunc: suppressJsonOrderingDifference, + }, + }, + } +} 
+ +func resourceDataFactoryCustomDatasetCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).DataFactory.DatasetClient + subscriptionId := meta.(*clients.Client).Account.SubscriptionId + ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) + defer cancel() + + dataFactoryId, err := parse.DataFactoryID(d.Get("data_factory_id").(string)) + if err != nil { + return err + } + + id := parse.NewDataSetID(subscriptionId, dataFactoryId.ResourceGroup, dataFactoryId.FactoryName, d.Get("name").(string)) + if d.IsNewResource() { + existing, err := client.Get(ctx, id.ResourceGroup, id.FactoryName, id.Name, "") + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("checking for presence of existing %s: %+v", id, err) + } + } + if !utils.ResponseWasNotFound(existing.Response) { + return tf.ImportAsExistsError("azurerm_data_factory_custom_dataset", id.ID()) + } + } + + props := map[string]interface{}{ + "type": d.Get("type").(string), + "linkedServiceName": expandDataFactoryLinkedService(d.Get("linked_service").([]interface{})), + } + + typePropertiesJson := fmt.Sprintf(`{ "typeProperties": %s }`, d.Get("type_properties_json").(string)) + if err = json.Unmarshal([]byte(typePropertiesJson), &props); err != nil { + return err + } + + additionalProperties := d.Get("additional_properties").(map[string]interface{}) + for k, v := range additionalProperties { + props[k] = v + } + + if v, ok := d.GetOk("annotations"); ok { + props["annotations"] = v.([]interface{}) + } + + if v, ok := d.GetOk("description"); ok { + props["description"] = v.(string) + } + + if v, ok := d.GetOk("folder"); ok { + props["folder"] = &datafactory.DatasetFolder{ + Name: utils.String(v.(string)), + } + } + + if v, ok := d.GetOk("parameters"); ok { + props["parameters"] = expandDataFactoryParameters(v.(map[string]interface{})) + } + + if v, ok := d.GetOk("schema_json"); ok { + schemaJson := fmt.Sprintf(`{ 
"schema": %s }`, v.(string)) + if err = json.Unmarshal([]byte(schemaJson), &props); err != nil { + return err + } + } + + jsonData, err := json.Marshal(map[string]interface{}{ + "properties": props, + }) + if err != nil { + return err + } + + dataset := &datafactory.DatasetResource{} + if err := dataset.UnmarshalJSON(jsonData); err != nil { + return err + } + + if _, err := client.CreateOrUpdate(ctx, id.ResourceGroup, id.FactoryName, id.Name, *dataset, ""); err != nil { + return fmt.Errorf("creating/updating %s: %+v", id, err) + } + + d.SetId(id.ID()) + + return resourceDataFactoryCustomDatasetRead(d, meta) +} + +func resourceDataFactoryCustomDatasetRead(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).DataFactory.DatasetClient + subscriptionId := meta.(*clients.Client).Account.SubscriptionId + ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.DataSetID(d.Id()) + if err != nil { + return err + } + + resp, err := client.Get(ctx, id.ResourceGroup, id.FactoryName, id.Name, "") + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + d.SetId("") + return nil + } + + return fmt.Errorf("retrieving %s: %+v", id, err) + } + + d.Set("name", id.Name) + d.Set("data_factory_id", parse.NewDataFactoryID(subscriptionId, id.ResourceGroup, id.FactoryName).ID()) + + byteArr, err := json.Marshal(resp.Properties) + if err != nil { + return err + } + + var m map[string]*json.RawMessage + if err = json.Unmarshal(byteArr, &m); err != nil { + return err + } + + description := "" + if v, ok := m["description"]; ok && v != nil { + if err := json.Unmarshal(*v, &description); err != nil { + return err + } + delete(m, "description") + } + d.Set("description", description) + + t := "" + if v, ok := m["type"]; ok && v != nil { + if err := json.Unmarshal(*v, &t); err != nil { + return err + } + delete(m, "type") + } + d.Set("type", t) + + folder := "" + if v, ok := m["folder"]; ok && v != 
nil { + datasetFolder := &datafactory.DatasetFolder{} + if err := json.Unmarshal(*v, datasetFolder); err != nil { + return err + } + if datasetFolder.Name != nil { + folder = *datasetFolder.Name + } + delete(m, "folder") + } + d.Set("folder", folder) + + annotations := make([]interface{}, 0) + if v, ok := m["annotations"]; ok && v != nil { + if err := json.Unmarshal(*v, &annotations); err != nil { + return err + } + delete(m, "annotations") + } + d.Set("annotations", annotations) + + parameters := make(map[string]*datafactory.ParameterSpecification) + if v, ok := m["parameters"]; ok && v != nil { + if err := json.Unmarshal(*v, ¶meters); err != nil { + return err + } + delete(m, "parameters") + } + if err := d.Set("parameters", flattenDataFactoryParameters(parameters)); err != nil { + return fmt.Errorf("setting `parameters`: %+v", err) + } + + var linkedService *datafactory.LinkedServiceReference + if v, ok := m["linkedServiceName"]; ok && v != nil { + linkedService = &datafactory.LinkedServiceReference{} + if err := json.Unmarshal(*v, linkedService); err != nil { + return err + } + delete(m, "linkedServiceName") + } + if err := d.Set("linked_service", flattenDataFactoryLinkedService(linkedService)); err != nil { + return fmt.Errorf("setting `linked_service`: %+v", err) + } + + // set "schema" + schemaJson := "" + if v, ok := m["schema"]; ok { + schemaBytes, err := json.Marshal(v) + if err != nil { + return err + } + schemaJson = string(schemaBytes) + delete(m, "schema") + } + d.Set("schema_json", schemaJson) + + // set "type_properties_json" + typePropertiesJson := "" + if v, ok := m["typeProperties"]; ok { + typePropertiesBytes, err := json.Marshal(v) + if err != nil { + return err + } + typePropertiesJson = string(typePropertiesBytes) + delete(m, "typeProperties") + } + d.Set("type_properties_json", typePropertiesJson) + + delete(m, "structure") + + // set "additional_properties" + additionalProperties := make(map[string]interface{}) + bytes, err := 
json.Marshal(m) + if err != nil { + return err + } + if err := json.Unmarshal(bytes, &additionalProperties); err != nil { + return err + } + d.Set("additional_properties", additionalProperties) + + return nil +} + +func resourceDataFactoryCustomDatasetDelete(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).DataFactory.DatasetClient + ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.DataSetID(d.Id()) + if err != nil { + return err + } + + if _, err := client.Delete(ctx, id.ResourceGroup, id.FactoryName, id.Name); err != nil { + return fmt.Errorf("deleting %s: %+v", id, err) + } + + return nil +} + +func expandDataFactoryLinkedService(input []interface{}) *datafactory.LinkedServiceReference { + if len(input) == 0 || input[0] == nil { + return nil + } + + v := input[0].(map[string]interface{}) + return &datafactory.LinkedServiceReference{ + ReferenceName: utils.String(v["name"].(string)), + Type: utils.String("LinkedServiceReference"), + Parameters: v["parameters"].(map[string]interface{}), + } +} + +func flattenDataFactoryLinkedService(input *datafactory.LinkedServiceReference) []interface{} { + if input == nil { + return []interface{}{} + } + + name := "" + if input.ReferenceName != nil { + name = *input.ReferenceName + } + + return []interface{}{ + map[string]interface{}{ + "name": name, + "parameters": input.Parameters, + }, + } +} diff --git a/azurerm/internal/services/datafactory/data_factory_custom_dataset_resource_test.go b/azurerm/internal/services/datafactory/data_factory_custom_dataset_resource_test.go new file mode 100644 index 000000000000..a2f2404a05ca --- /dev/null +++ b/azurerm/internal/services/datafactory/data_factory_custom_dataset_resource_test.go @@ -0,0 +1,393 @@ +package datafactory_test + +import ( + "context" + "fmt" + "testing" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/datafactory/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +type CustomDatasetResource struct { +} + +func TestAccDataFactoryCustomDataset_basic(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_data_factory_custom_dataset", "test") + r := CustomDatasetResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccDataFactoryCustomDataset_requiresImport(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_data_factory_custom_dataset", "test") + r := CustomDatasetResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.RequiresImportErrorStep(r.requiresImport), + }) +} + +func TestAccDataFactoryCustomDataset_complete(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_data_factory_custom_dataset", "test") + r := CustomDatasetResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.complete(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccDataFactoryCustomDataset_update(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_data_factory_custom_dataset", "test") + r := CustomDatasetResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: 
acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.complete(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccDataFactoryCustomDataset_delimitedText(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_data_factory_custom_dataset", "test") + r := CustomDatasetResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.delimitedText(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccDataFactoryCustomDataset_avro(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_data_factory_custom_dataset", "test") + r := CustomDatasetResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.avro(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func (t CustomDatasetResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { + id, err := parse.DataSetID(state.ID) + if err != nil { + return nil, err + } + + resp, err := clients.DataFactory.DatasetClient.Get(ctx, id.ResourceGroup, id.FactoryName, id.Name, "") + if err != nil { + return nil, fmt.Errorf("reading %s: %+v", id, err) + } + + return utils.Bool(resp.ID != nil), nil +} + +func (r CustomDatasetResource) basic(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_data_factory_custom_dataset" "test" { + name = "acctestds%d" + data_factory_id = azurerm_data_factory.test.id + type = "Json" + + linked_service { + name = azurerm_data_factory_linked_custom_service.test.name + } 
+ + type_properties_json = < 0 { + computeProperties.VNetProperties.PublicIPs = utils.ExpandStringSlice(publicIPs) + } } return &computeProperties @@ -375,8 +599,11 @@ func expandDataFactoryIntegrationRuntimeAzureSsisComputeProperties(d *pluginsdk. func expandDataFactoryIntegrationRuntimeAzureSsisProperties(d *pluginsdk.ResourceData) *datafactory.IntegrationRuntimeSsisProperties { ssisProperties := &datafactory.IntegrationRuntimeSsisProperties{ - Edition: datafactory.IntegrationRuntimeEdition(d.Get("edition").(string)), - LicenseType: datafactory.IntegrationRuntimeLicenseType(d.Get("license_type").(string)), + LicenseType: datafactory.IntegrationRuntimeLicenseType(d.Get("license_type").(string)), + DataProxyProperties: expandDataFactoryIntegrationRuntimeAzureSsisProxy(d.Get("proxy").([]interface{})), + Edition: datafactory.IntegrationRuntimeEdition(d.Get("edition").(string)), + ExpressCustomSetupProperties: expandDataFactoryIntegrationRuntimeAzureSsisExpressCustomSetUp(d.Get("express_custom_setup").([]interface{})), + PackageStores: expandDataFactoryIntegrationRuntimeAzureSsisPackageStore(d.Get("package_store").([]interface{})), } if catalogInfos, ok := d.GetOk("catalog_info"); ok && len(catalogInfos.([]interface{})) > 0 { @@ -394,9 +621,13 @@ func expandDataFactoryIntegrationRuntimeAzureSsisProperties(d *pluginsdk.Resourc if adminPassword := catalogInfo["administrator_password"]; adminPassword.(string) != "" { ssisProperties.CatalogInfo.CatalogAdminPassword = &datafactory.SecureString{ Value: utils.String(adminPassword.(string)), - Type: datafactory.TypeTypeSecureString, + Type: datafactory.TypeSecureString, } } + + if dualStandbyPairName := catalogInfo["dual_standby_pair_name"].(string); dualStandbyPairName != "" { + ssisProperties.CatalogInfo.DualStandbyPairName = utils.String(dualStandbyPairName) + } } if customSetupScripts, ok := d.GetOk("custom_setup_script"); ok && len(customSetupScripts.([]interface{})) > 0 { @@ -404,7 +635,7 @@ func 
expandDataFactoryIntegrationRuntimeAzureSsisProperties(d *pluginsdk.Resourc sasToken := &datafactory.SecureString{ Value: utils.String(customSetupScript["sas_token"].(string)), - Type: datafactory.TypeTypeSecureString, + Type: datafactory.TypeSecureString, } ssisProperties.CustomSetupScriptProperties = &datafactory.IntegrationRuntimeCustomSetupScriptProperties{ @@ -416,15 +647,165 @@ func expandDataFactoryIntegrationRuntimeAzureSsisProperties(d *pluginsdk.Resourc return ssisProperties } +func expandDataFactoryIntegrationRuntimeAzureSsisProxy(input []interface{}) *datafactory.IntegrationRuntimeDataProxyProperties { + if len(input) == 0 || input[0] == nil { + return nil + } + raw := input[0].(map[string]interface{}) + + result := &datafactory.IntegrationRuntimeDataProxyProperties{ + ConnectVia: &datafactory.EntityReference{ + Type: datafactory.IntegrationRuntimeEntityReferenceTypeIntegrationRuntimeReference, + ReferenceName: utils.String(raw["self_hosted_integration_runtime_name"].(string)), + }, + StagingLinkedService: &datafactory.EntityReference{ + Type: datafactory.IntegrationRuntimeEntityReferenceTypeLinkedServiceReference, + ReferenceName: utils.String(raw["staging_storage_linked_service_name"].(string)), + }, + } + if path := raw["path"].(string); len(path) > 0 { + result.Path = utils.String(path) + } + return result +} + +func expandDataFactoryIntegrationRuntimeAzureSsisExpressCustomSetUp(input []interface{}) *[]datafactory.BasicCustomSetupBase { + if len(input) == 0 || input[0] == nil { + return nil + } + raw := input[0].(map[string]interface{}) + + result := make([]datafactory.BasicCustomSetupBase, 0) + if env := raw["environment"].(map[string]interface{}); len(env) > 0 { + for k, v := range env { + result = append(result, &datafactory.EnvironmentVariableSetup{ + Type: datafactory.TypeBasicCustomSetupBaseTypeEnvironmentVariableSetup, + EnvironmentVariableSetupTypeProperties: &datafactory.EnvironmentVariableSetupTypeProperties{ + VariableName: 
utils.String(k), + VariableValue: utils.String(v.(string)), + }, + }) + } + } + if powershellVersion := raw["powershell_version"].(string); powershellVersion != "" { + result = append(result, &datafactory.AzPowerShellSetup{ + Type: datafactory.TypeBasicCustomSetupBaseTypeAzPowerShellSetup, + AzPowerShellSetupTypeProperties: &datafactory.AzPowerShellSetupTypeProperties{ + Version: utils.String(powershellVersion), + }, + }) + } + if components := raw["component"].([]interface{}); len(components) > 0 { + for _, item := range components { + raw := item.(map[string]interface{}) + + var license datafactory.BasicSecretBase + if v := raw["license"].(string); v != "" { + license = &datafactory.SecureString{ + Type: datafactory.TypeSecureString, + Value: utils.String(v), + } + } else { + license = expandDataFactoryIntegrationRuntimeAzureSsisKeyVaultSecretReference(raw["key_vault_license"].([]interface{})) + } + + result = append(result, &datafactory.ComponentSetup{ + Type: datafactory.TypeBasicCustomSetupBaseTypeComponentSetup, + LicensedComponentSetupTypeProperties: &datafactory.LicensedComponentSetupTypeProperties{ + ComponentName: utils.String(raw["name"].(string)), + LicenseKey: license, + }, + }) + } + } + if cmdKeys := raw["command_key"].([]interface{}); len(cmdKeys) > 0 { + for _, item := range cmdKeys { + raw := item.(map[string]interface{}) + + var password datafactory.BasicSecretBase + if v := raw["password"].(string); v != "" { + password = &datafactory.SecureString{ + Type: datafactory.TypeSecureString, + Value: utils.String(v), + } + } else { + password = expandDataFactoryIntegrationRuntimeAzureSsisKeyVaultSecretReference(raw["key_vault_password"].([]interface{})) + } + + result = append(result, &datafactory.CmdkeySetup{ + Type: datafactory.TypeBasicCustomSetupBaseTypeCmdkeySetup, + CmdkeySetupTypeProperties: &datafactory.CmdkeySetupTypeProperties{ + TargetName: utils.String(raw["target_name"].(string)), + UserName: utils.String(raw["user_name"].(string)), + 
Password: password, + }, + }) + } + } + + return &result +} + +func expandDataFactoryIntegrationRuntimeAzureSsisPackageStore(input []interface{}) *[]datafactory.PackageStore { + if len(input) == 0 { + return nil + } + + result := make([]datafactory.PackageStore, 0) + for _, item := range input { + raw := item.(map[string]interface{}) + result = append(result, datafactory.PackageStore{ + Name: utils.String(raw["name"].(string)), + PackageStoreLinkedService: &datafactory.EntityReference{ + Type: datafactory.IntegrationRuntimeEntityReferenceTypeLinkedServiceReference, + ReferenceName: utils.String(raw["linked_service_name"].(string)), + }, + }) + } + return &result +} + +func expandDataFactoryIntegrationRuntimeAzureSsisKeyVaultSecretReference(input []interface{}) *datafactory.AzureKeyVaultSecretReference { + if len(input) == 0 || input[0] == nil { + return nil + } + + raw := input[0].(map[string]interface{}) + reference := &datafactory.AzureKeyVaultSecretReference{ + SecretName: raw["secret_name"].(string), + Store: &datafactory.LinkedServiceReference{ + Type: utils.String("LinkedServiceReference"), + ReferenceName: utils.String(raw["linked_service_name"].(string)), + }, + Type: datafactory.TypeAzureKeyVaultSecret, + } + if v := raw["secret_version"].(string); v != "" { + reference.SecretVersion = v + } + if v := raw["parameters"].(map[string]interface{}); len(v) > 0 { + reference.Store.Parameters = v + } + return reference +} + func flattenDataFactoryIntegrationRuntimeAzureSsisVnetIntegration(vnetProperties *datafactory.IntegrationRuntimeVNetProperties) []interface{} { if vnetProperties == nil { return []interface{}{} } + var vnetId, subnetName string + if vnetProperties.VNetID != nil { + vnetId = *vnetProperties.VNetID + } + if vnetProperties.Subnet != nil { + subnetName = *vnetProperties.Subnet + } + return []interface{}{ - map[string]string{ - "vnet_id": *vnetProperties.VNetID, - "subnet_name": *vnetProperties.Subnet, + map[string]interface{}{ + "vnet_id": vnetId, 
+ "subnet_name": subnetName, + "public_ips": utils.FlattenStringSlice(vnetProperties.PublicIPs), }, } } @@ -434,20 +815,55 @@ func flattenDataFactoryIntegrationRuntimeAzureSsisCatalogInfo(ssisProperties *da return []interface{}{} } - catalogInfo := map[string]string{ - "server_endpoint": *ssisProperties.CatalogServerEndpoint, - "pricing_tier": string(ssisProperties.CatalogPricingTier), + var serverEndpoint, catalogAdminUserName, administratorPassword, dualStandbyPairName string + if ssisProperties.CatalogServerEndpoint != nil { + serverEndpoint = *ssisProperties.CatalogServerEndpoint } - if ssisProperties.CatalogAdminUserName != nil { - catalogInfo["administrator_login"] = *ssisProperties.CatalogAdminUserName + catalogAdminUserName = *ssisProperties.CatalogAdminUserName + } + if ssisProperties.DualStandbyPairName != nil { + dualStandbyPairName = *ssisProperties.DualStandbyPairName } + // read back if adminPassword, ok := d.GetOk("catalog_info.0.administrator_password"); ok { - catalogInfo["administrator_password"] = adminPassword.(string) + administratorPassword = adminPassword.(string) + } + + return []interface{}{ + map[string]interface{}{ + "server_endpoint": serverEndpoint, + "pricing_tier": string(ssisProperties.CatalogPricingTier), + "administrator_login": catalogAdminUserName, + "administrator_password": administratorPassword, + "dual_standby_pair_name": dualStandbyPairName, + }, + } +} + +func flattenDataFactoryIntegrationRuntimeAzureSsisProxy(input *datafactory.IntegrationRuntimeDataProxyProperties) []interface{} { + if input == nil { + return []interface{}{} } - return []interface{}{catalogInfo} + var path, selfHostedIntegrationRuntimeName, stagingStorageLinkedServiceName string + if input.Path != nil { + path = *input.Path + } + if input.ConnectVia != nil && input.ConnectVia.ReferenceName != nil { + selfHostedIntegrationRuntimeName = *input.ConnectVia.ReferenceName + } + if input.StagingLinkedService != nil && input.StagingLinkedService.ReferenceName != 
nil { + stagingStorageLinkedServiceName = *input.StagingLinkedService.ReferenceName + } + return []interface{}{ + map[string]interface{}{ + "path": path, + "self_hosted_integration_runtime_name": selfHostedIntegrationRuntimeName, + "staging_storage_linked_service_name": stagingStorageLinkedServiceName, + }, + } } func flattenDataFactoryIntegrationRuntimeAzureSsisCustomSetupScript(customSetupScriptProperties *datafactory.IntegrationRuntimeCustomSetupScriptProperties, d *pluginsdk.ResourceData) []interface{} { @@ -465,3 +881,175 @@ func flattenDataFactoryIntegrationRuntimeAzureSsisCustomSetupScript(customSetupS return []interface{}{customSetupScript} } + +func flattenDataFactoryIntegrationRuntimeAzureSsisPackageStore(input *[]datafactory.PackageStore) []interface{} { + if input == nil { + return nil + } + + result := make([]interface{}, 0) + for _, item := range *input { + var name, linkedServiceName string + if item.Name != nil { + name = *item.Name + } + if item.PackageStoreLinkedService != nil && item.PackageStoreLinkedService.ReferenceName != nil { + linkedServiceName = *item.PackageStoreLinkedService.ReferenceName + } + + result = append(result, map[string]interface{}{ + "name": name, + "linked_service_name": linkedServiceName, + }) + } + return result +} + +func flattenDataFactoryIntegrationRuntimeAzureSsisExpressCustomSetUp(input *[]datafactory.BasicCustomSetupBase, d *pluginsdk.ResourceData) []interface{} { + if input == nil { + return []interface{}{} + } + + // retrieve old state + oldState := make(map[string]interface{}) + if arr := d.Get("express_custom_setup").([]interface{}); len(arr) > 0 { + oldState = arr[0].(map[string]interface{}) + } + oldComponents := make([]interface{}, 0) + if rawComponent, ok := oldState["component"]; ok { + if v := rawComponent.([]interface{}); len(v) > 0 { + oldComponents = v + } + } + oldCmdKey := make([]interface{}, 0) + if rawCmdKey, ok := oldState["command_key"]; ok { + if v := rawCmdKey.([]interface{}); len(v) > 0 { + 
oldCmdKey = v + } + } + + env := make(map[string]interface{}) + powershellVersion := "" + components := make([]interface{}, 0) + cmdkeys := make([]interface{}, 0) + for _, item := range *input { + switch v := item.(type) { + case datafactory.AzPowerShellSetup: + if v.Version != nil { + powershellVersion = *v.Version + } + case datafactory.ComponentSetup: + var name string + if v.ComponentName != nil { + name = *v.ComponentName + } + var keyVaultLicense *datafactory.AzureKeyVaultSecretReference + if v.LicenseKey != nil { + if reference, ok := v.LicenseKey.AsAzureKeyVaultSecretReference(); ok { + keyVaultLicense = reference + } + } + components = append(components, map[string]interface{}{ + "name": name, + "key_vault_license": flattenDataFactoryIntegrationRuntimeAzureSsisKeyVaultSecretReference(keyVaultLicense), + "license": readBackSensitiveValue(oldComponents, "license", map[string]string{ + "name": name, + }), + }) + case datafactory.EnvironmentVariableSetup: + if v.VariableName != nil && v.VariableValue != nil { + env[*v.VariableName] = *v.VariableValue + } + case datafactory.CmdkeySetup: + var name, userName string + if v.TargetName != nil { + if v, ok := v.TargetName.(string); ok { + name = v + } + } + if v.UserName != nil { + if v, ok := v.UserName.(string); ok { + userName = v + } + } + var keyVaultPassword *datafactory.AzureKeyVaultSecretReference + if v.Password != nil { + if reference, ok := v.Password.AsAzureKeyVaultSecretReference(); ok { + keyVaultPassword = reference + } + } + cmdkeys = append(cmdkeys, map[string]interface{}{ + "target_name": name, + "user_name": userName, + "password": readBackSensitiveValue(oldCmdKey, "password", map[string]string{ + "target_name": name, + "user_name": userName, + }), + "key_vault_password": flattenDataFactoryIntegrationRuntimeAzureSsisKeyVaultSecretReference(keyVaultPassword), + }) + } + } + + return []interface{}{ + map[string]interface{}{ + "environment": env, + "powershell_version": powershellVersion, + 
"component": components, + "command_key": cmdkeys, + }, + } +} + +func flattenDataFactoryIntegrationRuntimeAzureSsisKeyVaultSecretReference(input *datafactory.AzureKeyVaultSecretReference) []interface{} { + if input == nil { + return []interface{}{} + } + var linkedServiceName, secretName, secretVersion string + var parameters map[string]interface{} + if input.SecretName != nil { + if v, ok := input.SecretName.(string); ok { + secretName = v + } + } + if input.SecretVersion != nil { + if v, ok := input.SecretVersion.(string); ok { + secretVersion = v + } + } + if input.Store != nil { + if input.Store.ReferenceName != nil { + linkedServiceName = *input.Store.ReferenceName + } + if input.Store.Parameters != nil { + parameters = input.Store.Parameters + } + } + return []interface{}{ + map[string]interface{}{ + "linked_service_name": linkedServiceName, + "parameters": parameters, + "secret_name": secretName, + "secret_version": secretVersion, + }, + } +} + +func readBackSensitiveValue(input []interface{}, propertyName string, filters map[string]string) string { + if len(input) == 0 { + return "" + } + for _, item := range input { + raw := item.(map[string]interface{}) + found := true + for k, v := range filters { + if raw[k].(string) != v { + found = false + break + } + } + if found { + return raw[propertyName].(string) + } + } + return "" +} diff --git a/azurerm/internal/services/datafactory/data_factory_integration_runtime_azure_ssis_resource_test.go b/azurerm/internal/services/datafactory/data_factory_integration_runtime_azure_ssis_resource_test.go index c3466fbed0ab..68ae714acc3c 100644 --- a/azurerm/internal/services/datafactory/data_factory_integration_runtime_azure_ssis_resource_test.go +++ b/azurerm/internal/services/datafactory/data_factory_integration_runtime_azure_ssis_resource_test.go @@ -45,30 +45,32 @@ func TestAccDataFactoryIntegrationRuntimeManagedSsis_complete(t *testing.T) { Config: r.complete(data), Check: acceptance.ComposeTestCheckFunc( 
check.That(data.ResourceName).ExistsInAzure(r), - check.That(data.ResourceName).Key("name").HasValue("managed-integration-runtime"), - check.That(data.ResourceName).Key("description").HasValue("acctest"), - check.That(data.ResourceName).Key("data_factory_name").Exists(), - check.That(data.ResourceName).Key("resource_group_name").Exists(), - check.That(data.ResourceName).Key("location").HasValue(azure.NormalizeLocation(data.Locations.Primary)), - check.That(data.ResourceName).Key("node_size").HasValue("Standard_D8_v3"), - check.That(data.ResourceName).Key("number_of_nodes").HasValue("2"), - check.That(data.ResourceName).Key("max_parallel_executions_per_node").HasValue("8"), - check.That(data.ResourceName).Key("edition").HasValue("Standard"), - check.That(data.ResourceName).Key("license_type").HasValue("LicenseIncluded"), - check.That(data.ResourceName).Key("vnet_integration.#").HasValue("1"), - check.That(data.ResourceName).Key("vnet_integration.0.vnet_id").Exists(), - check.That(data.ResourceName).Key("vnet_integration.0.subnet_name").Exists(), - check.That(data.ResourceName).Key("catalog_info.#").HasValue("1"), - check.That(data.ResourceName).Key("catalog_info.0.server_endpoint").Exists(), - check.That(data.ResourceName).Key("catalog_info.0.administrator_login").HasValue("ssis_catalog_admin"), - check.That(data.ResourceName).Key("catalog_info.0.administrator_password").HasValue("my-s3cret-p4ssword!"), - check.That(data.ResourceName).Key("catalog_info.0.pricing_tier").HasValue("Basic"), - check.That(data.ResourceName).Key("custom_setup_script.#").HasValue("1"), - check.That(data.ResourceName).Key("custom_setup_script.0.blob_container_uri").Exists(), - check.That(data.ResourceName).Key("custom_setup_script.0.sas_token").Exists(), ), }, - data.ImportStep("catalog_info.0.administrator_password", "custom_setup_script.0.sas_token"), + data.ImportStep( + "catalog_info.0.administrator_password", + "custom_setup_script.0.sas_token", + 
"express_custom_setup.0.component.0.license", + "express_custom_setup.0.command_key.0.password", + ), + }) +} + +func TestAccDataFactoryIntegrationRuntimeManagedSsis_keyVaultSecretReference(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_data_factory_integration_runtime_azure_ssis", "test") + r := IntegrationRuntimeManagedSsisResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.keyVaultSecretReference(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep( + "catalog_info.0.administrator_password", + "custom_setup_script.0.sas_token", + ), }) } @@ -121,33 +123,48 @@ provider "azurerm" { } resource "azurerm_resource_group" "test" { - name = "acctestRG-df-%d" - location = "%s" + name = "acctestRG-df-%[1]d" + location = "%[2]s" } resource "azurerm_virtual_network" "test" { - name = "acctestvnet%d" + name = "acctestvnet%[1]d" address_space = ["10.0.0.0/16"] location = "${azurerm_resource_group.test.location}" resource_group_name = "${azurerm_resource_group.test.name}" } resource "azurerm_subnet" "test" { - name = "acctestsubnet%d" + name = "acctestsubnet%[1]d" resource_group_name = "${azurerm_resource_group.test.name}" virtual_network_name = "${azurerm_virtual_network.test.name}" address_prefix = "10.0.2.0/24" } +resource "azurerm_public_ip" "test1" { + name = "acctpip1%[1]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + sku = "Standard" + allocation_method = "Static" + domain_name_label = "acctpip1%[1]d" +} + +resource "azurerm_public_ip" "test2" { + name = "acctpip2%[1]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + sku = "Standard" + allocation_method = "Static" + domain_name_label = "acctpip2%[1]d" +} + resource "azurerm_storage_account" "test" { - name = "acctestsa%s" - resource_group_name = "${azurerm_resource_group.test.name}" 
- location = "${azurerm_resource_group.test.location}" - account_kind = "BlobStorage" - account_tier = "Standard" - account_replication_type = "LRS" - access_tier = "Hot" - enable_https_traffic_only = true + name = "acctestsa%[3]s" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + account_tier = "Standard" + account_replication_type = "LRS" } resource "azurerm_storage_container" "test" { @@ -156,6 +173,12 @@ resource "azurerm_storage_container" "test" { container_access_type = "private" } +resource "azurerm_storage_share" "test" { + name = "sharename" + storage_account_name = azurerm_storage_account.test.name + quota = 30 +} + data "azurerm_storage_account_blob_container_sas" "test" { connection_string = "${azurerm_storage_account.test.primary_connection_string}" container_name = "${azurerm_storage_container.test.name}" @@ -175,7 +198,7 @@ data "azurerm_storage_account_blob_container_sas" "test" { } resource "azurerm_sql_server" "test" { - name = "acctestsql%d" + name = "acctestsql%[1]d" resource_group_name = "${azurerm_resource_group.test.name}" location = "${azurerm_resource_group.test.location}" version = "12.0" @@ -184,13 +207,45 @@ resource "azurerm_sql_server" "test" { } resource "azurerm_data_factory" "test" { - name = "acctestdfirm%d" + name = "acctestdfirm%[1]d" location = "${azurerm_resource_group.test.location}" resource_group_name = "${azurerm_resource_group.test.name}" } +resource "azurerm_data_factory_linked_custom_service" "test" { + name = "acctestls%[1]d" + data_factory_id = azurerm_data_factory.test.id + type = "AzureBlobStorage" + type_properties_json = < 0 && accessTokenKeyVaultAuth[0] != nil { databricksProperties = &datafactory.AzureDatabricksLinkedServiceTypeProperties{ - AccessToken: expandAzureKeyVaultPassword(accessTokenKeyVaultAuth), + AccessToken: expandAzureKeyVaultSecretReference(accessTokenKeyVaultAuth), } } @@ -466,7 +466,7 @@ func 
resourceDataFactoryLinkedServiceDatabricksRead(d *pluginsdk.ResourceData, m // We only process AzureKeyVaultSecreReference because a string based access token is masked with asterisks in the GET response // so we can't set it if keyVaultPassword, ok := accessToken.AsAzureKeyVaultSecretReference(); ok { - if err := d.Set("key_vault_password", flattenAzureKeyVaultPassword(keyVaultPassword)); err != nil { + if err := d.Set("key_vault_password", flattenAzureKeyVaultSecretReference(keyVaultPassword)); err != nil { return fmt.Errorf("setting `key_vault_password`: %+v", err) } } diff --git a/azurerm/internal/services/datafactory/data_factory_linked_service_azure_file_storage_resource.go b/azurerm/internal/services/datafactory/data_factory_linked_service_azure_file_storage_resource.go index f3e7d797b9ee..3965829b90d7 100644 --- a/azurerm/internal/services/datafactory/data_factory_linked_service_azure_file_storage_resource.go +++ b/azurerm/internal/services/datafactory/data_factory_linked_service_azure_file_storage_resource.go @@ -169,7 +169,7 @@ func resourceDataFactoryLinkedServiceAzureFileStorageCreateUpdate(d *pluginsdk.R fileStorageProperties := &datafactory.AzureFileStorageLinkedServiceTypeProperties{ ConnectionString: &datafactory.SecureString{ Value: utils.String(d.Get("connection_string").(string)), - Type: datafactory.TypeTypeSecureString, + Type: datafactory.TypeSecureString, }, FileShare: d.Get("file_share").(string), Host: d.Get("host").(string), @@ -180,7 +180,7 @@ func resourceDataFactoryLinkedServiceAzureFileStorageCreateUpdate(d *pluginsdk.R if password != "" { fileStorageProperties.Password = &datafactory.SecureString{ Value: utils.String(d.Get("password").(string)), - Type: datafactory.TypeTypeSecureString, + Type: datafactory.TypeSecureString, } } @@ -200,7 +200,7 @@ func resourceDataFactoryLinkedServiceAzureFileStorageCreateUpdate(d *pluginsdk.R if v, ok := d.GetOk("key_vault_password"); ok { password := v.([]interface{}) - 
fileStorageProperties.Password = expandAzureKeyVaultPassword(password) + fileStorageProperties.Password = expandAzureKeyVaultSecretReference(password) } if v, ok := d.GetOk("additional_properties"); ok { @@ -268,7 +268,7 @@ func resourceDataFactoryLinkedServiceAzureFileStorageRead(d *pluginsdk.ResourceD if password := fileStorage.Password; password != nil { if keyVaultPassword, ok := password.AsAzureKeyVaultSecretReference(); ok { - if err := d.Set("key_vault_password", flattenAzureKeyVaultPassword(keyVaultPassword)); err != nil { + if err := d.Set("key_vault_password", flattenAzureKeyVaultSecretReference(keyVaultPassword)); err != nil { return fmt.Errorf("setting `key_vault_password`: %+v", err) } } diff --git a/azurerm/internal/services/datafactory/data_factory_linked_service_azure_function_resource.go b/azurerm/internal/services/datafactory/data_factory_linked_service_azure_function_resource.go index 42aff344845d..95280dbc7159 100644 --- a/azurerm/internal/services/datafactory/data_factory_linked_service_azure_function_resource.go +++ b/azurerm/internal/services/datafactory/data_factory_linked_service_azure_function_resource.go @@ -132,7 +132,7 @@ func resourceDataFactoryLinkedServiceAzureFunctionCreateUpdate(d *pluginsdk.Reso FunctionAppURL: d.Get("url").(string), FunctionKey: &datafactory.SecureString{ Value: utils.String(d.Get("key").(string)), - Type: datafactory.TypeTypeSecureString, + Type: datafactory.TypeSecureString, }, }, Type: datafactory.TypeBasicLinkedServiceTypeAzureFunction, diff --git a/azurerm/internal/services/datafactory/data_factory_linked_service_azure_search_resource.go b/azurerm/internal/services/datafactory/data_factory_linked_service_azure_search_resource.go new file mode 100644 index 000000000000..6417c1de57ae --- /dev/null +++ b/azurerm/internal/services/datafactory/data_factory_linked_service_azure_search_resource.go @@ -0,0 +1,250 @@ +package datafactory + +import ( + "fmt" + "time" + + 
"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/datafactory/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/datafactory/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func resourceDataFactoryLinkedServiceAzureSearch() *pluginsdk.Resource { + return &pluginsdk.Resource{ + Create: resourceDataFactoryLinkedServiceAzureSearchCreateUpdate, + Read: resourceDataFactoryLinkedServiceAzureSearchRead, + Update: resourceDataFactoryLinkedServiceAzureSearchCreateUpdate, + Delete: resourceDataFactoryLinkedServiceAzureSearchDelete, + + // TODO: replace this with an importer which validates the ID during import + Importer: pluginsdk.DefaultImporter(), + + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), + }, + + Schema: map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.LinkedServiceDatasetName, + }, + + "data_factory_id": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.DataFactoryID, + }, + + "url": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + + 
"search_service_key": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + + "description": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + + "integration_runtime_name": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + + "parameters": { + Type: pluginsdk.TypeMap, + Optional: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + }, + }, + + "annotations": { + Type: pluginsdk.TypeList, + Optional: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + }, + }, + + "additional_properties": { + Type: pluginsdk.TypeMap, + Optional: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + }, + }, + + "encrypted_credential": { + Type: pluginsdk.TypeString, + Computed: true, + }, + }, + } +} + +func resourceDataFactoryLinkedServiceAzureSearchCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).DataFactory.LinkedServiceClient + subscriptionId := meta.(*clients.Client).Account.SubscriptionId + ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) + defer cancel() + + dataFactoryId, err := parse.DataFactoryID(d.Get("data_factory_id").(string)) + if err != nil { + return err + } + + id := parse.NewLinkedServiceID(subscriptionId, dataFactoryId.ResourceGroup, dataFactoryId.FactoryName, d.Get("name").(string)) + if d.IsNewResource() { + existing, err := client.Get(ctx, id.ResourceGroup, id.FactoryName, id.Name, "") + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("checking for presence of existing %s: %+v", id, err) + } + } + if !utils.ResponseWasNotFound(existing.Response) { + return tf.ImportAsExistsError("azurerm_data_factory_linked_service_azure_search", id.ID()) + } + } + + searchLinkedService := &datafactory.AzureSearchLinkedService{ + AzureSearchLinkedServiceTypeProperties: 
&datafactory.AzureSearchLinkedServiceTypeProperties{ + URL: d.Get("url").(string), + Key: &datafactory.SecureString{ + Type: datafactory.TypeSecureString, + Value: utils.String(d.Get("search_service_key").(string)), + }, + }, + Description: utils.String(d.Get("description").(string)), + Type: datafactory.TypeBasicLinkedServiceTypeAzureSearch, + } + + if v, ok := d.GetOk("parameters"); ok { + searchLinkedService.Parameters = expandDataFactoryParameters(v.(map[string]interface{})) + } + + if v, ok := d.GetOk("integration_runtime_name"); ok { + searchLinkedService.ConnectVia = expandDataFactoryLinkedServiceIntegrationRuntime(v.(string)) + } + + if v, ok := d.GetOk("additional_properties"); ok { + searchLinkedService.AdditionalProperties = v.(map[string]interface{}) + } + + if v, ok := d.GetOk("annotations"); ok { + annotations := v.([]interface{}) + searchLinkedService.Annotations = &annotations + } + + linkedService := datafactory.LinkedServiceResource{ + Properties: searchLinkedService, + } + + if _, err := client.CreateOrUpdate(ctx, id.ResourceGroup, id.FactoryName, id.Name, linkedService, ""); err != nil { + return fmt.Errorf("creating/updating %s: %+v", id, err) + } + + d.SetId(id.ID()) + + return resourceDataFactoryLinkedServiceAzureSearchRead(d, meta) +} + +func resourceDataFactoryLinkedServiceAzureSearchRead(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).DataFactory.LinkedServiceClient + subscriptionId := meta.(*clients.Client).Account.SubscriptionId + ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.LinkedServiceID(d.Id()) + if err != nil { + return err + } + + resp, err := client.Get(ctx, id.ResourceGroup, id.FactoryName, id.Name, "") + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + d.SetId("") + return nil + } + + return fmt.Errorf("retrieving %s: %+v", id, err) + } + + d.Set("name", id.Name) + d.Set("data_factory_id", 
parse.NewDataFactoryID(subscriptionId, id.ResourceGroup, id.FactoryName).ID()) + + linkedService, ok := resp.Properties.AsAzureSearchLinkedService() + if !ok { + return fmt.Errorf("classifiying %s: Expected: %q", id, datafactory.TypeBasicLinkedServiceTypeAzureSearch) + } + + if prop := linkedService.AzureSearchLinkedServiceTypeProperties; prop != nil { + url := "" + if v, ok := prop.URL.(string); ok { + url = v + } + d.Set("url", url) + + encryptedCredential := "" + if v, ok := prop.EncryptedCredential.(string); ok { + encryptedCredential = v + } + d.Set("encrypted_credential", encryptedCredential) + } + + d.Set("additional_properties", linkedService.AdditionalProperties) + d.Set("description", linkedService.Description) + + if err := d.Set("annotations", flattenDataFactoryAnnotations(linkedService.Annotations)); err != nil { + return fmt.Errorf("setting `annotations`: %+v", err) + } + + if err := d.Set("parameters", flattenDataFactoryParameters(linkedService.Parameters)); err != nil { + return fmt.Errorf("setting `parameters`: %+v", err) + } + + integrationRuntimeName := "" + if linkedService.ConnectVia != nil && linkedService.ConnectVia.ReferenceName != nil { + integrationRuntimeName = *linkedService.ConnectVia.ReferenceName + } + d.Set("integration_runtime_name", integrationRuntimeName) + + return nil +} + +func resourceDataFactoryLinkedServiceAzureSearchDelete(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).DataFactory.LinkedServiceClient + ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.LinkedServiceID(d.Id()) + if err != nil { + return err + } + + if _, err := client.Delete(ctx, id.ResourceGroup, id.FactoryName, id.Name); err != nil { + return fmt.Errorf("deleting %s: %+v", id, err) + } + + return nil +} diff --git a/azurerm/internal/services/datafactory/data_factory_linked_service_azure_search_resource_test.go 
b/azurerm/internal/services/datafactory/data_factory_linked_service_azure_search_resource_test.go new file mode 100644 index 000000000000..0f9d1f45b200 --- /dev/null +++ b/azurerm/internal/services/datafactory/data_factory_linked_service_azure_search_resource_test.go @@ -0,0 +1,184 @@ +package datafactory_test + +import ( + "context" + "fmt" + "testing" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/datafactory/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +type LinkedServiceSearchResource struct { +} + +func TestAccDataFactoryLinkedServiceAzureSearch_basic(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_data_factory_linked_service_azure_search", "test") + r := LinkedServiceSearchResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("encrypted_credential").Exists(), + ), + }, + data.ImportStep("search_service_key"), + }) +} + +func TestAccDataFactoryLinkedServiceAzureSearch_requiresImport(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_data_factory_linked_service_azure_search", "test") + r := LinkedServiceSearchResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.RequiresImportErrorStep(r.requiresImport), + }) +} + +func TestAccDataFactoryLinkedServiceAzureSearch_complete(t *testing.T) { + 
data := acceptance.BuildTestData(t, "azurerm_data_factory_linked_service_azure_search", "test") + r := LinkedServiceSearchResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.complete(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep("search_service_key"), + }) +} + +func TestAccDataFactoryLinkedServiceAzureSearch_update(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_data_factory_linked_service_azure_search", "test") + r := LinkedServiceSearchResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep("search_service_key"), + { + Config: r.complete(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep("search_service_key"), + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep("search_service_key"), + }) +} + +func (t LinkedServiceSearchResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { + id, err := parse.LinkedServiceID(state.ID) + if err != nil { + return nil, err + } + + resp, err := clients.DataFactory.LinkedServiceClient.Get(ctx, id.ResourceGroup, id.FactoryName, id.Name, "") + if err != nil { + return nil, fmt.Errorf("reading %s: %+v", id, err) + } + + return utils.Bool(resp.ID != nil), nil +} + +func (r LinkedServiceSearchResource) basic(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_data_factory_linked_service_azure_search" "test" { + name = "acctestlssearch%d" + data_factory_id = azurerm_data_factory.test.id + url = join("", ["https://", azurerm_search_service.test.name, ".search.windows.net"]) + search_service_key = 
azurerm_search_service.test.primary_key +} +`, r.template(data), data.RandomInteger) +} + +func (r LinkedServiceSearchResource) requiresImport(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_data_factory_linked_service_azure_search" "import" { + name = azurerm_data_factory_linked_service_azure_search.test.name + data_factory_id = azurerm_data_factory_linked_service_azure_search.test.data_factory_id + url = azurerm_data_factory_linked_service_azure_search.test.url + search_service_key = azurerm_data_factory_linked_service_azure_search.test.search_service_key +} +`, r.basic(data)) +} + +func (r LinkedServiceSearchResource) complete(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_data_factory_linked_service_azure_search" "test" { + name = "acctestlssearch%d" + data_factory_id = azurerm_data_factory.test.id + url = join("", ["https://", azurerm_search_service.test.name, ".search.windows.net"]) + search_service_key = azurerm_search_service.test.primary_key + + annotations = ["test1", "test2", "test3"] + description = "test description" + + parameters = { + foo = "test1" + bar = "test2" + } + + additional_properties = { + foo = "test1" + bar = "test2" + } +} +`, r.template(data), data.RandomInteger) +} + +func (LinkedServiceSearchResource) template(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-df-%d" + location = "%s" +} + +resource "azurerm_data_factory" "test" { + name = "acctestdf%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name +} + +resource "azurerm_search_service" "test" { + name = "acctestsearchservice%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + sku = "standard" +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger) 
+} diff --git a/azurerm/internal/services/datafactory/data_factory_linked_service_azure_sql_database_resource.go b/azurerm/internal/services/datafactory/data_factory_linked_service_azure_sql_database_resource.go index 0e1908601bfb..2c2997163d45 100644 --- a/azurerm/internal/services/datafactory/data_factory_linked_service_azure_sql_database_resource.go +++ b/azurerm/internal/services/datafactory/data_factory_linked_service_azure_sql_database_resource.go @@ -54,7 +54,8 @@ func resourceDataFactoryLinkedServiceAzureSQLDatabase() *pluginsdk.Resource { "connection_string": { Type: pluginsdk.TypeString, - Required: true, + Optional: true, + ExactlyOneOf: []string{"connection_string", "key_vault_connection_string"}, DiffSuppressFunc: azureRmDataFactoryLinkedServiceConnectionStringDiff, ValidateFunc: validation.StringIsNotEmpty, }, @@ -65,6 +66,28 @@ func resourceDataFactoryLinkedServiceAzureSQLDatabase() *pluginsdk.Resource { ValidateFunc: validation.StringIsNotEmpty, }, + "key_vault_connection_string": { + Type: pluginsdk.TypeList, + Optional: true, + ExactlyOneOf: []string{"connection_string", "key_vault_connection_string"}, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "linked_service_name": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + + "secret_name": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + }, + }, + }, + "key_vault_password": { Type: pluginsdk.TypeList, Optional: true, @@ -178,16 +201,20 @@ func resourceDataFactoryLinkedServiceAzureSQLDatabaseCreateUpdate(d *pluginsdk.R if v, ok := d.GetOk("connection_string"); ok { sqlDatabaseProperties.ConnectionString = &datafactory.SecureString{ Value: utils.String(v.(string)), - Type: datafactory.TypeTypeSecureString, + Type: datafactory.TypeSecureString, } } + if v, ok := d.GetOk("key_vault_connection_string"); ok { + sqlDatabaseProperties.ConnectionString = 
expandAzureKeyVaultSecretReference(v.([]interface{})) + } + if d.Get("use_managed_identity").(bool) { sqlDatabaseProperties.Tenant = utils.String(d.Get("tenant_id").(string)) } else { secureString := datafactory.SecureString{ Value: utils.String(d.Get("service_principal_key").(string)), - Type: datafactory.TypeTypeSecureString, + Type: datafactory.TypeSecureString, } sqlDatabaseProperties.ServicePrincipalID = utils.String(d.Get("service_principal_id").(string)) @@ -197,7 +224,7 @@ func resourceDataFactoryLinkedServiceAzureSQLDatabaseCreateUpdate(d *pluginsdk.R if v, ok := d.GetOk("key_vault_password"); ok { password := v.([]interface{}) - sqlDatabaseProperties.Password = expandAzureKeyVaultPassword(password) + sqlDatabaseProperties.Password = expandAzureKeyVaultSecretReference(password) } azureSQLDatabaseLinkedService := &datafactory.AzureSQLDatabaseLinkedService{ @@ -287,12 +314,22 @@ func resourceDataFactoryLinkedServiceAzureSQLDatabaseRead(d *pluginsdk.ResourceD } } + if sql.ConnectionString != nil { + if val, ok := sql.ConnectionString.(map[string]interface{}); ok { + if val["type"] != "SecureString" { + if err := d.Set("key_vault_connection_string", flattenAzureKeyVaultConnectionString(val)); err != nil { + return fmt.Errorf("setting `key_vault_connection_string`: %+v", err) + } + } + } + } + d.Set("additional_properties", sql.AdditionalProperties) d.Set("description", sql.Description) if password := sql.Password; password != nil { if keyVaultPassword, ok := password.AsAzureKeyVaultSecretReference(); ok { - if err := d.Set("key_vault_password", flattenAzureKeyVaultPassword(keyVaultPassword)); err != nil { + if err := d.Set("key_vault_password", flattenAzureKeyVaultSecretReference(keyVaultPassword)); err != nil { return fmt.Errorf("setting `key_vault_password`: %+v", err) } } diff --git a/azurerm/internal/services/datafactory/data_factory_linked_service_azure_sql_database_resource_test.go 
b/azurerm/internal/services/datafactory/data_factory_linked_service_azure_sql_database_resource_test.go index fccb70e2c7a1..180a06702223 100644 --- a/azurerm/internal/services/datafactory/data_factory_linked_service_azure_sql_database_resource_test.go +++ b/azurerm/internal/services/datafactory/data_factory_linked_service_azure_sql_database_resource_test.go @@ -76,7 +76,7 @@ func TestAccDataFactoryLinkedServiceAzureSQLDatabase_managed_id(t *testing.T) { }) } -func TestAccDataFactoryLinkedServiceAzureSQLDatabase_KeyVaultReference(t *testing.T) { +func TestAccDataFactoryLinkedServiceAzureSQLDatabase_PasswordKeyVaultReference(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_data_factory_linked_service_azure_sql_database", "test") r := LinkedServiceAzureSQLDatabaseResource{} @@ -94,6 +94,25 @@ func TestAccDataFactoryLinkedServiceAzureSQLDatabase_KeyVaultReference(t *testin }) } +func TestAccDataFactoryLinkedServiceAzureSQLDatabase_ConnectionStringKeyVaultReference(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_data_factory_linked_service_azure_sql_database", "test") + r := LinkedServiceAzureSQLDatabaseResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.connection_string_key_vault_reference(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("key_vault_connection_string.0.linked_service_name").HasValue("linkkv"), + check.That(data.ResourceName).Key("key_vault_connection_string.0.secret_name").HasValue("connection_string"), + check.That(data.ResourceName).Key("key_vault_password.0.linked_service_name").HasValue("linkkv"), + check.That(data.ResourceName).Key("key_vault_password.0.secret_name").HasValue("password"), + ), + }, + data.ImportStep(), + }) +} + func (t LinkedServiceAzureSQLDatabaseResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := 
azure.ParseAzureResourceID(state.ID) if err != nil { @@ -289,3 +308,55 @@ resource "azurerm_data_factory_linked_service_azure_sql_database" "test" { } `, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger) } + +func (LinkedServiceAzureSQLDatabaseResource) connection_string_key_vault_reference(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +data "azurerm_client_config" "current" {} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-df-%d" + location = "%s" +} + +resource "azurerm_key_vault" "test" { + name = "acctkv%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + tenant_id = data.azurerm_client_config.current.tenant_id + sku_name = "standard" +} + +resource "azurerm_data_factory" "test" { + name = "acctestdf%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name +} + +resource "azurerm_data_factory_linked_service_key_vault" "test" { + name = "linkkv" + resource_group_name = azurerm_resource_group.test.name + data_factory_name = azurerm_data_factory.test.name + key_vault_id = azurerm_key_vault.test.id +} + +resource "azurerm_data_factory_linked_service_azure_sql_database" "test" { + name = "acctestlssql%d" + resource_group_name = azurerm_resource_group.test.name + data_factory_name = azurerm_data_factory.test.name + + key_vault_connection_string { + linked_service_name = azurerm_data_factory_linked_service_key_vault.test.name + secret_name = "connection_string" + } + + key_vault_password { + linked_service_name = azurerm_data_factory_linked_service_key_vault.test.name + secret_name = "password" + } +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger) +} diff --git a/azurerm/internal/services/datafactory/data_factory_linked_service_azure_table_storage_resource.go 
b/azurerm/internal/services/datafactory/data_factory_linked_service_azure_table_storage_resource.go index 6530e7b6c35d..0a6c78673f28 100644 --- a/azurerm/internal/services/datafactory/data_factory_linked_service_azure_table_storage_resource.go +++ b/azurerm/internal/services/datafactory/data_factory_linked_service_azure_table_storage_resource.go @@ -125,7 +125,7 @@ func resourceDataFactoryLinkedServiceTableStorageCreateUpdate(d *pluginsdk.Resou AzureStorageLinkedServiceTypeProperties: &datafactory.AzureStorageLinkedServiceTypeProperties{ ConnectionString: &datafactory.SecureString{ Value: utils.String(d.Get("connection_string").(string)), - Type: datafactory.TypeTypeSecureString, + Type: datafactory.TypeSecureString, }, }, Type: datafactory.TypeBasicLinkedServiceTypeAzureTableStorage, diff --git a/azurerm/internal/services/datafactory/data_factory_linked_service_cosmosdb_resource.go b/azurerm/internal/services/datafactory/data_factory_linked_service_cosmosdb_resource.go index 9d72db6b618d..55cc6bb9567c 100644 --- a/azurerm/internal/services/datafactory/data_factory_linked_service_cosmosdb_resource.go +++ b/azurerm/internal/services/datafactory/data_factory_linked_service_cosmosdb_resource.go @@ -154,7 +154,7 @@ func resourceDataFactoryLinkedServiceCosmosDbCreateUpdate(d *pluginsdk.ResourceD if isAccountDetailUsed { accountKeySecureString := datafactory.SecureString{ Value: &accountKey, - Type: datafactory.TypeTypeSecureString, + Type: datafactory.TypeSecureString, } cosmosdbProperties.AccountEndpoint = endpoint cosmosdbProperties.AccountKey = accountKeySecureString @@ -163,7 +163,7 @@ func resourceDataFactoryLinkedServiceCosmosDbCreateUpdate(d *pluginsdk.ResourceD connectionString := d.Get("connection_string").(string) connectionStringSecureString := datafactory.SecureString{ Value: &connectionString, - Type: datafactory.TypeTypeSecureString, + Type: datafactory.TypeSecureString, } cosmosdbProperties.ConnectionString = connectionStringSecureString 
cosmosdbProperties.Database = databaseName diff --git a/azurerm/internal/services/datafactory/data_factory_linked_service_cosmosdb_resource_test.go b/azurerm/internal/services/datafactory/data_factory_linked_service_cosmosdb_resource_test.go index 919e3ad0c29f..b6acee0f0e7c 100644 --- a/azurerm/internal/services/datafactory/data_factory_linked_service_cosmosdb_resource_test.go +++ b/azurerm/internal/services/datafactory/data_factory_linked_service_cosmosdb_resource_test.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" diff --git a/azurerm/internal/services/datafactory/data_factory_linked_service_data_lake_storage_gen2_resource.go b/azurerm/internal/services/datafactory/data_factory_linked_service_data_lake_storage_gen2_resource.go index 7f679c88449b..5e02bb116be3 100644 --- a/azurerm/internal/services/datafactory/data_factory_linked_service_data_lake_storage_gen2_resource.go +++ b/azurerm/internal/services/datafactory/data_factory_linked_service_data_lake_storage_gen2_resource.go @@ -62,8 +62,8 @@ func resourceDataFactoryLinkedServiceDataLakeStorageGen2() *pluginsdk.Resource { Type: pluginsdk.TypeBool, Optional: true, Default: false, - ConflictsWith: []string{"service_principal_key", "service_principal_id"}, - AtLeastOneOf: []string{"service_principal_key", "service_principal_id", "use_managed_identity"}, + ConflictsWith: []string{"service_principal_key", "service_principal_id", "storage_account_key"}, + AtLeastOneOf: []string{"service_principal_key", "service_principal_id", "storage_account_key", "use_managed_identity"}, }, "service_principal_id": { @@ -71,8 +71,8 @@ func resourceDataFactoryLinkedServiceDataLakeStorageGen2() 
*pluginsdk.Resource { Optional: true, ValidateFunc: validation.IsUUID, RequiredWith: []string{"service_principal_key"}, - ConflictsWith: []string{"use_managed_identity"}, - AtLeastOneOf: []string{"service_principal_key", "service_principal_id", "use_managed_identity"}, + ConflictsWith: []string{"storage_account_key", "use_managed_identity"}, + AtLeastOneOf: []string{"service_principal_key", "service_principal_id", "storage_account_key", "use_managed_identity"}, }, "service_principal_key": { @@ -80,14 +80,23 @@ func resourceDataFactoryLinkedServiceDataLakeStorageGen2() *pluginsdk.Resource { Optional: true, ValidateFunc: validation.StringIsNotEmpty, RequiredWith: []string{"service_principal_id"}, - ConflictsWith: []string{"use_managed_identity"}, - AtLeastOneOf: []string{"service_principal_key", "service_principal_id", "use_managed_identity"}, + ConflictsWith: []string{"storage_account_key", "use_managed_identity"}, + AtLeastOneOf: []string{"service_principal_key", "service_principal_id", "storage_account_key", "use_managed_identity"}, + }, + + "storage_account_key": { + Type: pluginsdk.TypeString, + Optional: true, + ConflictsWith: []string{"service_principal_id", "service_principal_key", "use_managed_identity"}, + AtLeastOneOf: []string{"service_principal_key", "service_principal_id", "storage_account_key", "use_managed_identity"}, }, "tenant": { - Type: pluginsdk.TypeString, - Optional: true, - ValidateFunc: validation.StringIsNotEmpty, + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringIsNotEmpty, + RequiredWith: []string{"service_principal_id"}, + ConflictsWith: []string{"storage_account_key", "use_managed_identity"}, }, "description": { @@ -155,13 +164,20 @@ func resourceDataFactoryLinkedServiceDataLakeStorageGen2CreateUpdate(d *pluginsd if d.Get("use_managed_identity").(bool) { datalakeStorageGen2Properties = &datafactory.AzureBlobFSLinkedServiceTypeProperties{ - URL: utils.String(d.Get("url").(string)), - Tenant: 
utils.String(d.Get("tenant").(string)), + URL: utils.String(d.Get("url").(string)), + } + } else if v, ok := d.GetOk("storage_account_key"); ok { + datalakeStorageGen2Properties = &datafactory.AzureBlobFSLinkedServiceTypeProperties{ + URL: utils.String(d.Get("url").(string)), + AccountKey: datafactory.SecureString{ + Value: utils.String(v.(string)), + Type: datafactory.TypeSecureString, + }, } } else { secureString := datafactory.SecureString{ Value: utils.String(d.Get("service_principal_key").(string)), - Type: datafactory.TypeTypeSecureString, + Type: datafactory.TypeSecureString, } datalakeStorageGen2Properties = &datafactory.AzureBlobFSLinkedServiceTypeProperties{ @@ -253,9 +269,6 @@ func resourceDataFactoryLinkedServiceDataLakeStorageGen2Read(d *pluginsdk.Resour if dataLakeStorageGen2.ServicePrincipalID != nil { d.Set("service_principal_id", dataLakeStorageGen2.ServicePrincipalID) - d.Set("use_managed_identity", false) - } else { - d.Set("use_managed_identity", true) } if dataLakeStorageGen2.URL != nil { diff --git a/azurerm/internal/services/datafactory/data_factory_linked_service_data_lake_storage_gen2_resource_test.go b/azurerm/internal/services/datafactory/data_factory_linked_service_data_lake_storage_gen2_resource_test.go index c1466b10b03d..ec64d32b83f0 100644 --- a/azurerm/internal/services/datafactory/data_factory_linked_service_data_lake_storage_gen2_resource_test.go +++ b/azurerm/internal/services/datafactory/data_factory_linked_service_data_lake_storage_gen2_resource_test.go @@ -27,7 +27,22 @@ func TestAccDataFactoryLinkedServiceDataLakeStorageGen2_basic(t *testing.T) { check.That(data.ResourceName).ExistsInAzure(r), ), }, - data.ImportStep("service_principal_key"), + data.ImportStep("service_principal_key", "use_managed_identity"), + }) +} + +func TestAccDataFactoryLinkedServiceDataLakeStorageGen2_accountKeyAuth(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_data_factory_linked_service_data_lake_storage_gen2", "test") + r := 
LinkedServiceDataLakeStorageGen2Resource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.accountKeyAuth(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep("storage_account_key", "use_managed_identity"), }) } @@ -42,7 +57,7 @@ func TestAccDataFactoryLinkedServiceDataLakeStorageGen2_managed_id(t *testing.T) check.That(data.ResourceName).ExistsInAzure(r), ), }, - data.ImportStep(), + data.ImportStep("use_managed_identity"), }) } @@ -71,7 +86,7 @@ func TestAccDataFactoryLinkedServiceDataLakeStorageGen2_update(t *testing.T) { check.That(data.ResourceName).Key("description").HasValue("test description 2"), ), }, - data.ImportStep("service_principal_key"), + data.ImportStep("service_principal_key", "use_managed_identity"), }) } @@ -118,12 +133,48 @@ resource "azurerm_data_factory_linked_service_data_lake_storage_gen2" "test" { data_factory_name = azurerm_data_factory.test.name service_principal_id = data.azurerm_client_config.current.client_id service_principal_key = "testkey" - tenant = "11111111-1111-1111-1111-111111111111" url = "https://test.azure.com" } `, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger) } +func (LinkedServiceDataLakeStorageGen2Resource) accountKeyAuth(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-df-%d" + location = "%s" +} + +resource "azurerm_data_factory" "test" { + name = "acctestdf%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name +} + +resource "azurerm_storage_account" "test" { + name = "testaccsa%s" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + account_tier = "Standard" + account_replication_type = "LRS" + is_hns_enabled = true + allow_blob_public_access = true +} + +resource 
"azurerm_data_factory_linked_service_data_lake_storage_gen2" "test" { + name = "acctestDataLake%d" + resource_group_name = azurerm_resource_group.test.name + data_factory_name = azurerm_data_factory.test.name + url = azurerm_storage_account.test.primary_dfs_endpoint + storage_account_key = azurerm_storage_account.test.primary_access_key +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomString, data.RandomInteger) +} + func (LinkedServiceDataLakeStorageGen2Resource) managed_id(data acceptance.TestData) string { return fmt.Sprintf(` provider "azurerm" { diff --git a/azurerm/internal/services/datafactory/data_factory_linked_service_kusto_resource.go b/azurerm/internal/services/datafactory/data_factory_linked_service_kusto_resource.go new file mode 100644 index 000000000000..bbf7c8543212 --- /dev/null +++ b/azurerm/internal/services/datafactory/data_factory_linked_service_kusto_resource.go @@ -0,0 +1,288 @@ +package datafactory + +import ( + "fmt" + "time" + + "github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/datafactory/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/datafactory/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func resourceDataFactoryLinkedServiceKusto() *pluginsdk.Resource { + return &pluginsdk.Resource{ + Create: resourceDataFactoryLinkedServiceKustoCreateUpdate, + Read: 
resourceDataFactoryLinkedServiceKustoRead, + Update: resourceDataFactoryLinkedServiceKustoCreateUpdate, + Delete: resourceDataFactoryLinkedServiceKustoDelete, + + // TODO: replace this with an importer which validates the ID during import + Importer: pluginsdk.DefaultImporter(), + + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), + }, + + Schema: map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.LinkedServiceDatasetName, + }, + + "data_factory_id": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.DataFactoryID, + }, + + "kusto_endpoint": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + + "kusto_database_name": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + + "use_managed_identity": { + Type: pluginsdk.TypeBool, + Optional: true, + Default: false, + ExactlyOneOf: []string{"service_principal_id", "use_managed_identity"}, + }, + + "service_principal_id": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.IsUUID, + RequiredWith: []string{"service_principal_key"}, + ExactlyOneOf: []string{"service_principal_id", "use_managed_identity"}, + }, + + "service_principal_key": { + Type: pluginsdk.TypeString, + Optional: true, + Sensitive: true, + ValidateFunc: validation.StringIsNotEmpty, + RequiredWith: []string{"service_principal_id"}, + }, + + "tenant": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringIsNotEmpty, + RequiredWith: []string{"service_principal_id"}, + }, + + "description": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + 
+ "integration_runtime_name": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + + "parameters": { + Type: pluginsdk.TypeMap, + Optional: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + }, + }, + + "annotations": { + Type: pluginsdk.TypeList, + Optional: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + }, + }, + + "additional_properties": { + Type: pluginsdk.TypeMap, + Optional: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + }, + }, + }, + } +} + +func resourceDataFactoryLinkedServiceKustoCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).DataFactory.LinkedServiceClient + subscriptionId := meta.(*clients.Client).Account.SubscriptionId + ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) + defer cancel() + + dataFactoryId, err := parse.DataFactoryID(d.Get("data_factory_id").(string)) + if err != nil { + return err + } + + id := parse.NewLinkedServiceID(subscriptionId, dataFactoryId.ResourceGroup, dataFactoryId.FactoryName, d.Get("name").(string)) + if d.IsNewResource() { + existing, err := client.Get(ctx, id.ResourceGroup, id.FactoryName, id.Name, "") + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("checking for presence of existing %s: %+v", id, err) + } + } + if !utils.ResponseWasNotFound(existing.Response) { + return tf.ImportAsExistsError("azurerm_data_factory_linked_service_kusto", id.ID()) + } + } + + kustoLinkedService := &datafactory.AzureDataExplorerLinkedService{ + AzureDataExplorerLinkedServiceTypeProperties: &datafactory.AzureDataExplorerLinkedServiceTypeProperties{ + Endpoint: d.Get("kusto_endpoint").(string), + Database: d.Get("kusto_database_name").(string), + }, + Description: utils.String(d.Get("description").(string)), + Type: datafactory.TypeBasicLinkedServiceTypeAzureDataExplorer, + } + + if d.Get("use_managed_identity").(bool) 
{ + kustoLinkedService.AzureDataExplorerLinkedServiceTypeProperties = &datafactory.AzureDataExplorerLinkedServiceTypeProperties{ + Endpoint: d.Get("kusto_endpoint").(string), + Database: d.Get("kusto_database_name").(string), + } + } else if v, ok := d.GetOk("service_principal_id"); ok { + kustoLinkedService.AzureDataExplorerLinkedServiceTypeProperties = &datafactory.AzureDataExplorerLinkedServiceTypeProperties{ + Endpoint: d.Get("kusto_endpoint").(string), + Database: d.Get("kusto_database_name").(string), + ServicePrincipalID: v.(string), + ServicePrincipalKey: &datafactory.SecureString{ + Value: utils.String(d.Get("service_principal_key").(string)), + Type: datafactory.TypeSecureString, + }, + Tenant: utils.String(d.Get("tenant").(string)), + } + } else { + return fmt.Errorf("one of Managed Identity and service principal authentication must be set") + } + + if v, ok := d.GetOk("parameters"); ok { + kustoLinkedService.Parameters = expandDataFactoryParameters(v.(map[string]interface{})) + } + + if v, ok := d.GetOk("integration_runtime_name"); ok { + kustoLinkedService.ConnectVia = expandDataFactoryLinkedServiceIntegrationRuntime(v.(string)) + } + + if v, ok := d.GetOk("additional_properties"); ok { + kustoLinkedService.AdditionalProperties = v.(map[string]interface{}) + } + + if v, ok := d.GetOk("annotations"); ok { + annotations := v.([]interface{}) + kustoLinkedService.Annotations = &annotations + } + + linkedService := datafactory.LinkedServiceResource{ + Properties: kustoLinkedService, + } + + if _, err := client.CreateOrUpdate(ctx, id.ResourceGroup, id.FactoryName, id.Name, linkedService, ""); err != nil { + return fmt.Errorf("creating/updating %s: %+v", id, err) + } + + d.SetId(id.ID()) + + return resourceDataFactoryLinkedServiceKustoRead(d, meta) +} + +func resourceDataFactoryLinkedServiceKustoRead(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).DataFactory.LinkedServiceClient + subscriptionId := 
meta.(*clients.Client).Account.SubscriptionId + ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.LinkedServiceID(d.Id()) + if err != nil { + return err + } + + resp, err := client.Get(ctx, id.ResourceGroup, id.FactoryName, id.Name, "") + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + d.SetId("") + return nil + } + + return fmt.Errorf("retrieving %s: %+v", id, err) + } + + linkedService, ok := resp.Properties.AsAzureDataExplorerLinkedService() + if !ok { + return fmt.Errorf("classifiying %s: Expected: %q", id, datafactory.TypeBasicLinkedServiceTypeAzureDataExplorer) + } + + d.Set("name", id.Name) + d.Set("data_factory_id", parse.NewDataFactoryID(subscriptionId, id.ResourceGroup, id.FactoryName).ID()) + d.Set("additional_properties", linkedService.AdditionalProperties) + d.Set("description", linkedService.Description) + if err := d.Set("annotations", flattenDataFactoryAnnotations(linkedService.Annotations)); err != nil { + return fmt.Errorf("setting `annotations`: %+v", err) + } + if err := d.Set("parameters", flattenDataFactoryParameters(linkedService.Parameters)); err != nil { + return fmt.Errorf("setting `parameters`: %+v", err) + } + + integrationRuntimeName := "" + if linkedService.ConnectVia != nil && linkedService.ConnectVia.ReferenceName != nil { + integrationRuntimeName = *linkedService.ConnectVia.ReferenceName + } + d.Set("integration_runtime_name", integrationRuntimeName) + + if prop := linkedService.AzureDataExplorerLinkedServiceTypeProperties; prop != nil { + d.Set("kusto_endpoint", prop.Endpoint) + d.Set("kusto_database_name", prop.Database) + d.Set("tenant", prop.Tenant) + d.Set("service_principal_id", prop.ServicePrincipalID) + + useManagedIdentity := true + if prop.ServicePrincipalID != nil { + useManagedIdentity = false + } + d.Set("use_managed_identity", useManagedIdentity) + } + + return nil +} + +func resourceDataFactoryLinkedServiceKustoDelete(d 
*pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).DataFactory.LinkedServiceClient + ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.LinkedServiceID(d.Id()) + if err != nil { + return err + } + + if _, err := client.Delete(ctx, id.ResourceGroup, id.FactoryName, id.Name); err != nil { + return fmt.Errorf("deleting %s: %+v", id, err) + } + + return nil +} diff --git a/azurerm/internal/services/datafactory/data_factory_linked_service_kusto_resource_test.go b/azurerm/internal/services/datafactory/data_factory_linked_service_kusto_resource_test.go new file mode 100644 index 000000000000..34e752086bb3 --- /dev/null +++ b/azurerm/internal/services/datafactory/data_factory_linked_service_kusto_resource_test.go @@ -0,0 +1,235 @@ +package datafactory_test + +import ( + "context" + "fmt" + "testing" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/datafactory/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +type LinkedServiceKustoResource struct { +} + +func TestAccDataFactoryLinkedServiceKusto_basic(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_data_factory_linked_service_kusto", "test") + r := LinkedServiceKustoResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccDataFactoryLinkedServiceKusto_requiresImport(t *testing.T) { + data := 
acceptance.BuildTestData(t, "azurerm_data_factory_linked_service_kusto", "test") + r := LinkedServiceKustoResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.RequiresImportErrorStep(r.requiresImport), + }) +} + +func TestAccDataFactoryLinkedServiceKusto_servicePrincipal(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_data_factory_linked_service_kusto", "test") + r := LinkedServiceKustoResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.servicePrincipal(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep("service_principal_key"), + }) +} + +func TestAccDataFactoryLinkedServiceKusto_complete(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_data_factory_linked_service_kusto", "test") + r := LinkedServiceKustoResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.complete(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccDataFactoryLinkedServiceKusto_update(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_data_factory_linked_service_kusto", "test") + r := LinkedServiceKustoResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.complete(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func (t LinkedServiceKustoResource) Exists(ctx context.Context, 
clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { + id, err := parse.LinkedServiceID(state.ID) + if err != nil { + return nil, err + } + + resp, err := clients.DataFactory.LinkedServiceClient.Get(ctx, id.ResourceGroup, id.FactoryName, id.Name, "") + if err != nil { + return nil, fmt.Errorf("reading %s: %+v", id, err) + } + + return utils.Bool(resp.ID != nil), nil +} + +func (r LinkedServiceKustoResource) basic(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_data_factory_linked_service_kusto" "test" { + name = "acctestlskusto%d" + data_factory_id = azurerm_data_factory.test.id + kusto_endpoint = azurerm_kusto_cluster.test.uri + kusto_database_name = azurerm_kusto_database.test.name + use_managed_identity = true +} +`, r.template(data), data.RandomInteger) +} + +func (r LinkedServiceKustoResource) requiresImport(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_data_factory_linked_service_kusto" "import" { + name = azurerm_data_factory_linked_service_kusto.test.name + data_factory_id = azurerm_data_factory_linked_service_kusto.test.data_factory_id + kusto_endpoint = azurerm_data_factory_linked_service_kusto.test.kusto_endpoint + kusto_database_name = azurerm_data_factory_linked_service_kusto.test.kusto_database_name + use_managed_identity = azurerm_data_factory_linked_service_kusto.test.use_managed_identity +} +`, r.basic(data)) +} + +func (r LinkedServiceKustoResource) servicePrincipal(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +data "azurerm_client_config" "current" { +} + +resource "azurerm_data_factory_linked_service_kusto" "test" { + name = "acctestlskusto%d" + data_factory_id = azurerm_data_factory.test.id + kusto_endpoint = azurerm_kusto_cluster.test.uri + kusto_database_name = azurerm_kusto_database.test.name + service_principal_id = data.azurerm_client_config.current.client_id + service_principal_key = "testkey" + tenant = 
data.azurerm_client_config.current.tenant_id +} +`, r.template(data), data.RandomInteger) +} + +func (r LinkedServiceKustoResource) complete(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_data_factory_linked_service_kusto" "test" { + name = "acctestlskusto%d" + data_factory_id = azurerm_data_factory.test.id + kusto_endpoint = azurerm_kusto_cluster.test.uri + kusto_database_name = azurerm_kusto_database.test.name + use_managed_identity = true + + annotations = ["test1", "test2", "test3"] + description = "test description" + + parameters = { + foo = "test1" + bar = "test2" + } + + additional_properties = { + foo = "test1" + bar = "test2" + } +} +`, r.template(data), data.RandomInteger) +} + +func (LinkedServiceKustoResource) template(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-df-%d" + location = "%s" +} + +resource "azurerm_data_factory" "test" { + name = "acctestdf%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + + identity { + type = "SystemAssigned" + } +} + +resource "azurerm_kusto_cluster" "test" { + name = "acctestkc%s" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + + sku { + name = "Dev(No SLA)_Standard_D11_v2" + capacity = 1 + } +} + +resource "azurerm_kusto_database" "test" { + name = "acctestkd-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + cluster_name = azurerm_kusto_cluster.test.name +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomString, data.RandomInteger) +} diff --git a/azurerm/internal/services/datafactory/data_factory_linked_service_mysql_resource.go b/azurerm/internal/services/datafactory/data_factory_linked_service_mysql_resource.go index 9fb74f98845d..7cc026cd70d9 100644 
--- a/azurerm/internal/services/datafactory/data_factory_linked_service_mysql_resource.go +++ b/azurerm/internal/services/datafactory/data_factory_linked_service_mysql_resource.go @@ -123,7 +123,7 @@ func resourceDataFactoryLinkedServiceMySQLCreateUpdate(d *pluginsdk.ResourceData connectionString := d.Get("connection_string").(string) secureString := datafactory.SecureString{ Value: &connectionString, - Type: datafactory.TypeTypeSecureString, + Type: datafactory.TypeSecureString, } mysqlProperties := &datafactory.MySQLLinkedServiceTypeProperties{ diff --git a/azurerm/internal/services/datafactory/data_factory_linked_service_odata_resource.go b/azurerm/internal/services/datafactory/data_factory_linked_service_odata_resource.go new file mode 100644 index 000000000000..585a3e1c901b --- /dev/null +++ b/azurerm/internal/services/datafactory/data_factory_linked_service_odata_resource.go @@ -0,0 +1,291 @@ +package datafactory + +import ( + "fmt" + "time" + + "github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/datafactory/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/datafactory/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func resourceArmDataFactoryLinkedServiceOData() *pluginsdk.Resource { + return &pluginsdk.Resource{ + Create: 
resourceArmDataFactoryLinkedServiceODataCreateUpdate, + Read: resourceArmDataFactoryLinkedServiceODataRead, + Update: resourceArmDataFactoryLinkedServiceODataCreateUpdate, + Delete: resourceArmDataFactoryLinkedServiceODataDelete, + + // TODO: add a custom importer for this + Importer: pluginsdk.DefaultImporter(), + + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), + }, + + Schema: map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.LinkedServiceDatasetName, + }, + + "data_factory_name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.DataFactoryName(), + }, + + // There's a bug in the Azure API where this is returned in lower-case + // BUG: https://github.com/Azure/azure-rest-api-specs/issues/5788 + "resource_group_name": azure.SchemaResourceGroupNameDiffSuppress(), + + "url": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + + "basic_authentication": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "username": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + "password": { + Type: pluginsdk.TypeString, + Required: true, + Sensitive: true, + ValidateFunc: validation.StringIsNotEmpty, + // This isn't returned from the API so we'll ignore changes when it's empty + DiffSuppressFunc: func(k, old, new string, d *pluginsdk.ResourceData) bool { + return (new == d.Get(k).(string)) && (old == "*****") + }, + }, + }, + }, + }, + + "description": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + + 
"integration_runtime_name": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + + "annotations": { + Type: pluginsdk.TypeList, + Optional: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + }, + }, + + "parameters": { + Type: pluginsdk.TypeMap, + Optional: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + }, + }, + + "additional_properties": { + Type: pluginsdk.TypeMap, + Optional: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + }, + }, + }, + } +} + +func resourceArmDataFactoryLinkedServiceODataCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).DataFactory.LinkedServiceClient + ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) + defer cancel() + + name := d.Get("name").(string) + dataFactoryName := d.Get("data_factory_name").(string) + resourceGroup := d.Get("resource_group_name").(string) + + if d.IsNewResource() { + existing, err := client.Get(ctx, resourceGroup, dataFactoryName, name, "") + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing Data Factory Linked Service OData Anonymous %q (Data Factory %q / Resource Group %q): %+v", name, dataFactoryName, resourceGroup, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_data_factory_linked_service_odata", *existing.ID) + } + } + + odataLinkedService := &datafactory.ODataLinkedService{ + Description: utils.String(d.Get("description").(string)), + Type: datafactory.TypeBasicLinkedServiceTypeOData, + ODataLinkedServiceTypeProperties: &datafactory.ODataLinkedServiceTypeProperties{ + AuthenticationType: datafactory.ODataAuthenticationTypeAnonymous, + URL: d.Get("url").(string), + }, + } + + // There are multiple authentication paths. 
If support for those get added, we can easily add them in + // a similar format to the below while not messing up the other attributes in ODataLinkedServiceTypeProperties + if v, ok := d.GetOk("basic_authentication"); ok { + attrs := v.([]interface{}) + if len(attrs) != 0 && attrs[0] != nil { + raw := attrs[0].(map[string]interface{}) + odataLinkedService.AuthenticationType = datafactory.ODataAuthenticationTypeBasic + odataLinkedService.UserName = raw["username"].(string) + odataLinkedService.Password = datafactory.SecureString{ + Value: utils.String(raw["password"].(string)), + Type: datafactory.TypeSecureString, + } + } + } + + if v, ok := d.GetOk("parameters"); ok { + odataLinkedService.Parameters = expandDataFactoryParameters(v.(map[string]interface{})) + } + + if v, ok := d.GetOk("integration_runtime_name"); ok { + odataLinkedService.ConnectVia = expandDataFactoryLinkedServiceIntegrationRuntime(v.(string)) + } + + if v, ok := d.GetOk("additional_properties"); ok { + odataLinkedService.AdditionalProperties = v.(map[string]interface{}) + } + + if v, ok := d.GetOk("annotations"); ok { + annotations := v.([]interface{}) + odataLinkedService.Annotations = &annotations + } + + linkedService := datafactory.LinkedServiceResource{ + Properties: odataLinkedService, + } + + if _, err := client.CreateOrUpdate(ctx, resourceGroup, dataFactoryName, name, linkedService, ""); err != nil { + return fmt.Errorf("Error creating/updating Data Factory Linked Service OData Anonymous %q (Data Factory %q / Resource Group %q): %+v", name, dataFactoryName, resourceGroup, err) + } + + resp, err := client.Get(ctx, resourceGroup, dataFactoryName, name, "") + if err != nil { + return fmt.Errorf("Error retrieving Data Factory Linked Service OData Anonymous %q (Data Factory %q / Resource Group %q): %+v", name, dataFactoryName, resourceGroup, err) + } + + if resp.ID == nil { + return fmt.Errorf("Cannot read Data Factory Linked Service OData Anonymous %q (Data Factory %q / Resource Group %q): 
%+v", name, dataFactoryName, resourceGroup, err) + } + + d.SetId(*resp.ID) + + return resourceArmDataFactoryLinkedServiceODataRead(d, meta) +} + +func resourceArmDataFactoryLinkedServiceODataRead(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).DataFactory.LinkedServiceClient + ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.LinkedServiceID(d.Id()) + if err != nil { + return err + } + + resp, err := client.Get(ctx, id.ResourceGroup, id.FactoryName, id.Name, "") + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + d.SetId("") + return nil + } + + return fmt.Errorf("Error retrieving Data Factory Linked Service OData %q (Data Factory %q / Resource Group %q): %+v", id.Name, id.FactoryName, id.ResourceGroup, err) + } + + d.Set("name", resp.Name) + d.Set("resource_group_name", id.ResourceGroup) + d.Set("data_factory_name", id.FactoryName) + + odata, ok := resp.Properties.AsODataLinkedService() + if !ok { + return fmt.Errorf("Error classifiying Data Factory Linked Service OData %q (Data Factory %q / Resource Group %q): Expected: %q Received: %q", id.Name, id.FactoryName, id.ResourceGroup, datafactory.TypeBasicLinkedServiceTypeOData, *resp.Type) + } + + props := odata.ODataLinkedServiceTypeProperties + d.Set("url", props.URL) + if props.AuthenticationType == datafactory.ODataAuthenticationTypeBasic { + if err := d.Set("basic_authentication", []interface{}{map[string]interface{}{ + "username": props.UserName, + // `password` isn't returned from the api so we'll set it to `*****` here to be able to check for diffs during plan + "password": "*****", + }}); err != nil { + return fmt.Errorf("setting `basic_authentication`: %+v", err) + } + } + + d.Set("additional_properties", odata.AdditionalProperties) + d.Set("description", odata.Description) + + annotations := flattenDataFactoryAnnotations(odata.Annotations) + if err := d.Set("annotations", annotations); err != 
nil { + return fmt.Errorf("Error setting `annotations`: %+v", err) + } + + parameters := flattenDataFactoryParameters(odata.Parameters) + if err := d.Set("parameters", parameters); err != nil { + return fmt.Errorf("Error setting `parameters`: %+v", err) + } + + if connectVia := odata.ConnectVia; connectVia != nil { + if connectVia.ReferenceName != nil { + d.Set("integration_runtime_name", connectVia.ReferenceName) + } + } + + return nil +} + +func resourceArmDataFactoryLinkedServiceODataDelete(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).DataFactory.LinkedServiceClient + ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.LinkedServiceID(d.Id()) + if err != nil { + return err + } + + response, err := client.Delete(ctx, id.ResourceGroup, id.FactoryName, id.Name) + if err != nil { + if !utils.ResponseWasNotFound(response) { + return fmt.Errorf("Error deleting Data Factory Linked Service OData %q (Data Factory %q / Resource Group %q): %+v", id.Name, id.FactoryName, id.ResourceGroup, err) + } + } + + return nil +} diff --git a/azurerm/internal/services/datafactory/data_factory_linked_service_odata_resource_test.go b/azurerm/internal/services/datafactory/data_factory_linked_service_odata_resource_test.go new file mode 100644 index 000000000000..eff8ce402c89 --- /dev/null +++ b/azurerm/internal/services/datafactory/data_factory_linked_service_odata_resource_test.go @@ -0,0 +1,215 @@ +package datafactory_test + +import ( + "context" + "fmt" + "testing" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/datafactory/parse" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +type LinkedServiceODataResource struct { +} + +func TestAccDataFactoryLinkedServiceOData_anon_auth(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_data_factory_linked_service_odata", "test") + r := LinkedServiceODataResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.anon_auth(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccDataFactoryLinkedServiceOData_basic_auth(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_data_factory_linked_service_odata", "test") + r := LinkedServiceODataResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic_auth(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccDataFactoryLinkedServiceOData_basic_update(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_data_factory_linked_service_odata", "test") + r := LinkedServiceODataResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.update1(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.update2(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func (t LinkedServiceODataResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { + id, err := parse.LinkedServiceID(state.ID) + if err != nil { + return nil, err + } + + resp, err := clients.DataFactory.LinkedServiceClient.Get(ctx, id.ResourceGroup, id.FactoryName, id.Name, "") + if err != nil { + return nil, 
fmt.Errorf("reading Data Factory Linked Service OData (%s): %+v", id, err) + } + + return utils.Bool(resp.ID != nil), nil +} + +func (LinkedServiceODataResource) anon_auth(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-df-%d" + location = "%s" +} + +resource "azurerm_data_factory" "test" { + name = "acctestdf%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name +} + +resource "azurerm_data_factory_linked_service_odata" "test" { + name = "acctestlsodata%d" + resource_group_name = azurerm_resource_group.test.name + data_factory_name = azurerm_data_factory.test.name + url = "https://services.odata.org/v4/TripPinServiceRW/People" +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger) +} + +func (LinkedServiceODataResource) basic_auth(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-df-%d" + location = "%s" +} + +resource "azurerm_data_factory" "test" { + name = "acctestdf%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name +} + +resource "azurerm_data_factory_linked_service_odata" "test" { + name = "acctestlsodata%d" + resource_group_name = azurerm_resource_group.test.name + data_factory_name = azurerm_data_factory.test.name + url = "https://services.odata.org/v4/TripPinServiceRW/People" + basic_authentication { + username = "emma" + password = "Ch4ngeM3!" 
+ } +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger) +} + +func (LinkedServiceODataResource) update1(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-df-%d" + location = "%s" +} + +resource "azurerm_data_factory" "test" { + name = "acctestdf%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name +} + +resource "azurerm_data_factory_linked_service_odata" "test" { + name = "acctestlsodata%d" + resource_group_name = azurerm_resource_group.test.name + data_factory_name = azurerm_data_factory.test.name + url = "https://services.odata.org/v4/TripPinServiceRW/" + annotations = ["test1", "test2", "test3"] + description = "test description" + + parameters = { + foo = "test1" + bar = "test2" + } + + additional_properties = { + foo = "test1" + bar = "test2" + } +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger) +} + +func (LinkedServiceODataResource) update2(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-df-%d" + location = "%s" +} + +resource "azurerm_data_factory" "test" { + name = "acctestdf%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name +} + +resource "azurerm_data_factory_linked_service_odata" "test" { + name = "acctestlsodata%d" + resource_group_name = azurerm_resource_group.test.name + data_factory_name = azurerm_data_factory.test.name + url = "https://services.odata.org/v4/TripPinServiceRW/People" + annotations = ["test1", "test2"] + description = "Test Description 2" + + parameters = { + foo = "Test1" + bar = "Test2" + buzz = "Test3" + } + + additional_properties = { + foo = "Test1" + } +} +`, data.RandomInteger, data.Locations.Primary, 
data.RandomInteger, data.RandomInteger) +} diff --git a/azurerm/internal/services/datafactory/data_factory_linked_service_postgresql_resource.go b/azurerm/internal/services/datafactory/data_factory_linked_service_postgresql_resource.go index ab423b4dd510..ecebb223f791 100644 --- a/azurerm/internal/services/datafactory/data_factory_linked_service_postgresql_resource.go +++ b/azurerm/internal/services/datafactory/data_factory_linked_service_postgresql_resource.go @@ -123,7 +123,7 @@ func resourceDataFactoryLinkedServicePostgreSQLCreateUpdate(d *pluginsdk.Resourc connectionString := d.Get("connection_string").(string) secureString := datafactory.SecureString{ Value: &connectionString, - Type: datafactory.TypeTypeSecureString, + Type: datafactory.TypeSecureString, } postgresqlProperties := &datafactory.PostgreSQLLinkedServiceTypeProperties{ diff --git a/azurerm/internal/services/datafactory/data_factory_linked_service_sftp_resource.go b/azurerm/internal/services/datafactory/data_factory_linked_service_sftp_resource.go index f3d27dd77ccd..883d1841065b 100644 --- a/azurerm/internal/services/datafactory/data_factory_linked_service_sftp_resource.go +++ b/azurerm/internal/services/datafactory/data_factory_linked_service_sftp_resource.go @@ -163,7 +163,7 @@ func resourceDataFactoryLinkedServiceSFTPCreateUpdate(d *pluginsdk.ResourceData, passwordSecureString := datafactory.SecureString{ Value: &password, - Type: datafactory.TypeTypeSecureString, + Type: datafactory.TypeSecureString, } sftpProperties := &datafactory.SftpServerLinkedServiceTypeProperties{ diff --git a/azurerm/internal/services/datafactory/data_factory_linked_service_snowflake_resource.go b/azurerm/internal/services/datafactory/data_factory_linked_service_snowflake_resource.go index 050e05c41801..dd28dee8cbe2 100644 --- a/azurerm/internal/services/datafactory/data_factory_linked_service_snowflake_resource.go +++ b/azurerm/internal/services/datafactory/data_factory_linked_service_snowflake_resource.go @@ -151,7 
+151,7 @@ func resourceDataFactoryLinkedServiceSnowflakeCreateUpdate(d *pluginsdk.Resource Description: utils.String(d.Get("description").(string)), SnowflakeLinkedServiceTypeProperties: &datafactory.SnowflakeLinkedServiceTypeProperties{ ConnectionString: d.Get("connection_string").(string), - Password: expandAzureKeyVaultPassword(password), + Password: expandAzureKeyVaultSecretReference(password), }, Type: datafactory.TypeBasicLinkedServiceTypeSnowflake, } @@ -255,7 +255,7 @@ func resourceDataFactoryLinkedServiceSnowflakeRead(d *pluginsdk.ResourceData, me if password := properties.Password; password != nil { if keyVaultPassword, ok := password.AsAzureKeyVaultSecretReference(); ok { - if err := d.Set("key_vault_password", flattenAzureKeyVaultPassword(keyVaultPassword)); err != nil { + if err := d.Set("key_vault_password", flattenAzureKeyVaultSecretReference(keyVaultPassword)); err != nil { return fmt.Errorf("setting `key_vault_password`: %+v", err) } } diff --git a/azurerm/internal/services/datafactory/data_factory_linked_service_snowflake_resource_test.go b/azurerm/internal/services/datafactory/data_factory_linked_service_snowflake_resource_test.go index a8fa4985009e..1d188f1248e6 100644 --- a/azurerm/internal/services/datafactory/data_factory_linked_service_snowflake_resource_test.go +++ b/azurerm/internal/services/datafactory/data_factory_linked_service_snowflake_resource_test.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" diff --git a/azurerm/internal/services/datafactory/data_factory_linked_service_sql_server_resource.go b/azurerm/internal/services/datafactory/data_factory_linked_service_sql_server_resource.go index 
cb8c67ee919d..da3a172701eb 100644 --- a/azurerm/internal/services/datafactory/data_factory_linked_service_sql_server_resource.go +++ b/azurerm/internal/services/datafactory/data_factory_linked_service_sql_server_resource.go @@ -2,7 +2,6 @@ package datafactory import ( "fmt" - "log" "time" "github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory" @@ -55,11 +54,34 @@ func resourceDataFactoryLinkedServiceSQLServer() *pluginsdk.Resource { "connection_string": { Type: pluginsdk.TypeString, - Required: true, + Optional: true, + ExactlyOneOf: []string{"connection_string", "key_vault_connection_string"}, DiffSuppressFunc: azureRmDataFactoryLinkedServiceConnectionStringDiff, ValidateFunc: validation.StringIsNotEmpty, }, + "key_vault_connection_string": { + Type: pluginsdk.TypeList, + Optional: true, + ExactlyOneOf: []string{"connection_string", "key_vault_connection_string"}, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "linked_service_name": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + + "secret_name": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + }, + }, + }, + "key_vault_password": { Type: pluginsdk.TypeList, Optional: true, @@ -150,12 +172,19 @@ func resourceDataFactoryLinkedServiceSQLServerCreateUpdate(d *pluginsdk.Resource sqlServerLinkedService := &datafactory.SQLServerLinkedService{ Description: utils.String(d.Get("description").(string)), SQLServerLinkedServiceTypeProperties: &datafactory.SQLServerLinkedServiceTypeProperties{ - ConnectionString: d.Get("connection_string").(string), - Password: expandAzureKeyVaultPassword(password), + Password: expandAzureKeyVaultSecretReference(password), }, Type: datafactory.TypeBasicLinkedServiceTypeSQLServer, } + if v, ok := d.GetOk("connection_string"); ok { + sqlServerLinkedService.SQLServerLinkedServiceTypeProperties.ConnectionString = v.(string) + 
} + + if v, ok := d.GetOk("key_vault_connection_string"); ok { + sqlServerLinkedService.SQLServerLinkedServiceTypeProperties.ConnectionString = expandAzureKeyVaultSecretReference(v.([]interface{})) + } + if v, ok := d.GetOk("parameters"); ok { sqlServerLinkedService.Parameters = expandDataFactoryParameters(v.(map[string]interface{})) } @@ -245,17 +274,20 @@ func resourceDataFactoryLinkedServiceSQLServerRead(d *pluginsdk.ResourceData, me if properties := sqlServer.SQLServerLinkedServiceTypeProperties; properties != nil { if properties.ConnectionString != nil { - if val, ok := properties.ConnectionString.(string); ok { + if val, ok := properties.ConnectionString.(map[string]interface{}); ok { + if err := d.Set("key_vault_connection_string", flattenAzureKeyVaultConnectionString(val)); err != nil { + return fmt.Errorf("setting `key_vault_connection_string`: %+v", err) + } + } else if val, ok := properties.ConnectionString.(string); ok { d.Set("connection_string", val) } else { - d.Set("connection_string", "") - log.Printf("[DEBUG] Skipping connection string %q since it's not a string", val) + return fmt.Errorf("setting `connection_string`: %+v", err) } } if password := properties.Password; password != nil { if keyVaultPassword, ok := password.AsAzureKeyVaultSecretReference(); ok { - if err := d.Set("key_vault_password", flattenAzureKeyVaultPassword(keyVaultPassword)); err != nil { + if err := d.Set("key_vault_password", flattenAzureKeyVaultSecretReference(keyVaultPassword)); err != nil { return fmt.Errorf("setting `key_vault_password`: %+v", err) } } diff --git a/azurerm/internal/services/datafactory/data_factory_linked_service_sql_server_resource_test.go b/azurerm/internal/services/datafactory/data_factory_linked_service_sql_server_resource_test.go index 1f2b0f1a91da..e240a961d4a4 100644 --- a/azurerm/internal/services/datafactory/data_factory_linked_service_sql_server_resource_test.go +++ 
b/azurerm/internal/services/datafactory/data_factory_linked_service_sql_server_resource_test.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" @@ -48,7 +47,7 @@ func TestAccDataFactoryLinkedServiceSQLServer_basic(t *testing.T) { }) } -func TestAccDataFactoryLinkedServiceSQLServer_KeyVaultReference(t *testing.T) { +func TestAccDataFactoryLinkedServiceSQLServer_PasswordKeyVaultReference(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_data_factory_linked_service_sql_server", "test") r := LinkedServiceSQLServerResource{} @@ -66,6 +65,25 @@ func TestAccDataFactoryLinkedServiceSQLServer_KeyVaultReference(t *testing.T) { }) } +func TestAccDataFactoryLinkedServiceSQLServer_ConnectionStringKeyVaultReference(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_data_factory_linked_service_sql_server", "test") + r := LinkedServiceSQLServerResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.connection_string_key_vault_reference(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("key_vault_connection_string.0.linked_service_name").HasValue("linkkv"), + check.That(data.ResourceName).Key("key_vault_connection_string.0.secret_name").HasValue("connection_string"), + check.That(data.ResourceName).Key("key_vault_password.0.linked_service_name").HasValue("linkkv"), + check.That(data.ResourceName).Key("key_vault_password.0.secret_name").HasValue("password"), + ), + }, + data.ImportStep(), + }) +} + func (t LinkedServiceSQLServerResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) 
(*bool, error) { id, err := azure.ParseAzureResourceID(state.ID) if err != nil { @@ -206,3 +224,55 @@ resource "azurerm_data_factory_linked_service_sql_server" "test" { } `, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger) } + +func (LinkedServiceSQLServerResource) connection_string_key_vault_reference(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +data "azurerm_client_config" "current" {} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-df-%d" + location = "%s" +} + +resource "azurerm_key_vault" "test" { + name = "acctkv%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + tenant_id = data.azurerm_client_config.current.tenant_id + sku_name = "standard" +} + +resource "azurerm_data_factory" "test" { + name = "acctestdf%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name +} + +resource "azurerm_data_factory_linked_service_key_vault" "test" { + name = "linkkv" + resource_group_name = azurerm_resource_group.test.name + data_factory_name = azurerm_data_factory.test.name + key_vault_id = azurerm_key_vault.test.id +} + +resource "azurerm_data_factory_linked_service_sql_server" "test" { + name = "linksqlserver" + resource_group_name = azurerm_resource_group.test.name + data_factory_name = azurerm_data_factory.test.name + + key_vault_connection_string { + linked_service_name = azurerm_data_factory_linked_service_key_vault.test.name + secret_name = "connection_string" + } + + key_vault_password { + linked_service_name = azurerm_data_factory_linked_service_key_vault.test.name + secret_name = "password" + } +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger) +} diff --git a/azurerm/internal/services/datafactory/data_factory_linked_service_synapse_resource.go 
b/azurerm/internal/services/datafactory/data_factory_linked_service_synapse_resource.go index b85e555ad54b..4047622a8828 100644 --- a/azurerm/internal/services/datafactory/data_factory_linked_service_synapse_resource.go +++ b/azurerm/internal/services/datafactory/data_factory_linked_service_synapse_resource.go @@ -151,7 +151,7 @@ func resourceDataFactoryLinkedServiceSynapseCreateUpdate(d *pluginsdk.ResourceDa Description: utils.String(d.Get("description").(string)), AzureSQLDWLinkedServiceTypeProperties: &datafactory.AzureSQLDWLinkedServiceTypeProperties{ ConnectionString: d.Get("connection_string").(string), - Password: expandAzureKeyVaultPassword(password), + Password: expandAzureKeyVaultSecretReference(password), }, Type: datafactory.TypeBasicLinkedServiceTypeAzureSQLDW, } @@ -253,7 +253,7 @@ func resourceDataFactoryLinkedServiceSynapseRead(d *pluginsdk.ResourceData, meta } } - if err := d.Set("key_vault_password", flattenAzureKeyVaultPassword(properties.Password)); err != nil { + if err := d.Set("key_vault_password", flattenAzureKeyVaultSecretReference(properties.Password)); err != nil { return fmt.Errorf("setting `key_vault_password`: %+v", err) } } diff --git a/azurerm/internal/services/datafactory/data_factory_linked_service_web_resource.go b/azurerm/internal/services/datafactory/data_factory_linked_service_web_resource.go index baa9ed43e656..8f0c6c5b963e 100644 --- a/azurerm/internal/services/datafactory/data_factory_linked_service_web_resource.go +++ b/azurerm/internal/services/datafactory/data_factory_linked_service_web_resource.go @@ -161,7 +161,7 @@ func resourceDataFactoryLinkedServiceWebCreateUpdate(d *pluginsdk.ResourceData, password := d.Get("password").(string) passwordSecureString := datafactory.SecureString{ Value: &password, - Type: datafactory.TypeTypeSecureString, + Type: datafactory.TypeSecureString, } basicAuthProperties := &datafactory.WebBasicAuthentication{ AuthenticationType: datafactory.AuthenticationType(authenticationType), diff --git 
a/azurerm/internal/services/datafactory/data_factory_managed_private_endpoint_resource.go b/azurerm/internal/services/datafactory/data_factory_managed_private_endpoint_resource.go new file mode 100644 index 000000000000..7896f28a6f33 --- /dev/null +++ b/azurerm/internal/services/datafactory/data_factory_managed_private_endpoint_resource.go @@ -0,0 +1,181 @@ +package datafactory + +import ( + "context" + "fmt" + "time" + + "github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/datafactory/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/datafactory/validate" + networkValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func resourceDataFactoryManagedPrivateEndpoint() *pluginsdk.Resource { + return &pluginsdk.Resource{ + Create: resourceDataFactoryManagedPrivateEndpointCreate, + Read: resourceDataFactoryManagedPrivateEndpointRead, + Delete: resourceDataFactoryManagedPrivateEndpointDelete, + + Importer: pluginsdk.ImporterValidatingResourceId(func(id string) error { + _, err := parse.ManagedPrivateEndpointID(id) + return err + }), + + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), + }, + + Schema: 
map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.DataFactoryManagedPrivateEndpointName(), + }, + + "data_factory_id": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.DataFactoryID, + }, + + "target_resource_id": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: azure.ValidateResourceID, + }, + + "subresource_name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: networkValidate.PrivateLinkSubResourceName, + }, + }, + } +} + +func resourceDataFactoryManagedPrivateEndpointCreate(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).DataFactory.ManagedPrivateEndpointsClient + managedVirtualNetworksClient := meta.(*clients.Client).DataFactory.ManagedVirtualNetworksClient + subscriptionId := meta.(*clients.Client).Account.SubscriptionId + ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) + defer cancel() + + dataFactoryId, err := parse.DataFactoryID(d.Get("data_factory_id").(string)) + if err != nil { + return err + } + + managedVirtualNetworkName, err := getManagedVirtualNetworkName(ctx, managedVirtualNetworksClient, dataFactoryId.ResourceGroup, dataFactoryId.FactoryName) + if err != nil { + return err + } + if managedVirtualNetworkName == nil { + return fmt.Errorf("managed Private endpoints are only available after managed virtual network for %s is enabled", dataFactoryId) + } + + id := parse.NewManagedPrivateEndpointID(subscriptionId, dataFactoryId.ResourceGroup, dataFactoryId.FactoryName, *managedVirtualNetworkName, d.Get("name").(string)) + existing, err := getManagedPrivateEndpoint(ctx, client, id.ResourceGroup, id.FactoryName, *managedVirtualNetworkName, id.Name) + if err != nil { + return fmt.Errorf("checking for presence of existing %s: %+v", id, err) + } + if existing != nil { + return 
tf.ImportAsExistsError("azurerm_data_factory_managed_private_endpoint", id.ID()) + } + + managedPrivateEndpoint := datafactory.ManagedPrivateEndpointResource{ + Properties: &datafactory.ManagedPrivateEndpoint{ + PrivateLinkResourceID: utils.String(d.Get("target_resource_id").(string)), + GroupID: utils.String(d.Get("subresource_name").(string)), + }, + } + + if _, err := client.CreateOrUpdate(ctx, id.ResourceGroup, id.FactoryName, id.ManagedVirtualNetworkName, id.Name, managedPrivateEndpoint, ""); err != nil { + return fmt.Errorf("creating %s: %+v", id, err) + } + + d.SetId(id.ID()) + + return resourceDataFactoryManagedPrivateEndpointRead(d, meta) +} + +func resourceDataFactoryManagedPrivateEndpointRead(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).DataFactory.ManagedPrivateEndpointsClient + subscriptionId := meta.(*clients.Client).Account.SubscriptionId + ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.ManagedPrivateEndpointID(d.Id()) + if err != nil { + return err + } + + resp, err := client.Get(ctx, id.ResourceGroup, id.FactoryName, id.ManagedVirtualNetworkName, id.Name, "") + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + d.SetId("") + return nil + } + + return fmt.Errorf("retrieving %s: %+v", id, err) + } + + d.Set("name", id.Name) + d.Set("data_factory_id", parse.NewDataFactoryID(subscriptionId, id.ResourceGroup, id.FactoryName).ID()) + + if props := resp.Properties; props != nil { + d.Set("target_resource_id", props.PrivateLinkResourceID) + d.Set("subresource_name", props.GroupID) + } + + return nil +} + +func resourceDataFactoryManagedPrivateEndpointDelete(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).DataFactory.ManagedPrivateEndpointsClient + ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.ManagedPrivateEndpointID(d.Id()) + if err 
!= nil { + return err + } + + if _, err := client.Delete(ctx, id.ResourceGroup, id.FactoryName, id.ManagedVirtualNetworkName, id.Name); err != nil { + return fmt.Errorf("deleting %s: %+v", id, err) + } + + return nil +} + +// if ManagedPrivateEndpoint not exist, get rest api will return 400 bad request +// invoke list rets api and then filter by name +func getManagedPrivateEndpoint(ctx context.Context, client *datafactory.ManagedPrivateEndpointsClient, resourceGroupName, factoryName, managedVirtualNetworkName, name string) (*datafactory.ManagedPrivateEndpointResource, error) { + iter, err := client.ListByFactoryComplete(ctx, resourceGroupName, factoryName, managedVirtualNetworkName) + if err != nil { + return nil, err + } + for iter.NotDone() { + managedPrivateEndpoint := iter.Value() + if managedPrivateEndpoint.Name != nil && *managedPrivateEndpoint.Name == name { + return &managedPrivateEndpoint, nil + } + + if err := iter.NextWithContext(ctx); err != nil { + return nil, err + } + } + return nil, nil +} diff --git a/azurerm/internal/services/datafactory/data_factory_managed_private_endpoint_resource_test.go b/azurerm/internal/services/datafactory/data_factory_managed_private_endpoint_resource_test.go new file mode 100644 index 000000000000..573ef81e7e7c --- /dev/null +++ b/azurerm/internal/services/datafactory/data_factory_managed_private_endpoint_resource_test.go @@ -0,0 +1,126 @@ +package datafactory_test + +import ( + "context" + "fmt" + "testing" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/datafactory/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +type ManagedPrivateEndpointResource struct{} + +func TestAccDataFactoryManagedPrivateEndpoint_basic(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_data_factory_managed_private_endpoint", "test") + r := ManagedPrivateEndpointResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccDataFactoryManagedPrivateEndpoint_requiresImport(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_data_factory_managed_private_endpoint", "test") + r := ManagedPrivateEndpointResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.RequiresImportErrorStep(r.requiresImport), + }) +} + +func (r ManagedPrivateEndpointResource) Exists(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { + id, err := parse.ManagedPrivateEndpointID(state.ID) + if err != nil { + return nil, err + } + + iter, err := client.DataFactory.ManagedPrivateEndpointsClient.ListByFactoryComplete(ctx, id.ResourceGroup, id.FactoryName, id.ManagedVirtualNetworkName) + if err != nil { + return nil, fmt.Errorf("listing %s: %+v", id, err) + } + for iter.NotDone() { + managedPrivateEndpoint := iter.Value() + if managedPrivateEndpoint.Name != nil && *managedPrivateEndpoint.Name == id.Name { + return utils.Bool(true), nil + } + + if err := iter.NextWithContext(ctx); err != nil { + return nil, err + } + } + return utils.Bool(false), nil +} + +func (r ManagedPrivateEndpointResource) basic(data acceptance.TestData) string { + template := r.template(data) + return fmt.Sprintf(` + %s + +resource "azurerm_data_factory_managed_private_endpoint" "test" { + name = 
"acctestEndpoint%d" + data_factory_id = azurerm_data_factory.test.id + target_resource_id = azurerm_storage_account.test.id + subresource_name = "blob" +} +`, template, data.RandomInteger) +} + +func (r ManagedPrivateEndpointResource) requiresImport(data acceptance.TestData) string { + config := r.basic(data) + return fmt.Sprintf(` + %s + +resource "azurerm_data_factory_managed_private_endpoint" "import" { + name = azurerm_data_factory_managed_private_endpoint.test.name + data_factory_id = azurerm_data_factory_managed_private_endpoint.test.data_factory_id + target_resource_id = azurerm_data_factory_managed_private_endpoint.test.target_resource_id + subresource_name = azurerm_data_factory_managed_private_endpoint.test.subresource_name +} +`, config) +} + +func (r ManagedPrivateEndpointResource) template(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-adf-%d" + location = "%s" +} + +resource "azurerm_data_factory" "test" { + name = "acctestdf%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + managed_virtual_network_enabled = true +} + +resource "azurerm_storage_account" "test" { + name = "acctestacc%s" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + account_kind = "BlobStorage" + account_tier = "Standard" + account_replication_type = "LRS" +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomString) +} diff --git a/azurerm/internal/services/datafactory/data_factory_pipeline_resource_test.go b/azurerm/internal/services/datafactory/data_factory_pipeline_resource_test.go index c588b7e106e7..45cf8ec5a5eb 100644 --- a/azurerm/internal/services/datafactory/data_factory_pipeline_resource_test.go +++ b/azurerm/internal/services/datafactory/data_factory_pipeline_resource_test.go @@ -6,7 +6,6 @@ import ( "testing" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" diff --git a/azurerm/internal/services/datafactory/data_factory_resource.go b/azurerm/internal/services/datafactory/data_factory_resource.go index 44aad7f00c60..191c5192313c 100644 --- a/azurerm/internal/services/datafactory/data_factory_resource.go +++ b/azurerm/internal/services/datafactory/data_factory_resource.go @@ -1,6 +1,7 @@ package datafactory import ( + "context" "fmt" "time" @@ -9,6 +10,7 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/datafactory/migration" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/datafactory/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/datafactory/validate" keyVaultParse "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/keyvault/parse" keyVaultValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/keyvault/validate" @@ -28,9 +30,10 @@ func resourceDataFactory() *pluginsdk.Resource { Update: resourceDataFactoryCreateUpdate, Delete: resourceDataFactoryDelete, - SchemaVersion: 1, + SchemaVersion: 2, StateUpgraders: pluginsdk.StateUpgrades(map[int]pluginsdk.StateUpgrade{ 0: migration.DataFactoryV0ToV1{}, + 1: migration.DataFactoryV1ToV2{}, }), // TODO: replace this with an importer which validates the ID during import @@ -172,6 +175,44 @@ func resourceDataFactory() *pluginsdk.Resource { }, }, + "global_parameter": { + 
Type: pluginsdk.TypeSet, + Optional: true, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + + "type": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{ + "Array", + "Bool", + "Float", + "Int", + "Object", + "String", + }, false), + }, + + "value": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + }, + }, + }, + + "managed_virtual_network_enabled": { + Type: pluginsdk.TypeBool, + Optional: true, + }, + "public_network_enabled": { Type: pluginsdk.TypeBool, Optional: true, @@ -192,31 +233,30 @@ func resourceDataFactory() *pluginsdk.Resource { func resourceDataFactoryCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).DataFactory.FactoriesClient + managedVirtualNetworksClient := meta.(*clients.Client).DataFactory.ManagedVirtualNetworksClient + subscriptionId := meta.(*clients.Client).Account.SubscriptionId ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) defer cancel() - name := d.Get("name").(string) - location := azure.NormalizeLocation(d.Get("location").(string)) - resourceGroup := d.Get("resource_group_name").(string) - t := d.Get("tags").(map[string]interface{}) - + id := parse.NewDataFactoryID(subscriptionId, d.Get("resource_group_name").(string), d.Get("name").(string)) if d.IsNewResource() { - existing, err := client.Get(ctx, resourceGroup, name, "") + existing, err := client.Get(ctx, id.ResourceGroup, id.FactoryName, "") if err != nil { if !utils.ResponseWasNotFound(existing.Response) { - return fmt.Errorf("Error checking for presence of existing Data Factory %q (Resource Group %q): %s", name, resourceGroup, err) + return fmt.Errorf("checking for presence of existing %s: %+v", id, err) } } - if existing.ID != nil && *existing.ID != "" { - return 
tf.ImportAsExistsError("azurerm_data_factory", *existing.ID) + if !utils.ResponseWasNotFound(existing.Response) { + return tf.ImportAsExistsError("azurerm_data_factory", id.ID()) } } + location := azure.NormalizeLocation(d.Get("location").(string)) dataFactory := datafactory.Factory{ Location: &location, FactoryProperties: &datafactory.FactoryProperties{}, - Tags: tags.Expand(t), + Tags: tags.Expand(d.Get("tags").(map[string]interface{})), } dataFactory.PublicNetworkAccess = datafactory.PublicNetworkAccessEnabled @@ -263,58 +303,72 @@ func resourceDataFactoryCreateUpdate(d *pluginsdk.ResourceData, meta interface{} } } - if _, err := client.CreateOrUpdate(ctx, resourceGroup, name, dataFactory, ""); err != nil { - return fmt.Errorf("Error creating/updating Data Factory %q (Resource Group %q): %+v", name, resourceGroup, err) - } - - resp, err := client.Get(ctx, resourceGroup, name, "") + globalParameters, err := expandDataFactoryGlobalParameters(d.Get("global_parameter").(*pluginsdk.Set).List()) if err != nil { - return fmt.Errorf("Error retrieving Data Factory %q (Resource Group %q): %+v", name, resourceGroup, err) + return err } + dataFactory.FactoryProperties.GlobalParameters = globalParameters - if resp.ID == nil { - return fmt.Errorf("Cannot read Data Factory %q (Resource Group %q) ID", name, resourceGroup) + if _, err := client.CreateOrUpdate(ctx, id.ResourceGroup, id.FactoryName, dataFactory, ""); err != nil { + return fmt.Errorf("creating/updating %s: %+v", id, err) } if hasRepo, repo := expandDataFactoryRepoConfiguration(d); hasRepo { repoUpdate := datafactory.FactoryRepoUpdate{ - FactoryResourceID: resp.ID, + FactoryResourceID: utils.String(id.ID()), RepoConfiguration: repo, } - if _, err = client.ConfigureFactoryRepo(ctx, location, repoUpdate); err != nil { - return fmt.Errorf("Error configuring Repository for Data Factory %q (Resource Group %q): %+v", name, resourceGroup, err) + if _, err := client.ConfigureFactoryRepo(ctx, location, repoUpdate); err != 
nil { + return fmt.Errorf("configuring Repository for %s: %+v", id, err) } } - d.SetId(*resp.ID) + managedVirtualNetworkEnabled := d.Get("managed_virtual_network_enabled").(bool) + // only pass datafactory.ManagedVirtualNetworkResource{} will cause rest api error + resource := datafactory.ManagedVirtualNetworkResource{ + Properties: &datafactory.ManagedVirtualNetwork{}, + } + if d.IsNewResource() && managedVirtualNetworkEnabled { + if _, err := managedVirtualNetworksClient.CreateOrUpdate(ctx, id.ResourceGroup, id.FactoryName, "default", resource, ""); err != nil { + return fmt.Errorf("creating virtual network for %s: %+v", id, err) + } + } else if !d.IsNewResource() && d.HasChange("managed_virtual_network_enabled") { + if !managedVirtualNetworkEnabled { + return fmt.Errorf("updating %s: once Managed Virtual Network has been Enabled it's not possible to disable it", id) + } + if _, err := managedVirtualNetworksClient.CreateOrUpdate(ctx, id.ResourceGroup, id.FactoryName, "default", resource, ""); err != nil { + return fmt.Errorf("creating virtual network for %s: %+v", id, err) + } + } + + d.SetId(id.ID()) return resourceDataFactoryRead(d, meta) } func resourceDataFactoryRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).DataFactory.FactoriesClient + managedVirtualNetworksClient := meta.(*clients.Client).DataFactory.ManagedVirtualNetworksClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := azure.ParseAzureResourceID(d.Id()) + id, err := parse.DataFactoryID(d.Id()) if err != nil { return err } - resourceGroup := id.ResourceGroup - name := id.Path["factories"] - resp, err := client.Get(ctx, resourceGroup, name, "") + resp, err := client.Get(ctx, id.ResourceGroup, id.FactoryName, "") if err != nil { if utils.ResponseWasNotFound(resp.Response) { d.SetId("") return nil } - return fmt.Errorf("Error retrieving Data Factory %q (Resource Group %q): %+v", name, resourceGroup, err) + 
return fmt.Errorf("retrieving %s: %+v", id, err) } - d.Set("name", resp.Name) - d.Set("resource_group_name", resourceGroup) + d.Set("name", id.FactoryName) + d.Set("resource_group_name", id.ResourceGroup) if location := resp.Location; location != nil { d.Set("location", azure.NormalizeLocation(*location)) } @@ -328,6 +382,10 @@ func resourceDataFactoryRead(d *pluginsdk.ResourceData, meta interface{}) error } } } + + if err := d.Set("global_parameter", flattenDataFactoryGlobalParameters(factoryProps.GlobalParameters)); err != nil { + return fmt.Errorf("setting `global_parameter`: %+v", err) + } } d.Set("vsts_configuration", []interface{}{}) @@ -361,6 +419,16 @@ func resourceDataFactoryRead(d *pluginsdk.ResourceData, meta interface{}) error d.Set("public_network_enabled", resp.PublicNetworkAccess == datafactory.PublicNetworkAccessEnabled) } + managedVirtualNetworkEnabled := false + managedVirtualNetworkName, err := getManagedVirtualNetworkName(ctx, managedVirtualNetworksClient, id.ResourceGroup, id.FactoryName) + if err != nil { + return err + } + if managedVirtualNetworkName != nil { + managedVirtualNetworkEnabled = true + } + d.Set("managed_virtual_network_enabled", managedVirtualNetworkEnabled) + return tags.FlattenAndSet(d, resp.Tags) } @@ -369,17 +437,15 @@ func resourceDataFactoryDelete(d *pluginsdk.ResourceData, meta interface{}) erro ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := azure.ParseAzureResourceID(d.Id()) + id, err := parse.DataFactoryID(d.Id()) if err != nil { return err } - resourceGroup := id.ResourceGroup - name := id.Path["factories"] - response, err := client.Delete(ctx, resourceGroup, name) + response, err := client.Delete(ctx, id.ResourceGroup, id.FactoryName) if err != nil { if !utils.ResponseWasNotFound(response) { - return fmt.Errorf("Error deleting Data Factory %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("deleting %s: %+v", id, err) } } @@ -424,6 +490,30 
@@ func expandDataFactoryRepoConfiguration(d *pluginsdk.ResourceData) (bool, datafa return false, nil } +func expandDataFactoryGlobalParameters(input []interface{}) (map[string]*datafactory.GlobalParameterSpecification, error) { + if len(input) == 0 { + return nil, nil + } + result := make(map[string]*datafactory.GlobalParameterSpecification) + for _, item := range input { + if item == nil { + continue + } + v := item.(map[string]interface{}) + + name := v["name"].(string) + if _, ok := v[name]; ok { + return nil, fmt.Errorf("duplicate parameter name") + } + + result[name] = &datafactory.GlobalParameterSpecification{ + Type: datafactory.GlobalParameterType(v["type"].(string)), + Value: v["value"].(string), + } + } + return result, nil +} + func flattenDataFactoryRepoConfiguration(factory *datafactory.Factory) (datafactory.TypeBasicFactoryRepoConfiguration, []interface{}) { result := make([]interface{}, 0) @@ -508,3 +598,31 @@ func flattenDataFactoryIdentity(identity *datafactory.FactoryIdentity) (interfac }, }, nil } + +func flattenDataFactoryGlobalParameters(input map[string]*datafactory.GlobalParameterSpecification) []interface{} { + if len(input) == 0 { + return []interface{}{} + } + result := make([]interface{}, 0) + for name, item := range input { + result = append(result, map[string]interface{}{ + "name": name, + "type": string(item.Type), + "value": item.Value, + }) + } + return result +} + +// Only one VNet is allowed per factory +func getManagedVirtualNetworkName(ctx context.Context, client *datafactory.ManagedVirtualNetworksClient, resourceGroup, factoryName string) (*string, error) { + resp, err := client.ListByFactory(ctx, resourceGroup, factoryName) + if err != nil { + return nil, err + } + if len(resp.Values()) == 0 { + return nil, nil + } + managedVirtualNetwork := resp.Values()[0] + return managedVirtualNetwork.Name, nil +} diff --git a/azurerm/internal/services/datafactory/data_factory_resource_migration.go 
b/azurerm/internal/services/datafactory/data_factory_resource_migration.go deleted file mode 100644 index 11ab5f1f2b13..000000000000 --- a/azurerm/internal/services/datafactory/data_factory_resource_migration.go +++ /dev/null @@ -1,35 +0,0 @@ -package datafactory - -import ( - "fmt" - "log" - - "github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory" - "github.com/hashicorp/terraform-plugin-sdk/terraform" -) - -func ResourceDataFactoryMigrateState( - v int, is *terraform.InstanceState, _ interface{}) (*terraform.InstanceState, error) { - switch v { - case 0: - log.Println("[INFO] Found AzureRM DataFactory State v0; migrating to v1") - return migrateDataFactoryStateV0toV1(is) - default: - return is, fmt.Errorf("Unexpected schema version: %d", v) - } -} - -func migrateDataFactoryStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) { - if is.Empty() { - log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") - return is, nil - } - - log.Printf("[DEBUG] ARM Data Factory Attributes before Migration: %#v", is.Attributes) - - is.Attributes["public_network_enabled"] = string(datafactory.PublicNetworkAccessEnabled) - - log.Printf("[DEBUG] ARM Data Factory Attributes after State Migration: %#v", is.Attributes) - - return is, nil -} diff --git a/azurerm/internal/services/datafactory/data_factory_resource_test.go b/azurerm/internal/services/datafactory/data_factory_resource_test.go index 11b61714e51b..70f8a649e542 100644 --- a/azurerm/internal/services/datafactory/data_factory_resource_test.go +++ b/azurerm/internal/services/datafactory/data_factory_resource_test.go @@ -5,10 +5,10 @@ import ( "fmt" "testing" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/datafactory/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -195,15 +195,93 @@ func TestAccDataFactory_keyVaultKeyEncryption(t *testing.T) { }) } +func TestAccDataFactory_globalParameter(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_data_factory", "test") + r := DataFactoryResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.globalParameter(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccDataFactory_globalParameterUpdate(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_data_factory", "test") + r := DataFactoryResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.globalParameter(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + }) +} + +func TestAccDataFactory_managedVirtualNetwork(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_data_factory", "test") + r := DataFactoryResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.managedVirtualNetwork(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccDataFactory_managedVirtualNetworkUpdated(t *testing.T) { + data := acceptance.BuildTestData(t, 
"azurerm_data_factory", "test") + r := DataFactoryResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.managedVirtualNetwork(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + func (t DataFactoryResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { - id, err := azure.ParseAzureResourceID(state.ID) + id, err := parse.DataFactoryID(state.ID) if err != nil { return nil, err } - resourceGroup := id.ResourceGroup - name := id.Path["factories"] - resp, err := clients.DataFactory.FactoriesClient.Get(ctx, resourceGroup, name, "") + resp, err := clients.DataFactory.FactoriesClient.Get(ctx, id.ResourceGroup, id.FactoryName, "") if err != nil { return nil, fmt.Errorf("reading Data Factory (%s): %+v", id, err) } @@ -212,14 +290,12 @@ func (t DataFactoryResource) Exists(ctx context.Context, clients *clients.Client } func (DataFactoryResource) Destroy(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { - id, err := azure.ParseAzureResourceID(state.ID) + id, err := parse.DataFactoryID(state.ID) if err != nil { return nil, err } - resourceGroup := id.ResourceGroup - name := id.Path["factories"] - resp, err := client.DataFactory.FactoriesClient.Delete(ctx, resourceGroup, name) + resp, err := client.DataFactory.FactoriesClient.Delete(ctx, id.ResourceGroup, id.FactoryName) if err != nil { if !utils.ResponseWasNotFound(resp) { return nil, fmt.Errorf("delete on dataFactoryClient: %+v", err) @@ -241,7 +317,7 @@ resource "azurerm_resource_group" "test" { } resource "azurerm_data_factory" "test" { - name = "acctestdf%d" + name = "acctestDF%d" location = azurerm_resource_group.test.location resource_group_name = 
azurerm_resource_group.test.name } @@ -260,7 +336,7 @@ resource "azurerm_resource_group" "test" { } resource "azurerm_data_factory" "test" { - name = "acctestdf%d" + name = "acctestDF%d" location = azurerm_resource_group.test.location resource_group_name = azurerm_resource_group.test.name @@ -283,7 +359,7 @@ resource "azurerm_resource_group" "test" { } resource "azurerm_data_factory" "test" { - name = "acctestdf%d" + name = "acctestDF%d" location = azurerm_resource_group.test.location resource_group_name = azurerm_resource_group.test.name @@ -307,7 +383,7 @@ resource "azurerm_resource_group" "test" { } resource "azurerm_data_factory" "test" { - name = "acctestdf%d" + name = "acctestDF%d" location = azurerm_resource_group.test.location resource_group_name = azurerm_resource_group.test.name @@ -330,7 +406,7 @@ resource "azurerm_resource_group" "test" { } resource "azurerm_data_factory" "test" { - name = "acctestdf%d" + name = "acctestDF%d" location = azurerm_resource_group.test.location resource_group_name = azurerm_resource_group.test.name @@ -357,7 +433,7 @@ resource "azurerm_resource_group" "test" { } resource "azurerm_data_factory" "test" { - name = "acctestdf%d" + name = "acctestDF%d" location = azurerm_resource_group.test.location resource_group_name = azurerm_resource_group.test.name @@ -384,7 +460,7 @@ resource "azurerm_resource_group" "test" { } resource "azurerm_data_factory" "test" { - name = "acctestdf%d" + name = "acctestDF%d" location = azurerm_resource_group.test.location resource_group_name = azurerm_resource_group.test.name @@ -508,3 +584,78 @@ resource "azurerm_data_factory" "test" { } `, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger) } + +func (DataFactoryResource) globalParameter(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-df-%d" + location = "%s" +} + +resource 
"azurerm_data_factory" "test" { + name = "acctestdf%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + + global_parameter { + name = "intVal" + type = "Int" + value = "3" + } + + global_parameter { + name = "stringVal" + type = "String" + value = "foo" + } + + global_parameter { + name = "boolVal" + type = "Bool" + value = "true" + } + + global_parameter { + name = "floatVal" + type = "Float" + value = "3.0" + } + + global_parameter { + name = "arrayVal" + type = "Array" + value = "[\"a\", \"b\", \"c\"]" + } + + global_parameter { + name = "objectVal" + type = "Object" + value = "{'name': 'value'}" + } +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger) +} + +func (DataFactoryResource) managedVirtualNetwork(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-df-%d" + location = "%s" +} + +resource "azurerm_data_factory" "test" { + name = "acctestDF%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + managed_virtual_network_enabled = true +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger) +} diff --git a/azurerm/internal/services/datafactory/data_factory_trigger_blob_event_resource.go b/azurerm/internal/services/datafactory/data_factory_trigger_blob_event_resource.go new file mode 100644 index 000000000000..599ee099b99d --- /dev/null +++ b/azurerm/internal/services/datafactory/data_factory_trigger_blob_event_resource.go @@ -0,0 +1,340 @@ +package datafactory + +import ( + "fmt" + "time" + + "github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/datafactory/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/datafactory/validate" + storageValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/storage/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func resourceDataFactoryTriggerBlobEvent() *pluginsdk.Resource { + return &pluginsdk.Resource{ + Create: resourceDataFactoryTriggerBlobEventCreateUpdate, + Read: resourceDataFactoryTriggerBlobEventRead, + Update: resourceDataFactoryTriggerBlobEventCreateUpdate, + Delete: resourceDataFactoryTriggerBlobEventDelete, + + Importer: pluginsdk.ImporterValidatingResourceId(func(id string) error { + _, err := parse.TriggerID(id) + return err + }), + + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), + }, + + Schema: map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.DataFactoryPipelineAndTriggerName(), + }, + + "data_factory_id": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.DataFactoryID, + }, + + "storage_account_id": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: storageValidate.StorageAccountID, + }, + + "events": { + Type: pluginsdk.TypeSet, + Required: true, + MinItems: 1, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + 
ValidateFunc: validation.StringInSlice([]string{ + "Microsoft.Storage.BlobCreated", + "Microsoft.Storage.BlobDeleted", + }, false), + }, + }, + + "pipeline": { + Type: pluginsdk.TypeSet, + Required: true, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validate.DataFactoryPipelineAndTriggerName(), + }, + + "parameters": { + Type: pluginsdk.TypeMap, + Optional: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + }, + }, + }, + }, + }, + + "additional_properties": { + Type: pluginsdk.TypeMap, + Optional: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + }, + }, + + "annotations": { + Type: pluginsdk.TypeList, + Optional: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + ValidateFunc: validation.StringIsNotEmpty, + }, + }, + + "blob_path_begins_with": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringIsNotEmpty, + AtLeastOneOf: []string{"blob_path_begins_with", "blob_path_ends_with"}, + }, + + "blob_path_ends_with": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringIsNotEmpty, + AtLeastOneOf: []string{"blob_path_begins_with", "blob_path_ends_with"}, + }, + + "description": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + + "ignore_empty_blobs": { + Type: pluginsdk.TypeBool, + Optional: true, + }, + }, + } +} + +func resourceDataFactoryTriggerBlobEventCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).DataFactory.TriggersClient + subscriptionId := meta.(*clients.Client).Account.SubscriptionId + ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) + defer cancel() + + dataFactoryId, err := parse.DataFactoryID(d.Get("data_factory_id").(string)) + if err != nil { + return err + } + + id := parse.NewTriggerID(subscriptionId, 
dataFactoryId.ResourceGroup, dataFactoryId.FactoryName, d.Get("name").(string)) + if d.IsNewResource() { + existing, err := client.Get(ctx, id.ResourceGroup, id.FactoryName, id.Name, "") + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("checking for presence of existing %s: %+v", id, err) + } + } + if !utils.ResponseWasNotFound(existing.Response) { + return tf.ImportAsExistsError("azurerm_data_factory_trigger_blob_event", id.ID()) + } + } + + blobEventProps := &datafactory.BlobEventsTrigger{ + BlobEventsTriggerTypeProperties: &datafactory.BlobEventsTriggerTypeProperties{ + IgnoreEmptyBlobs: utils.Bool(d.Get("ignore_empty_blobs").(bool)), + Events: expandDataFactoryTriggerBlobEvents(d.Get("events").(*pluginsdk.Set).List()), + Scope: utils.String(d.Get("storage_account_id").(string)), + }, + Description: utils.String(d.Get("description").(string)), + Pipelines: expandDataFactoryTriggerPipeline(d.Get("pipeline").(*pluginsdk.Set).List()), + Type: datafactory.TypeBasicTriggerTypeBlobEventsTrigger, + } + + if v, ok := d.GetOk("annotations"); ok { + annotations := v.([]interface{}) + blobEventProps.Annotations = &annotations + } + + if v, ok := d.GetOk("additional_properties"); ok { + blobEventProps.AdditionalProperties = v.(map[string]interface{}) + } + + if v, ok := d.GetOk("blob_path_begins_with"); ok { + blobEventProps.BlobPathBeginsWith = utils.String(v.(string)) + } + + if v, ok := d.GetOk("blob_path_ends_with"); ok { + blobEventProps.BlobPathEndsWith = utils.String(v.(string)) + } + + trigger := datafactory.TriggerResource{ + Properties: blobEventProps, + } + + if _, err := client.CreateOrUpdate(ctx, id.ResourceGroup, id.FactoryName, id.Name, trigger, ""); err != nil { + return fmt.Errorf("creating %s: %+v", id, err) + } + + d.SetId(id.ID()) + + return resourceDataFactoryTriggerBlobEventRead(d, meta) +} + +func resourceDataFactoryTriggerBlobEventRead(d *pluginsdk.ResourceData, meta interface{}) error { + client := 
meta.(*clients.Client).DataFactory.TriggersClient + subscriptionId := meta.(*clients.Client).Account.SubscriptionId + ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.TriggerID(d.Id()) + if err != nil { + return err + } + + resp, err := client.Get(ctx, id.ResourceGroup, id.FactoryName, id.Name, "") + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + d.SetId("") + return nil + } + return fmt.Errorf("retrieving %s: %+v", id, err) + } + + blobEventsTrigger, ok := resp.Properties.AsBlobEventsTrigger() + if !ok { + return fmt.Errorf("classifiying %s: Expected: %q", id, datafactory.TypeBasicTriggerTypeBlobEventsTrigger) + } + + d.Set("name", id.Name) + d.Set("data_factory_id", parse.NewDataFactoryID(subscriptionId, id.ResourceGroup, id.FactoryName).ID()) + + d.Set("additional_properties", blobEventsTrigger.AdditionalProperties) + d.Set("description", blobEventsTrigger.Description) + + if err := d.Set("annotations", flattenDataFactoryAnnotations(blobEventsTrigger.Annotations)); err != nil { + return fmt.Errorf("setting `annotations`: %+v", err) + } + + if err := d.Set("pipeline", flattenDataFactoryTriggerPipeline(blobEventsTrigger.Pipelines)); err != nil { + return fmt.Errorf("setting `pipeline`: %+v", err) + } + + if props := blobEventsTrigger.BlobEventsTriggerTypeProperties; props != nil { + d.Set("storage_account_id", props.Scope) + d.Set("blob_path_begins_with", props.BlobPathBeginsWith) + d.Set("blob_path_ends_with", props.BlobPathEndsWith) + d.Set("ignore_empty_blobs", props.IgnoreEmptyBlobs) + + if err := d.Set("events", flattenDataFactoryTriggerBlobEvents(props.Events)); err != nil { + return fmt.Errorf("setting `events`: %+v", err) + } + } + + return nil +} + +func resourceDataFactoryTriggerBlobEventDelete(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).DataFactory.TriggersClient + ctx, cancel := 
timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.TriggerID(d.Id()) + if err != nil { + return err + } + + if _, err = client.Delete(ctx, id.ResourceGroup, id.FactoryName, id.Name); err != nil { + return fmt.Errorf("deleting %s: %+v", id, err) + } + + return nil +} + +func expandDataFactoryTriggerBlobEvents(input []interface{}) *[]datafactory.BlobEventTypes { + result := make([]datafactory.BlobEventTypes, 0) + for _, item := range input { + result = append(result, datafactory.BlobEventTypes(item.(string))) + } + return &result +} + +func expandDataFactoryTriggerPipeline(input []interface{}) *[]datafactory.TriggerPipelineReference { + if len(input) == 0 { + return nil + } + + result := make([]datafactory.TriggerPipelineReference, 0) + for _, item := range input { + raw := item.(map[string]interface{}) + + // issue https://github.com/hashicorp/terraform-plugin-sdk/issues/588 + // once it's resolved, we could remove the check empty logic + name := raw["name"].(string) + if name == "" { + continue + } + + result = append(result, datafactory.TriggerPipelineReference{ + PipelineReference: &datafactory.PipelineReference{ + ReferenceName: utils.String(raw["name"].(string)), + Type: utils.String("PipelineReference"), + }, + Parameters: raw["parameters"].(map[string]interface{}), + }) + } + return &result +} + +func flattenDataFactoryTriggerBlobEvents(input *[]datafactory.BlobEventTypes) []interface{} { + if input == nil { + return []interface{}{} + } + + result := make([]interface{}, 0) + for _, item := range *input { + result = append(result, string(item)) + } + return result +} + +func flattenDataFactoryTriggerPipeline(input *[]datafactory.TriggerPipelineReference) []interface{} { + if input == nil { + return []interface{}{} + } + + result := make([]interface{}, 0) + for _, item := range *input { + name := "" + if item.PipelineReference != nil && item.PipelineReference.ReferenceName != nil { + name = 
*item.PipelineReference.ReferenceName + } + + result = append(result, map[string]interface{}{ + "name": name, + "parameters": item.Parameters, + }) + } + return result +} diff --git a/azurerm/internal/services/datafactory/data_factory_trigger_blob_event_resource_test.go b/azurerm/internal/services/datafactory/data_factory_trigger_blob_event_resource_test.go new file mode 100644 index 000000000000..9debdfae75da --- /dev/null +++ b/azurerm/internal/services/datafactory/data_factory_trigger_blob_event_resource_test.go @@ -0,0 +1,211 @@ +package datafactory_test + +import ( + "context" + "fmt" + "testing" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/datafactory/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +type TriggerBlobEventResource struct { +} + +func TestAccDataFactoryTriggerBlobEvent_basic(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_data_factory_trigger_blob_event", "test") + r := TriggerBlobEventResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccDataFactoryTriggerBlobEvent_requiresImport(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_data_factory_trigger_blob_event", "test") + r := TriggerBlobEventResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + 
data.RequiresImportErrorStep(r.requiresImport), + }) +} + +func TestAccDataFactoryTriggerBlobEvent_complete(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_data_factory_trigger_blob_event", "test") + r := TriggerBlobEventResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.complete(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccDataFactoryTriggerBlobEvent_update(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_data_factory_trigger_blob_event", "test") + r := TriggerBlobEventResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.complete(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func (t TriggerBlobEventResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { + id, err := parse.TriggerID(state.ID) + if err != nil { + return nil, err + } + + resp, err := clients.DataFactory.TriggersClient.Get(ctx, id.ResourceGroup, id.FactoryName, id.Name, "") + if err != nil { + return nil, fmt.Errorf("reading %s: %+v", id, err) + } + + return utils.Bool(resp.ID != nil), nil +} + +func (r TriggerBlobEventResource) basic(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_data_factory_trigger_blob_event" "test" { + name = "acctestdf%d" + data_factory_id = azurerm_data_factory.test.id + storage_account_id = azurerm_storage_account.test.id + events = ["Microsoft.Storage.BlobCreated"] + blob_path_begins_with = "/abc/blobs" + + 
pipeline { + name = azurerm_data_factory_pipeline.test.name + } +} +`, r.template(data), data.RandomInteger) +} + +func (r TriggerBlobEventResource) requiresImport(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_data_factory_trigger_blob_event" "import" { + name = azurerm_data_factory_trigger_blob_event.test.name + data_factory_id = azurerm_data_factory_trigger_blob_event.test.data_factory_id + storage_account_id = azurerm_data_factory_trigger_blob_event.test.storage_account_id + events = azurerm_data_factory_trigger_blob_event.test.events + blob_path_begins_with = azurerm_data_factory_trigger_blob_event.test.blob_path_begins_with + + dynamic "pipeline" { + for_each = azurerm_data_factory_trigger_blob_event.test.pipeline + content { + name = pipeline.value.name + } + } +} +`, r.basic(data)) +} + +func (r TriggerBlobEventResource) complete(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_data_factory_trigger_blob_event" "test" { + name = "acctestdf%d" + data_factory_id = azurerm_data_factory.test.id + storage_account_id = azurerm_storage_account.test.id + events = ["Microsoft.Storage.BlobCreated", "Microsoft.Storage.BlobDeleted"] + blob_path_ends_with = ".txt" + ignore_empty_blobs = true + + annotations = ["test1", "test2", "test3"] + description = "test description" + + pipeline { + name = azurerm_data_factory_pipeline.test.name + parameters = { + Env = "Test" + } + } + + additional_properties = { + foo = "test1" + bar = "test2" + } +} +`, r.template(data), data.RandomInteger) +} + +func (TriggerBlobEventResource) template(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-df-%d" + location = "%s" +} + +resource "azurerm_data_factory" "test" { + name = "acctestdf%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name +} + +resource 
"azurerm_data_factory_pipeline" "test" { + name = "acctest%d" + resource_group_name = azurerm_resource_group.test.name + data_factory_name = azurerm_data_factory.test.name + + parameters = { + test = "testparameter" + } +} + +resource "azurerm_storage_account" "test" { + name = "acctestsa%s" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + account_tier = "Standard" + account_replication_type = "LRS" +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomString) +} diff --git a/azurerm/internal/services/datafactory/migration/data_factory.go b/azurerm/internal/services/datafactory/migration/data_factory.go index 3082b1ffc357..d99d8a500705 100644 --- a/azurerm/internal/services/datafactory/migration/data_factory.go +++ b/azurerm/internal/services/datafactory/migration/data_factory.go @@ -5,10 +5,12 @@ import ( "log" "github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/datafactory/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" ) var _ pluginsdk.StateUpgrade = DataFactoryV0ToV1{} +var _ pluginsdk.StateUpgrade = DataFactoryV1ToV2{} type DataFactoryV0ToV1 struct{} @@ -139,3 +141,159 @@ func (DataFactoryV0ToV1) UpgradeFunc() pluginsdk.StateUpgraderFunc { return rawState, nil } } + +type DataFactoryV1ToV2 struct{} + +func (DataFactoryV1ToV2) Schema() map[string]*pluginsdk.Schema { + return map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + }, + + "location": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + }, + + "resource_group_name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + }, + + "identity": { + Type: pluginsdk.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: 
&pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "type": { + Type: pluginsdk.TypeString, + Required: true, + }, + "identity_ids": { + Type: pluginsdk.TypeList, + Optional: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + }, + }, + "principal_id": { + Type: pluginsdk.TypeString, + Computed: true, + }, + "tenant_id": { + Type: pluginsdk.TypeString, + Computed: true, + }, + }, + }, + }, + + "github_configuration": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{"vsts_configuration"}, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "account_name": { + Type: pluginsdk.TypeString, + Required: true, + }, + "branch_name": { + Type: pluginsdk.TypeString, + Required: true, + }, + "git_url": { + Type: pluginsdk.TypeString, + Required: true, + }, + "repository_name": { + Type: pluginsdk.TypeString, + Required: true, + }, + "root_folder": { + Type: pluginsdk.TypeString, + Required: true, + }, + }, + }, + }, + + "vsts_configuration": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{"github_configuration"}, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "account_name": { + Type: pluginsdk.TypeString, + Required: true, + }, + "branch_name": { + Type: pluginsdk.TypeString, + Required: true, + }, + "project_name": { + Type: pluginsdk.TypeString, + Required: true, + }, + "repository_name": { + Type: pluginsdk.TypeString, + Required: true, + }, + "root_folder": { + Type: pluginsdk.TypeString, + Required: true, + }, + "tenant_id": { + Type: pluginsdk.TypeString, + Required: true, + }, + }, + }, + }, + "public_network_enabled": { + Type: pluginsdk.TypeBool, + Optional: true, + Default: true, + }, + + "customer_managed_key_id": { + Type: pluginsdk.TypeString, + Optional: true, + RequiredWith: []string{"identity.0.identity_ids"}, + }, + + "tags": { + Type: pluginsdk.TypeMap, + Optional: true, + Elem: &pluginsdk.Schema{ + Type: 
pluginsdk.TypeString, + }, + }, + } +} + +func (DataFactoryV1ToV2) UpgradeFunc() pluginsdk.StateUpgraderFunc { + return func(ctx context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { + log.Printf("[DEBUG] Updating `id` if resourceName is in upper case") + + oldId := rawState["id"].(string) + id, err := parse.DataFactoryID(oldId) + if err != nil { + return nil, err + } + + id.FactoryName = rawState["name"].(string) + rawState["id"] = id.ID() + + return rawState, nil + } +} diff --git a/azurerm/internal/services/datafactory/migration/data_factory_test.go b/azurerm/internal/services/datafactory/migration/data_factory_test.go new file mode 100644 index 000000000000..fd209429c9ed --- /dev/null +++ b/azurerm/internal/services/datafactory/migration/data_factory_test.go @@ -0,0 +1,33 @@ +package migration + +import ( + "context" + "testing" +) + +func TestDataFactoryMigrateState(t *testing.T) { + cases := map[string]struct { + StateVersion int + InputAttributes map[string]interface{} + ExpectedNewID string + }{ + "name_upper_case": { + StateVersion: 1, + InputAttributes: map[string]interface{}{ + "id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/resGroup1/providers/Microsoft.DataFactory/factories/acctest", + "name": "ACCTEST", + "location": "westeurope", + "resource_group_name": "resGroup1", + }, + ExpectedNewID: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/resGroup1/providers/Microsoft.DataFactory/factories/ACCTEST", + }, + } + + for _, tc := range cases { + newID, _ := DataFactoryV1ToV2{}.UpgradeFunc()(context.TODO(), tc.InputAttributes, nil) + + if newID["id"].(string) != tc.ExpectedNewID { + t.Fatalf("ID migration failed, expected %q, got: %q", tc.ExpectedNewID, newID["id"].(string)) + } + } +} diff --git a/azurerm/internal/services/datafactory/parse/data_factory.go b/azurerm/internal/services/datafactory/parse/data_factory.go new file mode 100644 index 
000000000000..b9dc28614eb3 --- /dev/null +++ b/azurerm/internal/services/datafactory/parse/data_factory.go @@ -0,0 +1,69 @@ +package parse + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "fmt" + "strings" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" +) + +type DataFactoryId struct { + SubscriptionId string + ResourceGroup string + FactoryName string +} + +func NewDataFactoryID(subscriptionId, resourceGroup, factoryName string) DataFactoryId { + return DataFactoryId{ + SubscriptionId: subscriptionId, + ResourceGroup: resourceGroup, + FactoryName: factoryName, + } +} + +func (id DataFactoryId) String() string { + segments := []string{ + fmt.Sprintf("Factory Name %q", id.FactoryName), + fmt.Sprintf("Resource Group %q", id.ResourceGroup), + } + segmentsStr := strings.Join(segments, " / ") + return fmt.Sprintf("%s: (%s)", "Data Factory", segmentsStr) +} + +func (id DataFactoryId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DataFactory/factories/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.FactoryName) +} + +// DataFactoryID parses a DataFactory ID into an DataFactoryId struct +func DataFactoryID(input string) (*DataFactoryId, error) { + id, err := azure.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := DataFactoryId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + if resourceId.FactoryName, err = id.PopSegment("factories"); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} diff --git 
a/azurerm/internal/services/datafactory/parse/data_factory_test.go b/azurerm/internal/services/datafactory/parse/data_factory_test.go new file mode 100644 index 000000000000..95f11fb8c40a --- /dev/null +++ b/azurerm/internal/services/datafactory/parse/data_factory_test.go @@ -0,0 +1,112 @@ +package parse + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "testing" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/resourceid" +) + +var _ resourceid.Formatter = DataFactoryId{} + +func TestDataFactoryIDFormatter(t *testing.T) { + actual := NewDataFactoryID("12345678-1234-9876-4563-123456789012", "resGroup1", "facName1").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DataFactory/factories/facName1" + if actual != expected { + t.Fatalf("Expected %q but got %q", expected, actual) + } +} + +func TestDataFactoryID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *DataFactoryId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/", + Error: true, + }, + + { + // missing FactoryName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DataFactory/", + Error: true, + }, + + { + // missing value for FactoryName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DataFactory/factories/", + Error: true, + }, + + { + // valid + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DataFactory/factories/facName1", + Expected: &DataFactoryId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroup: "resGroup1", + FactoryName: "facName1", + }, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/RESGROUP1/PROVIDERS/MICROSOFT.DATAFACTORY/FACTORIES/FACNAME1", + Error: true, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := DataFactoryID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.FactoryName != v.Expected.FactoryName { + t.Fatalf("Expected %q but got %q for FactoryName", v.Expected.FactoryName, actual.FactoryName) + } + } +} diff --git a/azurerm/internal/services/datafactory/parse/managed_private_endpoint.go b/azurerm/internal/services/datafactory/parse/managed_private_endpoint.go new file mode 100644 index 000000000000..4f88bfbb701c --- /dev/null +++ b/azurerm/internal/services/datafactory/parse/managed_private_endpoint.go @@ -0,0 +1,81 @@ +package parse + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "fmt" + "strings" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" +) + +type ManagedPrivateEndpointId struct { + SubscriptionId string + ResourceGroup string + FactoryName string + ManagedVirtualNetworkName string + Name string +} + +func 
NewManagedPrivateEndpointID(subscriptionId, resourceGroup, factoryName, managedVirtualNetworkName, name string) ManagedPrivateEndpointId { + return ManagedPrivateEndpointId{ + SubscriptionId: subscriptionId, + ResourceGroup: resourceGroup, + FactoryName: factoryName, + ManagedVirtualNetworkName: managedVirtualNetworkName, + Name: name, + } +} + +func (id ManagedPrivateEndpointId) String() string { + segments := []string{ + fmt.Sprintf("Name %q", id.Name), + fmt.Sprintf("Managed Virtual Network Name %q", id.ManagedVirtualNetworkName), + fmt.Sprintf("Factory Name %q", id.FactoryName), + fmt.Sprintf("Resource Group %q", id.ResourceGroup), + } + segmentsStr := strings.Join(segments, " / ") + return fmt.Sprintf("%s: (%s)", "Managed Private Endpoint", segmentsStr) +} + +func (id ManagedPrivateEndpointId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DataFactory/factories/%s/managedVirtualNetworks/%s/managedPrivateEndpoints/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.FactoryName, id.ManagedVirtualNetworkName, id.Name) +} + +// ManagedPrivateEndpointID parses a ManagedPrivateEndpoint ID into an ManagedPrivateEndpointId struct +func ManagedPrivateEndpointID(input string) (*ManagedPrivateEndpointId, error) { + id, err := azure.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := ManagedPrivateEndpointId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + if resourceId.FactoryName, err = id.PopSegment("factories"); err != nil { + return nil, err + } + if resourceId.ManagedVirtualNetworkName, err = id.PopSegment("managedVirtualNetworks"); err != nil { + return nil, err + } + if resourceId.Name, err = 
id.PopSegment("managedPrivateEndpoints"); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} diff --git a/azurerm/internal/services/datafactory/parse/managed_private_endpoint_test.go b/azurerm/internal/services/datafactory/parse/managed_private_endpoint_test.go new file mode 100644 index 000000000000..e05d14330822 --- /dev/null +++ b/azurerm/internal/services/datafactory/parse/managed_private_endpoint_test.go @@ -0,0 +1,144 @@ +package parse + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "testing" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/resourceid" +) + +var _ resourceid.Formatter = ManagedPrivateEndpointId{} + +func TestManagedPrivateEndpointIDFormatter(t *testing.T) { + actual := NewManagedPrivateEndpointID("12345678-1234-9876-4563-123456789012", "resGroup1", "factory1", "vnet1", "endpoint1").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DataFactory/factories/factory1/managedVirtualNetworks/vnet1/managedPrivateEndpoints/endpoint1" + if actual != expected { + t.Fatalf("Expected %q but got %q", expected, actual) + } +} + +func TestManagedPrivateEndpointID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ManagedPrivateEndpointId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/", + Error: true, + }, + + { + // missing FactoryName + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DataFactory/", + Error: true, + }, + + { + // missing value for FactoryName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DataFactory/factories/", + Error: true, + }, + + { + // missing ManagedVirtualNetworkName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DataFactory/factories/factory1/", + Error: true, + }, + + { + // missing value for ManagedVirtualNetworkName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DataFactory/factories/factory1/managedVirtualNetworks/", + Error: true, + }, + + { + // missing Name + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DataFactory/factories/factory1/managedVirtualNetworks/vnet1/", + Error: true, + }, + + { + // missing value for Name + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DataFactory/factories/factory1/managedVirtualNetworks/vnet1/managedPrivateEndpoints/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DataFactory/factories/factory1/managedVirtualNetworks/vnet1/managedPrivateEndpoints/endpoint1", + Expected: &ManagedPrivateEndpointId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroup: "resGroup1", + FactoryName: "factory1", + ManagedVirtualNetworkName: "vnet1", + Name: "endpoint1", + }, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/RESGROUP1/PROVIDERS/MICROSOFT.DATAFACTORY/FACTORIES/FACTORY1/MANAGEDVIRTUALNETWORKS/VNET1/MANAGEDPRIVATEENDPOINTS/ENDPOINT1", + Error: true, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) 
+ + actual, err := ManagedPrivateEndpointID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.FactoryName != v.Expected.FactoryName { + t.Fatalf("Expected %q but got %q for FactoryName", v.Expected.FactoryName, actual.FactoryName) + } + if actual.ManagedVirtualNetworkName != v.Expected.ManagedVirtualNetworkName { + t.Fatalf("Expected %q but got %q for ManagedVirtualNetworkName", v.Expected.ManagedVirtualNetworkName, actual.ManagedVirtualNetworkName) + } + if actual.Name != v.Expected.Name { + t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + } + } +} diff --git a/azurerm/internal/services/datafactory/parse/trigger.go b/azurerm/internal/services/datafactory/parse/trigger.go new file mode 100644 index 000000000000..94ffdafc3c75 --- /dev/null +++ b/azurerm/internal/services/datafactory/parse/trigger.go @@ -0,0 +1,75 @@ +package parse + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "fmt" + "strings" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" +) + +type TriggerId struct { + SubscriptionId string + ResourceGroup string + FactoryName string + Name string +} + +func NewTriggerID(subscriptionId, resourceGroup, factoryName, name string) TriggerId { + return TriggerId{ + SubscriptionId: subscriptionId, + ResourceGroup: resourceGroup, + FactoryName: factoryName, + Name: name, + } +} + +func (id TriggerId) String() string { + segments := []string{ + fmt.Sprintf("Name %q", id.Name), + 
fmt.Sprintf("Factory Name %q", id.FactoryName), + fmt.Sprintf("Resource Group %q", id.ResourceGroup), + } + segmentsStr := strings.Join(segments, " / ") + return fmt.Sprintf("%s: (%s)", "Trigger", segmentsStr) +} + +func (id TriggerId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DataFactory/factories/%s/triggers/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.FactoryName, id.Name) +} + +// TriggerID parses a Trigger ID into an TriggerId struct +func TriggerID(input string) (*TriggerId, error) { + id, err := azure.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := TriggerId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + if resourceId.FactoryName, err = id.PopSegment("factories"); err != nil { + return nil, err + } + if resourceId.Name, err = id.PopSegment("triggers"); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} diff --git a/azurerm/internal/services/datafactory/parse/trigger_test.go b/azurerm/internal/services/datafactory/parse/trigger_test.go new file mode 100644 index 000000000000..3af5d0d37e78 --- /dev/null +++ b/azurerm/internal/services/datafactory/parse/trigger_test.go @@ -0,0 +1,128 @@ +package parse + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "testing" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/resourceid" +) + +var _ resourceid.Formatter = TriggerId{} + +func TestTriggerIDFormatter(t *testing.T) { + actual := NewTriggerID("12345678-1234-9876-4563-123456789012", "resGroup1", "factory1", 
"trigger1").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DataFactory/factories/factory1/triggers/trigger1" + if actual != expected { + t.Fatalf("Expected %q but got %q", expected, actual) + } +} + +func TestTriggerID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *TriggerId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/", + Error: true, + }, + + { + // missing FactoryName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DataFactory/", + Error: true, + }, + + { + // missing value for FactoryName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DataFactory/factories/", + Error: true, + }, + + { + // missing Name + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DataFactory/factories/factory1/", + Error: true, + }, + + { + // missing value for Name + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DataFactory/factories/factory1/triggers/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DataFactory/factories/factory1/triggers/trigger1", + Expected: &TriggerId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroup: "resGroup1", + FactoryName: "factory1", + Name: "trigger1", + }, + }, + + { + // upper-cased + Input: 
"/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/RESGROUP1/PROVIDERS/MICROSOFT.DATAFACTORY/FACTORIES/FACTORY1/TRIGGERS/TRIGGER1", + Error: true, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := TriggerID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.FactoryName != v.Expected.FactoryName { + t.Fatalf("Expected %q but got %q for FactoryName", v.Expected.FactoryName, actual.FactoryName) + } + if actual.Name != v.Expected.Name { + t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + } + } +} diff --git a/azurerm/internal/services/datafactory/registration.go b/azurerm/internal/services/datafactory/registration.go index a9581269d7d5..5a9b509bbb1b 100644 --- a/azurerm/internal/services/datafactory/registration.go +++ b/azurerm/internal/services/datafactory/registration.go @@ -30,6 +30,7 @@ func (r Registration) SupportedResources() map[string]*pluginsdk.Resource { return map[string]*pluginsdk.Resource{ "azurerm_data_factory": resourceDataFactory(), "azurerm_data_factory_dataset_azure_blob": resourceDataFactoryDatasetAzureBlob(), + "azurerm_data_factory_dataset_binary": resourceDataFactoryDatasetBinary(), "azurerm_data_factory_dataset_cosmosdb_sqlapi": resourceDataFactoryDatasetCosmosDbSQLAPI(), "azurerm_data_factory_dataset_delimited_text": resourceDataFactoryDatasetDelimitedText(), "azurerm_data_factory_dataset_http": resourceDataFactoryDatasetHTTP(), @@ -39,27 +40,34 @@ func (r Registration) 
SupportedResources() map[string]*pluginsdk.Resource { "azurerm_data_factory_dataset_postgresql": resourceDataFactoryDatasetPostgreSQL(), "azurerm_data_factory_dataset_snowflake": resourceDataFactoryDatasetSnowflake(), "azurerm_data_factory_dataset_sql_server_table": resourceDataFactoryDatasetSQLServerTable(), + "azurerm_data_factory_custom_dataset": resourceDataFactoryCustomDataset(), "azurerm_data_factory_integration_runtime_managed": resourceDataFactoryIntegrationRuntimeManaged(), "azurerm_data_factory_integration_runtime_azure": resourceDataFactoryIntegrationRuntimeAzure(), "azurerm_data_factory_integration_runtime_azure_ssis": resourceDataFactoryIntegrationRuntimeAzureSsis(), "azurerm_data_factory_integration_runtime_self_hosted": resourceDataFactoryIntegrationRuntimeSelfHosted(), + "azurerm_data_factory_linked_custom_service": resourceDataFactoryLinkedCustomService(), "azurerm_data_factory_linked_service_azure_blob_storage": resourceDataFactoryLinkedServiceAzureBlobStorage(), "azurerm_data_factory_linked_service_azure_databricks": resourceDataFactoryLinkedServiceAzureDatabricks(), - "azurerm_data_factory_linked_service_azure_table_storage": resourceDataFactoryLinkedServiceAzureTableStorage(), "azurerm_data_factory_linked_service_azure_file_storage": resourceDataFactoryLinkedServiceAzureFileStorage(), - "azurerm_data_factory_linked_service_azure_sql_database": resourceDataFactoryLinkedServiceAzureSQLDatabase(), "azurerm_data_factory_linked_service_azure_function": resourceDataFactoryLinkedServiceAzureFunction(), + "azurerm_data_factory_linked_service_azure_search": resourceDataFactoryLinkedServiceAzureSearch(), + "azurerm_data_factory_linked_service_azure_sql_database": resourceDataFactoryLinkedServiceAzureSQLDatabase(), + "azurerm_data_factory_linked_service_azure_table_storage": resourceDataFactoryLinkedServiceAzureTableStorage(), "azurerm_data_factory_linked_service_cosmosdb": resourceDataFactoryLinkedServiceCosmosDb(), 
"azurerm_data_factory_linked_service_data_lake_storage_gen2": resourceDataFactoryLinkedServiceDataLakeStorageGen2(), "azurerm_data_factory_linked_service_key_vault": resourceDataFactoryLinkedServiceKeyVault(), + "azurerm_data_factory_linked_service_kusto": resourceDataFactoryLinkedServiceKusto(), "azurerm_data_factory_linked_service_mysql": resourceDataFactoryLinkedServiceMySQL(), + "azurerm_data_factory_linked_service_odata": resourceArmDataFactoryLinkedServiceOData(), "azurerm_data_factory_linked_service_postgresql": resourceDataFactoryLinkedServicePostgreSQL(), "azurerm_data_factory_linked_service_sftp": resourceDataFactoryLinkedServiceSFTP(), "azurerm_data_factory_linked_service_snowflake": resourceDataFactoryLinkedServiceSnowflake(), "azurerm_data_factory_linked_service_sql_server": resourceDataFactoryLinkedServiceSQLServer(), "azurerm_data_factory_linked_service_synapse": resourceDataFactoryLinkedServiceSynapse(), "azurerm_data_factory_linked_service_web": resourceDataFactoryLinkedServiceWeb(), + "azurerm_data_factory_managed_private_endpoint": resourceDataFactoryManagedPrivateEndpoint(), "azurerm_data_factory_pipeline": resourceDataFactoryPipeline(), + "azurerm_data_factory_trigger_blob_event": resourceDataFactoryTriggerBlobEvent(), "azurerm_data_factory_trigger_schedule": resourceDataFactoryTriggerSchedule(), } } diff --git a/azurerm/internal/services/datafactory/resourceids.go b/azurerm/internal/services/datafactory/resourceids.go index e266b38d5dd1..0da1d575577b 100644 --- a/azurerm/internal/services/datafactory/resourceids.go +++ b/azurerm/internal/services/datafactory/resourceids.go @@ -1,5 +1,8 @@ package datafactory +//go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=DataFactory -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DataFactory/factories/facName1 +//go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=DataSet 
-id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DataFactory/factories/facName1/datasets/dataSet1 //go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=IntegrationRuntime -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DataFactory/factories/factory1/integrationruntimes/runtime1 //go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=LinkedService -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DataFactory/factories/factory1/linkedservices/linkedService1 -//go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=DataSet -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DataFactory/factories/facName1/datasets/dataSet1 +//go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=ManagedPrivateEndpoint -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DataFactory/factories/factory1/managedVirtualNetworks/vnet1/managedPrivateEndpoints/endpoint1 +//go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=Trigger -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DataFactory/factories/factory1/triggers/trigger1 diff --git a/azurerm/internal/services/datafactory/validate/data_factory_id.go b/azurerm/internal/services/datafactory/validate/data_factory_id.go new file mode 100644 index 000000000000..a0bb3400d788 --- /dev/null +++ b/azurerm/internal/services/datafactory/validate/data_factory_id.go @@ -0,0 +1,23 @@ +package validate + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "fmt" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/datafactory/parse" +) + +func 
DataFactoryID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := parse.DataFactoryID(v); err != nil { + errors = append(errors, err) + } + + return +} diff --git a/azurerm/internal/services/datafactory/validate/data_factory_id_test.go b/azurerm/internal/services/datafactory/validate/data_factory_id_test.go new file mode 100644 index 000000000000..b691f6e8dc9c --- /dev/null +++ b/azurerm/internal/services/datafactory/validate/data_factory_id_test.go @@ -0,0 +1,76 @@ +package validate + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import "testing" + +func TestDataFactoryID(t *testing.T) { + cases := []struct { + Input string + Valid bool + }{ + + { + // empty + Input: "", + Valid: false, + }, + + { + // missing SubscriptionId + Input: "/", + Valid: false, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Valid: false, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/", + Valid: false, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/", + Valid: false, + }, + + { + // missing FactoryName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DataFactory/", + Valid: false, + }, + + { + // missing value for FactoryName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DataFactory/factories/", + Valid: false, + }, + + { + // valid + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DataFactory/factories/facName1", + Valid: true, + }, + + { + // upper-cased + Input: 
"/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/RESGROUP1/PROVIDERS/MICROSOFT.DATAFACTORY/FACTORIES/FACNAME1", + Valid: false, + }, + } + for _, tc := range cases { + t.Logf("[DEBUG] Testing Value %s", tc.Input) + _, errors := DataFactoryID(tc.Input, "test") + valid := len(errors) == 0 + + if tc.Valid != valid { + t.Fatalf("Expected %t but got %t", tc.Valid, valid) + } + } +} diff --git a/azurerm/internal/services/datafactory/validate/datafactory.go b/azurerm/internal/services/datafactory/validate/datafactory.go index 2eacab343c50..1a66e1ea2013 100644 --- a/azurerm/internal/services/datafactory/validate/datafactory.go +++ b/azurerm/internal/services/datafactory/validate/datafactory.go @@ -28,3 +28,19 @@ func DataFactoryName() pluginsdk.SchemaValidateFunc { return warnings, errors } } + +func DataFactoryManagedPrivateEndpointName() pluginsdk.SchemaValidateFunc { + return func(i interface{}, k string) (warnings []string, errors []error) { + v, ok := i.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", k)) + return + } + + if !regexp.MustCompile(`^[A-Za-z0-9_]+$`).MatchString(v) { + errors = append(errors, fmt.Errorf("invalid Data Factory Managed Private Endpoint name, must match the regular expression ^[A-Za-z0-9_]+")) + } + + return warnings, errors + } +} diff --git a/azurerm/internal/services/datafactory/validate/datafactory_test.go b/azurerm/internal/services/datafactory/validate/datafactory_test.go index b0e05dbcfdc9..1a864aa34df8 100644 --- a/azurerm/internal/services/datafactory/validate/datafactory_test.go +++ b/azurerm/internal/services/datafactory/validate/datafactory_test.go @@ -55,3 +55,49 @@ func TestValidateDataFactoryName(t *testing.T) { } } } + +func TestValidateDataFactoryManagedPrivateEndpointName(t *testing.T) { + cases := []struct { + Input string + Valid bool + }{ + { + // empty + Input: "", + Valid: false, + }, + { + // invalid character + Input: "/", + Valid: false, + }, + { + // invalid 
character + Input: "ab-", + Valid: false, + }, + { + // invalid character + Input: "ab*", + Valid: false, + }, + { + // valid + Input: "Aa1", + Valid: true, + }, + { + // valid + Input: "a_", + Valid: true, + }, + } + for _, tc := range cases { + _, errors := DataFactoryManagedPrivateEndpointName()(tc.Input, "name") + valid := len(errors) == 0 + + if tc.Valid != valid { + t.Fatalf("Expected %t but got %t", tc.Valid, valid) + } + } +} diff --git a/azurerm/internal/services/datafactory/validate/managed_private_endpoint_id.go b/azurerm/internal/services/datafactory/validate/managed_private_endpoint_id.go new file mode 100644 index 000000000000..175c5b827147 --- /dev/null +++ b/azurerm/internal/services/datafactory/validate/managed_private_endpoint_id.go @@ -0,0 +1,23 @@ +package validate + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "fmt" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/datafactory/parse" +) + +func ManagedPrivateEndpointID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := parse.ManagedPrivateEndpointID(v); err != nil { + errors = append(errors, err) + } + + return +} diff --git a/azurerm/internal/services/datafactory/validate/managed_private_endpoint_id_test.go b/azurerm/internal/services/datafactory/validate/managed_private_endpoint_id_test.go new file mode 100644 index 000000000000..50805600c88b --- /dev/null +++ b/azurerm/internal/services/datafactory/validate/managed_private_endpoint_id_test.go @@ -0,0 +1,100 @@ +package validate + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import "testing" + +func TestManagedPrivateEndpointID(t *testing.T) { + cases := []struct { + Input string + Valid bool + }{ + + { + // empty + Input: "", + Valid: false, + }, 
+ + { + // missing SubscriptionId + Input: "/", + Valid: false, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Valid: false, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/", + Valid: false, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/", + Valid: false, + }, + + { + // missing FactoryName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DataFactory/", + Valid: false, + }, + + { + // missing value for FactoryName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DataFactory/factories/", + Valid: false, + }, + + { + // missing ManagedVirtualNetworkName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DataFactory/factories/factory1/", + Valid: false, + }, + + { + // missing value for ManagedVirtualNetworkName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DataFactory/factories/factory1/managedVirtualNetworks/", + Valid: false, + }, + + { + // missing Name + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DataFactory/factories/factory1/managedVirtualNetworks/vnet1/", + Valid: false, + }, + + { + // missing value for Name + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DataFactory/factories/factory1/managedVirtualNetworks/vnet1/managedPrivateEndpoints/", + Valid: false, + }, + + { + // valid + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DataFactory/factories/factory1/managedVirtualNetworks/vnet1/managedPrivateEndpoints/endpoint1", + Valid: true, + }, + + { + // upper-cased + Input: 
"/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/RESGROUP1/PROVIDERS/MICROSOFT.DATAFACTORY/FACTORIES/FACTORY1/MANAGEDVIRTUALNETWORKS/VNET1/MANAGEDPRIVATEENDPOINTS/ENDPOINT1", + Valid: false, + }, + } + for _, tc := range cases { + t.Logf("[DEBUG] Testing Value %s", tc.Input) + _, errors := ManagedPrivateEndpointID(tc.Input, "test") + valid := len(errors) == 0 + + if tc.Valid != valid { + t.Fatalf("Expected %t but got %t", tc.Valid, valid) + } + } +} diff --git a/azurerm/internal/services/datafactory/validate/trigger_id.go b/azurerm/internal/services/datafactory/validate/trigger_id.go new file mode 100644 index 000000000000..9fa44bd0afd6 --- /dev/null +++ b/azurerm/internal/services/datafactory/validate/trigger_id.go @@ -0,0 +1,23 @@ +package validate + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "fmt" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/datafactory/parse" +) + +func TriggerID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := parse.TriggerID(v); err != nil { + errors = append(errors, err) + } + + return +} diff --git a/azurerm/internal/services/datafactory/validate/trigger_id_test.go b/azurerm/internal/services/datafactory/validate/trigger_id_test.go new file mode 100644 index 000000000000..8bc9cd7e36db --- /dev/null +++ b/azurerm/internal/services/datafactory/validate/trigger_id_test.go @@ -0,0 +1,88 @@ +package validate + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import "testing" + +func TestTriggerID(t *testing.T) { + cases := []struct { + Input string + Valid bool + }{ + + { + // empty + Input: "", + Valid: false, + }, + + { + // missing SubscriptionId + Input: "/", + Valid: false, + }, + + { + // missing value for SubscriptionId 
+ Input: "/subscriptions/", + Valid: false, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/", + Valid: false, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/", + Valid: false, + }, + + { + // missing FactoryName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DataFactory/", + Valid: false, + }, + + { + // missing value for FactoryName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DataFactory/factories/", + Valid: false, + }, + + { + // missing Name + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DataFactory/factories/factory1/", + Valid: false, + }, + + { + // missing value for Name + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DataFactory/factories/factory1/triggers/", + Valid: false, + }, + + { + // valid + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DataFactory/factories/factory1/triggers/trigger1", + Valid: true, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/RESGROUP1/PROVIDERS/MICROSOFT.DATAFACTORY/FACTORIES/FACTORY1/TRIGGERS/TRIGGER1", + Valid: false, + }, + } + for _, tc := range cases { + t.Logf("[DEBUG] Testing Value %s", tc.Input) + _, errors := TriggerID(tc.Input, "test") + valid := len(errors) == 0 + + if tc.Valid != valid { + t.Fatalf("Expected %t but got %t", tc.Valid, valid) + } + } +} diff --git a/azurerm/internal/services/dataprotection/client/client.go b/azurerm/internal/services/dataprotection/client/client.go new file mode 100644 index 000000000000..f56b70752bba --- /dev/null +++ b/azurerm/internal/services/dataprotection/client/client.go @@ -0,0 +1,29 @@ +package 
client + +import ( + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/common" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/dataprotection/legacysdk/dataprotection" +) + +type Client struct { + BackupVaultClient *dataprotection.BackupVaultsClient + BackupPolicyClient *dataprotection.BackupPoliciesClient + BackupInstanceClient *dataprotection.BackupInstancesClient +} + +func NewClient(o *common.ClientOptions) *Client { + backupVaultClient := dataprotection.NewBackupVaultsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) + o.ConfigureClient(&backupVaultClient.Client, o.ResourceManagerAuthorizer) + + backupPolicyClient := dataprotection.NewBackupPoliciesClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) + o.ConfigureClient(&backupPolicyClient.Client, o.ResourceManagerAuthorizer) + + backupInstanceClient := dataprotection.NewBackupInstancesClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) + o.ConfigureClient(&backupInstanceClient.Client, o.ResourceManagerAuthorizer) + + return &Client{ + BackupVaultClient: &backupVaultClient, + BackupPolicyClient: &backupPolicyClient, + BackupInstanceClient: &backupInstanceClient, + } +} diff --git a/azurerm/internal/services/dataprotection/data_protection_backup_instance_postgresql_resource.go b/azurerm/internal/services/dataprotection/data_protection_backup_instance_postgresql_resource.go new file mode 100644 index 000000000000..16876d30e20c --- /dev/null +++ b/azurerm/internal/services/dataprotection/data_protection_backup_instance_postgresql_resource.go @@ -0,0 +1,224 @@ +package dataprotection + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/location" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/dataprotection/legacysdk/dataprotection" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/dataprotection/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/dataprotection/validate" + postgresParse "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/postgres/parse" + postgresValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/postgres/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + azSchema "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/schema" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func resourceDataProtectionBackupInstancePostgreSQL() *pluginsdk.Resource { + return &pluginsdk.Resource{ + Create: resourceDataProtectionBackupInstancePostgreSQLCreateUpdate, + Read: resourceDataProtectionBackupInstancePostgreSQLRead, + Update: resourceDataProtectionBackupInstancePostgreSQLCreateUpdate, + Delete: resourceDataProtectionBackupInstancePostgreSQLDelete, + + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), + }, + + Importer: azSchema.ValidateResourceIDPriorToImport(func(id string) error { + _, err := parse.BackupInstanceID(id) + return err + }), + + Schema: map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + }, + + "location": location.Schema(), + + "vault_id": { + Type: 
schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.BackupVaultID, + }, + + "database_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: postgresValidate.DatabaseID, + }, + + "backup_policy_id": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validate.BackupPolicyID, + }, + }, + } +} +func resourceDataProtectionBackupInstancePostgreSQLCreateUpdate(d *schema.ResourceData, meta interface{}) error { + subscriptionId := meta.(*clients.Client).Account.SubscriptionId + client := meta.(*clients.Client).DataProtection.BackupInstanceClient + ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) + defer cancel() + + name := d.Get("name").(string) + vaultId, _ := parse.BackupVaultID(d.Get("vault_id").(string)) + + id := parse.NewBackupInstanceID(subscriptionId, vaultId.ResourceGroup, vaultId.Name, name) + + if d.IsNewResource() { + existing, err := client.Get(ctx, id.BackupVaultName, id.ResourceGroup, id.Name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("checking for existing DataProtection BackupInstance (%q): %+v", id, err) + } + } + if !utils.ResponseWasNotFound(existing.Response) { + return tf.ImportAsExistsError("azurerm_data_protection_backup_instance_postgresql", id.ID()) + } + } + + databaseId, _ := postgresParse.DatabaseID(d.Get("database_id").(string)) + location := location.Normalize(d.Get("location").(string)) + serverId := postgresParse.NewServerID(databaseId.SubscriptionId, databaseId.ResourceGroup, databaseId.ServerName) + policyId, _ := parse.BackupPolicyID(d.Get("backup_policy_id").(string)) + + parameters := dataprotection.BackupInstanceResource{ + Properties: &dataprotection.BackupInstance{ + DataSourceInfo: &dataprotection.Datasource{ + DatasourceType: utils.String("Microsoft.DBforPostgreSQL/servers/databases"), + ObjectType: utils.String("Datasource"), + ResourceID: utils.String(databaseId.ID()), + 
ResourceLocation: utils.String(location), + ResourceName: utils.String(databaseId.Name), + ResourceType: utils.String("Microsoft.DBforPostgreSQL/servers/databases"), + ResourceURI: utils.String(""), + }, + DataSourceSetInfo: &dataprotection.DatasourceSet{ + DatasourceType: utils.String("Microsoft.DBForPostgreSQL/servers"), + ObjectType: utils.String("DatasourceSet"), + ResourceID: utils.String(serverId.ID()), + ResourceLocation: utils.String(location), + ResourceName: utils.String(serverId.Name), + ResourceType: utils.String("Microsoft.DBForPostgreSQL/servers"), + ResourceURI: utils.String(""), + }, + FriendlyName: utils.String(id.Name), + PolicyInfo: &dataprotection.PolicyInfo{ + PolicyID: utils.String(policyId.ID()), + }, + }, + } + future, err := client.CreateOrUpdate(ctx, id.BackupVaultName, id.ResourceGroup, id.Name, parameters) + if err != nil { + return fmt.Errorf("creating/updating DataProtection BackupInstance (%q): %+v", id, err) + } + + if err := future.WaitForCompletionRef(ctx, client.Client); err != nil { + return fmt.Errorf("waiting for creation/update of the DataProtection BackupInstance (%q): %+v", id, err) + } + + deadline, ok := ctx.Deadline() + if !ok { + return fmt.Errorf("context had no deadline") + } + stateConf := &pluginsdk.StateChangeConf{ + Pending: []string{string(dataprotection.ConfiguringProtection), string(dataprotection.UpdatingProtection)}, + Target: []string{string(dataprotection.ProtectionConfigured)}, + Refresh: policyProtectionStateRefreshFunc(ctx, client, id), + MinTimeout: 1 * time.Minute, + Timeout: time.Until(deadline), + } + + if _, err = stateConf.WaitForStateContext(ctx); err != nil { + return fmt.Errorf("waiting for BackupInstance(%q) policy protection to be completed: %+v", id, err) + } + + d.SetId(id.ID()) + return resourceDataProtectionBackupInstancePostgreSQLRead(d, meta) +} + +func resourceDataProtectionBackupInstancePostgreSQLRead(d *schema.ResourceData, meta interface{}) error { + client := 
meta.(*clients.Client).DataProtection.BackupInstanceClient + ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.BackupInstanceID(d.Id()) + if err != nil { + return err + } + + resp, err := client.Get(ctx, id.BackupVaultName, id.ResourceGroup, id.Name) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + log.Printf("[INFO] dataprotection %q does not exist - removing from state", d.Id()) + d.SetId("") + return nil + } + return fmt.Errorf("retrieving DataProtection BackupInstance (%q): %+v", id, err) + } + vaultId := parse.NewBackupVaultID(id.SubscriptionId, id.ResourceGroup, id.BackupVaultName) + d.Set("name", id.Name) + d.Set("vault_id", vaultId.ID()) + if props := resp.Properties; props != nil { + if props.DataSourceInfo != nil { + d.Set("database_id", props.DataSourceInfo.ResourceID) + d.Set("location", props.DataSourceInfo.ResourceLocation) + } + if props.PolicyInfo != nil { + d.Set("backup_policy_id", props.PolicyInfo.PolicyID) + } + } + return nil +} + +func resourceDataProtectionBackupInstancePostgreSQLDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).DataProtection.BackupInstanceClient + ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.BackupInstanceID(d.Id()) + if err != nil { + return err + } + + future, err := client.Delete(ctx, id.BackupVaultName, id.ResourceGroup, id.Name) + if err != nil { + return fmt.Errorf("deleting DataProtection BackupInstance (%q): %+v", id, err) + } + + if err := future.WaitForCompletionRef(ctx, client.Client); err != nil { + return fmt.Errorf("waiting for deletion of the DataProtection BackupInstance (%q): %+v", id.Name, err) + } + return nil +} + +func policyProtectionStateRefreshFunc(ctx context.Context, client *dataprotection.BackupInstancesClient, id parse.BackupInstanceId) pluginsdk.StateRefreshFunc { + return func() (interface{}, string, error) { + 
res, err := client.Get(ctx, id.BackupVaultName, id.ResourceGroup, id.Name) + if err != nil { + return nil, "", fmt.Errorf("retrieving DataProtection BackupInstance (%q): %+v", id, err) + } + if res.Properties == nil || res.Properties.ProtectionStatus == nil { + return nil, "", fmt.Errorf("error reading DataProtection BackupInstance (%q) protection status: %+v", id, err) + } + + return res, string(res.Properties.ProtectionStatus.Status), nil + } +} diff --git a/azurerm/internal/services/dataprotection/data_protection_backup_instance_postgresql_resource_test.go b/azurerm/internal/services/dataprotection/data_protection_backup_instance_postgresql_resource_test.go new file mode 100644 index 000000000000..f0722d547b2d --- /dev/null +++ b/azurerm/internal/services/dataprotection/data_protection_backup_instance_postgresql_resource_test.go @@ -0,0 +1,214 @@ +package dataprotection_test + +import ( + "context" + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/dataprotection/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +type DataProtectionBackupInstancePostgreSQLResource struct{} + +func TestAccDataProtectionBackupInstancePostgreSQL_basic(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_data_protection_backup_instance_postgresql", "test") + r := DataProtectionBackupInstancePostgreSQLResource{} + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + 
data.ImportStep(), + }) +} + +func TestAccDataProtectionBackupInstancePostgreSQL_requiresImport(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_data_protection_backup_instance_postgresql", "test") + r := DataProtectionBackupInstancePostgreSQLResource{} + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.RequiresImportErrorStep(r.requiresImport), + }) +} + +func TestAccDataProtectionBackupInstancePostgreSQL_complete(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_data_protection_backup_instance_postgresql", "test") + r := DataProtectionBackupInstancePostgreSQLResource{} + data.ResourceTest(t, r, []resource.TestStep{ + { + Config: r.complete(data), + Check: resource.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccDataProtectionBackupInstancePostgreSQL_update(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_data_protection_backup_instance_postgresql", "test") + r := DataProtectionBackupInstancePostgreSQLResource{} + data.ResourceTest(t, r, []resource.TestStep{ + { + Config: r.basic(data), + Check: resource.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.complete(data), + Check: resource.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.basic(data), + Check: resource.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func (r DataProtectionBackupInstancePostgreSQLResource) Exists(ctx context.Context, client *clients.Client, state *terraform.InstanceState) (*bool, error) { + id, err := parse.BackupInstanceID(state.ID) + if err != nil { + return nil, err + } + resp, err := 
client.DataProtection.BackupInstanceClient.Get(ctx, id.BackupVaultName, id.ResourceGroup, id.Name) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + return utils.Bool(false), nil + } + return nil, fmt.Errorf("retrieving DataProtection BackupInstance (%q): %+v", id, err) + } + return utils.Bool(true), nil +} + +func (r DataProtectionBackupInstancePostgreSQLResource) template(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctest-dataprotection-%d" + location = "%s" +} + +resource "azurerm_postgresql_server" "test" { + name = "acctest-postgresql-server-%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + + sku_name = "B_Gen5_2" + + storage_mb = 5120 + backup_retention_days = 7 + geo_redundant_backup_enabled = false + auto_grow_enabled = true + + administrator_login = "psqladminun" + administrator_login_password = "H@Sh1CoR3!" 
+ version = "9.5" + ssl_enforcement_enabled = true +} + +resource "azurerm_postgresql_database" "test" { + name = "acctest-postgresql-database-%d" + resource_group_name = azurerm_resource_group.test.name + server_name = azurerm_postgresql_server.test.name + charset = "UTF8" + collation = "English_United States.1252" +} + +resource "azurerm_data_protection_backup_vault" "test" { + name = "acctest-dataprotection-vault-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + datastore_type = "VaultStore" + redundancy = "LocallyRedundant" + + identity { + type = "SystemAssigned" + } +} + +resource "azurerm_data_protection_backup_policy_postgresql" "test" { + name = "acctest-dp-%d" + resource_group_name = azurerm_resource_group.test.name + vault_name = azurerm_data_protection_backup_vault.test.name + backup_repeating_time_intervals = ["R/2021-05-23T02:30:00+00:00/P1W"] + default_retention_duration = "P4M" +} + +resource "azurerm_data_protection_backup_policy_postgresql" "another" { + name = "acctest-dp-second-%d" + resource_group_name = azurerm_resource_group.test.name + vault_name = azurerm_data_protection_backup_vault.test.name + backup_repeating_time_intervals = ["R/2021-05-23T02:30:00+00:00/P1W"] + default_retention_duration = "P3M" +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger) +} + +func (r DataProtectionBackupInstancePostgreSQLResource) basic(data acceptance.TestData) string { + template := r.template(data) + return fmt.Sprintf(` +%s + +resource "azurerm_data_protection_backup_instance_postgresql" "test" { + name = "acctest-dbi-%d" + location = azurerm_resource_group.test.location + vault_id = azurerm_data_protection_backup_vault.test.id + database_id = azurerm_postgresql_database.test.id + backup_policy_id = azurerm_data_protection_backup_policy_postgresql.test.id +} +`, template, data.RandomInteger) +} + 
+func (r DataProtectionBackupInstancePostgreSQLResource) requiresImport(data acceptance.TestData) string { + config := r.basic(data) + return fmt.Sprintf(` +%s + +resource "azurerm_data_protection_backup_instance_postgresql" "import" { + name = azurerm_data_protection_backup_instance_postgresql.test.name + location = azurerm_resource_group.test.location + vault_id = azurerm_data_protection_backup_instance_postgresql.test.vault_id + database_id = azurerm_postgresql_database.test.id + backup_policy_id = azurerm_data_protection_backup_policy_postgresql.test.id +} +`, config) +} + +func (r DataProtectionBackupInstancePostgreSQLResource) complete(data acceptance.TestData) string { + template := r.template(data) + return fmt.Sprintf(` +%s + +resource "azurerm_data_protection_backup_instance_postgresql" "test" { + name = "acctest-dbi-%d" + location = azurerm_resource_group.test.location + vault_id = azurerm_data_protection_backup_vault.test.id + database_id = azurerm_postgresql_database.test.id + backup_policy_id = azurerm_data_protection_backup_policy_postgresql.another.id +} +`, template, data.RandomInteger) +} diff --git a/azurerm/internal/services/dataprotection/data_protection_backup_policy_blob_storage_resource.go b/azurerm/internal/services/dataprotection/data_protection_backup_policy_blob_storage_resource.go new file mode 100644 index 000000000000..f64774f5dbfd --- /dev/null +++ b/azurerm/internal/services/dataprotection/data_protection_backup_policy_blob_storage_resource.go @@ -0,0 +1,189 @@ +package dataprotection + +import ( + "fmt" + "log" + "regexp" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + helperValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/dataprotection/legacysdk/dataprotection" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/dataprotection/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/dataprotection/validate" + azSchema "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/schema" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func resourceDataProtectionBackupPolicyBlobStorage() *schema.Resource { + return &schema.Resource{ + Create: resourceDataProtectionBackupPolicyBlobStorageCreate, + Read: resourceDataProtectionBackupPolicyBlobStorageRead, + Delete: resourceDataProtectionBackupPolicyBlobStorageDelete, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Read: schema.DefaultTimeout(5 * time.Minute), + Update: schema.DefaultTimeout(30 * time.Minute), + Delete: schema.DefaultTimeout(30 * time.Minute), + }, + + Importer: azSchema.ValidateResourceIDPriorToImport(func(id string) error { + _, err := parse.BackupPolicyID(id) + return err + }), + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringMatch( + regexp.MustCompile("^[-a-zA-Z0-9]{3,150}$"), + "DataProtection BackupPolicy name must be 3 - 150 characters long, contain only letters, numbers and hyphens.", + ), + }, + + "vault_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.BackupVaultID, + }, + + "retention_duration": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: helperValidate.ISO8601Duration, + }, + }, + } +} +func 
resourceDataProtectionBackupPolicyBlobStorageCreate(d *schema.ResourceData, meta interface{}) error { + subscriptionId := meta.(*clients.Client).Account.SubscriptionId + client := meta.(*clients.Client).DataProtection.BackupPolicyClient + ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) + defer cancel() + + name := d.Get("name").(string) + vaultId, _ := parse.BackupVaultID(d.Get("vault_id").(string)) + id := parse.NewBackupPolicyID(subscriptionId, vaultId.ResourceGroup, vaultId.Name, name) + + existing, err := client.Get(ctx, id.BackupVaultName, id.ResourceGroup, id.Name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("checking for existing DataProtection BackupPolicy (%q): %+v", id, err) + } + } + if !utils.ResponseWasNotFound(existing.Response) { + return tf.ImportAsExistsError("azurerm_data_protection_backup_policy_blob_storage", id.ID()) + } + + parameters := dataprotection.BaseBackupPolicyResource{ + Properties: &dataprotection.BackupPolicy{ + PolicyRules: &[]dataprotection.BasicBasePolicyRule{ + dataprotection.AzureRetentionRule{ + Name: utils.String("Default"), + ObjectType: dataprotection.ObjectTypeAzureRetentionRule, + IsDefault: utils.Bool(true), + Lifecycles: &[]dataprotection.SourceLifeCycle{ + { + DeleteAfter: dataprotection.AbsoluteDeleteOption{ + Duration: utils.String(d.Get("retention_duration").(string)), + ObjectType: dataprotection.ObjectTypeAbsoluteDeleteOption, + }, + SourceDataStore: &dataprotection.DataStoreInfoBase{ + DataStoreType: "OperationalStore", + ObjectType: utils.String("DataStoreInfoBase"), + }, + TargetDataStoreCopySettings: &[]dataprotection.TargetCopySetting{}, + }, + }, + }, + }, + DatasourceTypes: &[]string{"Microsoft.Storage/storageAccounts/blobServices"}, + ObjectType: dataprotection.ObjectTypeBackupPolicy, + }, + } + + if _, err := client.CreateOrUpdate(ctx, id.BackupVaultName, id.ResourceGroup, id.Name, parameters); err != nil { + return 
fmt.Errorf("creating/updating DataProtection BackupPolicy (%q): %+v", id, err) + } + + d.SetId(id.ID()) + return resourceDataProtectionBackupPolicyBlobStorageRead(d, meta) +} + +func resourceDataProtectionBackupPolicyBlobStorageRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).DataProtection.BackupPolicyClient + ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.BackupPolicyID(d.Id()) + if err != nil { + return err + } + + resp, err := client.Get(ctx, id.BackupVaultName, id.ResourceGroup, id.Name) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + log.Printf("[INFO] dataprotection %q does not exist - removing from state", d.Id()) + d.SetId("") + return nil + } + return fmt.Errorf("retrieving DataProtection BackupPolicy (%q): %+v", id, err) + } + vaultId := parse.NewBackupVaultID(id.SubscriptionId, id.ResourceGroup, id.BackupVaultName) + d.Set("name", id.Name) + d.Set("vault_id", vaultId.ID()) + if resp.Properties != nil { + if props, ok := resp.Properties.AsBackupPolicy(); ok { + if err := d.Set("retention_duration", flattenBackupPolicyBlobStorageDefaultRetentionRuleDuration(props.PolicyRules)); err != nil { + return fmt.Errorf("setting `default_retention_duration`: %+v", err) + } + } + } + return nil +} + +func resourceDataProtectionBackupPolicyBlobStorageDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).DataProtection.BackupPolicyClient + ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.BackupPolicyID(d.Id()) + if err != nil { + return err + } + + if resp, err := client.Delete(ctx, id.BackupVaultName, id.ResourceGroup, id.Name); err != nil { + if utils.ResponseWasNotFound(resp) { + return nil + } + + return fmt.Errorf("deleting DataProtection BackupPolicy (%q): %+v", id, err) + } + return nil +} + +func 
flattenBackupPolicyBlobStorageDefaultRetentionRuleDuration(input *[]dataprotection.BasicBasePolicyRule) interface{} { + if input == nil { + return nil + } + + for _, item := range *input { + if retentionRule, ok := item.AsAzureRetentionRule(); ok && retentionRule.IsDefault != nil && *retentionRule.IsDefault { + if retentionRule.Lifecycles != nil && len(*retentionRule.Lifecycles) > 0 { + if deleteOption, ok := (*retentionRule.Lifecycles)[0].DeleteAfter.AsAbsoluteDeleteOption(); ok { + return *deleteOption.Duration + } + } + } + } + return nil +} diff --git a/azurerm/internal/services/dataprotection/data_protection_backup_policy_blob_storage_resource_test.go b/azurerm/internal/services/dataprotection/data_protection_backup_policy_blob_storage_resource_test.go new file mode 100644 index 000000000000..8ab71dddabb6 --- /dev/null +++ b/azurerm/internal/services/dataprotection/data_protection_backup_policy_blob_storage_resource_test.go @@ -0,0 +1,107 @@ +package dataprotection_test + +import ( + "context" + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/dataprotection/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +type DataProtectionBackupPolicyBlobStorageResource struct{} + +func TestAccDataProtectionBackupPolicyBlobStorage_basic(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_data_protection_backup_policy_blob_storage", "test") + r := DataProtectionBackupPolicyBlobStorageResource{} + data.ResourceTest(t, r, []resource.TestStep{ + { + Config: r.basic(data), + Check: 
resource.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccDataProtectionBackupPolicyBlobStorage_requiresImport(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_data_protection_backup_policy_blob_storage", "test") + r := DataProtectionBackupPolicyBlobStorageResource{} + data.ResourceTest(t, r, []resource.TestStep{ + { + Config: r.basic(data), + Check: resource.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.RequiresImportErrorStep(r.requiresImport), + }) +} + +func (r DataProtectionBackupPolicyBlobStorageResource) Exists(ctx context.Context, client *clients.Client, state *terraform.InstanceState) (*bool, error) { + id, err := parse.BackupPolicyID(state.ID) + if err != nil { + return nil, err + } + resp, err := client.DataProtection.BackupPolicyClient.Get(ctx, id.BackupVaultName, id.ResourceGroup, id.Name) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + return utils.Bool(false), nil + } + return nil, fmt.Errorf("retrieving DataProtection BackupPolicy (%q): %+v", id, err) + } + return utils.Bool(true), nil +} + +func (r DataProtectionBackupPolicyBlobStorageResource) template(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctest-dataprotection-%d" + location = "%s" +} + +resource "azurerm_data_protection_backup_vault" "test" { + name = "acctest-dbv-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + datastore_type = "VaultStore" + redundancy = "LocallyRedundant" +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger) +} + +func (r DataProtectionBackupPolicyBlobStorageResource) basic(data acceptance.TestData) string { + template := r.template(data) + return fmt.Sprintf(` +%s + +resource "azurerm_data_protection_backup_policy_blob_storage" 
"test" { + name = "acctest-dbp-%d" + vault_id = azurerm_data_protection_backup_vault.test.id + retention_duration = "P30D" +} +`, template, data.RandomInteger) +} + +func (r DataProtectionBackupPolicyBlobStorageResource) requiresImport(data acceptance.TestData) string { + config := r.basic(data) + return fmt.Sprintf(` +%s + +resource "azurerm_data_protection_backup_policy_blob_storage" "import" { + name = azurerm_data_protection_backup_policy_blob_storage.test.name + vault_id = azurerm_data_protection_backup_policy_blob_storage.test.vault_id + retention_duration = "P30D" +} +`, config) +} diff --git a/azurerm/internal/services/dataprotection/data_protection_backup_policy_disk_resource.go b/azurerm/internal/services/dataprotection/data_protection_backup_policy_disk_resource.go new file mode 100644 index 000000000000..962bb6666262 --- /dev/null +++ b/azurerm/internal/services/dataprotection/data_protection_backup_policy_disk_resource.go @@ -0,0 +1,442 @@ +package dataprotection + +import ( + "fmt" + "log" + "regexp" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + helperValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/dataprotection/legacysdk/dataprotection" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/dataprotection/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/dataprotection/validate" + azSchema "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/schema" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func resourceDataProtectionBackupPolicyDisk() *schema.Resource { + return &schema.Resource{ + Create: resourceDataProtectionBackupPolicyDiskCreate, + Read: resourceDataProtectionBackupPolicyDiskRead, + Delete: resourceDataProtectionBackupPolicyDiskDelete, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Read: schema.DefaultTimeout(5 * time.Minute), + Update: schema.DefaultTimeout(30 * time.Minute), + Delete: schema.DefaultTimeout(30 * time.Minute), + }, + + Importer: azSchema.ValidateResourceIDPriorToImport(func(id string) error { + _, err := parse.BackupPolicyID(id) + return err + }), + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringMatch( + regexp.MustCompile("^[-a-zA-Z0-9]{3,150}$"), + "DataProtection BackupPolicy name must be 3 - 150 characters long, contain only letters, numbers and hyphens.", + ), + }, + + "vault_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.BackupVaultID, + }, + + "backup_repeating_time_intervals": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + MinItems: 1, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + + "default_retention_duration": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: helperValidate.ISO8601Duration, + }, + + "retention_rule": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "duration": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: helperValidate.ISO8601Duration, + }, + + "criteria": { + Type: schema.TypeList, + 
Required: true, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "absolute_criteria": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{ + string(dataprotection.FirstOfDay), + string(dataprotection.FirstOfWeek), + }, false), + }, + }, + }, + }, + + "priority": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + }, + }, + }, + }, + } +} +func resourceDataProtectionBackupPolicyDiskCreate(d *schema.ResourceData, meta interface{}) error { + subscriptionId := meta.(*clients.Client).Account.SubscriptionId + client := meta.(*clients.Client).DataProtection.BackupPolicyClient + ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) + defer cancel() + + name := d.Get("name").(string) + vaultId, _ := parse.BackupVaultID(d.Get("vault_id").(string)) + id := parse.NewBackupPolicyID(subscriptionId, vaultId.ResourceGroup, vaultId.Name, name) + + existing, err := client.Get(ctx, id.BackupVaultName, id.ResourceGroup, id.Name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("checking for existing DataProtection BackupPolicy (%q): %+v", id, err) + } + } + if !utils.ResponseWasNotFound(existing.Response) { + return tf.ImportAsExistsError("azurerm_data_protection_backup_policy_disk", id.ID()) + } + + taggingCriteria := expandBackupPolicyDiskTaggingCriteriaArray(d.Get("retention_rule").([]interface{})) + policyRules := make([]dataprotection.BasicBasePolicyRule, 0) + policyRules = append(policyRules, expandBackupPolicyDiskAzureBackupRuleArray(d.Get("backup_repeating_time_intervals").([]interface{}), taggingCriteria)...) + policyRules = append(policyRules, expandBackupPolicyDiskDefaultAzureRetentionRule(d.Get("default_retention_duration"))) + policyRules = append(policyRules, expandBackupPolicyDiskAzureRetentionRuleArray(d.Get("retention_rule").([]interface{}))...) 
+ parameters := dataprotection.BaseBackupPolicyResource{ + Properties: &dataprotection.BackupPolicy{ + PolicyRules: &policyRules, + DatasourceTypes: &[]string{"Microsoft.Compute/disks"}, + ObjectType: dataprotection.ObjectTypeBackupPolicy, + }, + } + + if _, err := client.CreateOrUpdate(ctx, id.BackupVaultName, id.ResourceGroup, id.Name, parameters); err != nil { + return fmt.Errorf("creating/updating DataProtection BackupPolicy (%q): %+v", id, err) + } + + d.SetId(id.ID()) + return resourceDataProtectionBackupPolicyDiskRead(d, meta) +} + +func resourceDataProtectionBackupPolicyDiskRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).DataProtection.BackupPolicyClient + ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.BackupPolicyID(d.Id()) + if err != nil { + return err + } + + resp, err := client.Get(ctx, id.BackupVaultName, id.ResourceGroup, id.Name) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + log.Printf("[INFO] dataprotection %q does not exist - removing from state", d.Id()) + d.SetId("") + return nil + } + return fmt.Errorf("retrieving DataProtection BackupPolicy (%q): %+v", id, err) + } + vaultId := parse.NewBackupVaultID(id.SubscriptionId, id.ResourceGroup, id.BackupVaultName) + d.Set("name", id.Name) + d.Set("vault_id", vaultId.ID()) + if resp.Properties != nil { + if props, ok := resp.Properties.AsBackupPolicy(); ok { + if err := d.Set("backup_repeating_time_intervals", flattenBackupPolicyDiskBackupRuleArray(props.PolicyRules)); err != nil { + return fmt.Errorf("setting `backup_repeating_time_intervals`: %+v", err) + } + if err := d.Set("default_retention_duration", flattenBackupPolicyDiskDefaultRetentionRuleDuration(props.PolicyRules)); err != nil { + return fmt.Errorf("setting `default_retention_duration`: %+v", err) + } + if err := d.Set("retention_rule", flattenBackupPolicyDiskRetentionRuleArray(props.PolicyRules)); err != nil { + 
return fmt.Errorf("setting `retention_rule`: %+v", err) + } + } + } + return nil +} + +func resourceDataProtectionBackupPolicyDiskDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).DataProtection.BackupPolicyClient + ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.BackupPolicyID(d.Id()) + if err != nil { + return err + } + + if resp, err := client.Delete(ctx, id.BackupVaultName, id.ResourceGroup, id.Name); err != nil { + if utils.ResponseWasNotFound(resp) { + return nil + } + + return fmt.Errorf("deleting DataProtection BackupPolicy (%q): %+v", id, err) + } + return nil +} + +func expandBackupPolicyDiskAzureBackupRuleArray(input []interface{}, taggingCriteria *[]dataprotection.TaggingCriteria) []dataprotection.BasicBasePolicyRule { + results := make([]dataprotection.BasicBasePolicyRule, 0) + + results = append(results, dataprotection.AzureBackupRule{ + Name: utils.String("BackupIntervals"), + ObjectType: dataprotection.ObjectTypeAzureBackupRule, + DataStore: &dataprotection.DataStoreInfoBase{ + DataStoreType: dataprotection.OperationalStore, + ObjectType: utils.String("DataStoreInfoBase"), + }, + BackupParameters: &dataprotection.AzureBackupParams{ + BackupType: utils.String("Incremental"), + ObjectType: dataprotection.ObjectTypeAzureBackupParams, + }, + Trigger: dataprotection.ScheduleBasedTriggerContext{ + Schedule: &dataprotection.BackupSchedule{ + RepeatingTimeIntervals: utils.ExpandStringSlice(input), + }, + TaggingCriteria: taggingCriteria, + ObjectType: dataprotection.ObjectTypeScheduleBasedTriggerContext, + }, + }) + return results +} + +func expandBackupPolicyDiskAzureRetentionRuleArray(input []interface{}) []dataprotection.BasicBasePolicyRule { + results := make([]dataprotection.BasicBasePolicyRule, 0) + for _, item := range input { + v := item.(map[string]interface{}) + results = append(results, dataprotection.AzureRetentionRule{ + Name: 
utils.String(v["name"].(string)), + ObjectType: dataprotection.ObjectTypeAzureRetentionRule, + IsDefault: utils.Bool(false), + Lifecycles: &[]dataprotection.SourceLifeCycle{ + { + DeleteAfter: dataprotection.AbsoluteDeleteOption{ + Duration: utils.String(v["duration"].(string)), + ObjectType: dataprotection.ObjectTypeAbsoluteDeleteOption, + }, + SourceDataStore: &dataprotection.DataStoreInfoBase{ + DataStoreType: "OperationalStore", + ObjectType: utils.String("DataStoreInfoBase"), + }, + TargetDataStoreCopySettings: &[]dataprotection.TargetCopySetting{}, + }, + }, + }) + } + return results +} + +func expandBackupPolicyDiskDefaultAzureRetentionRule(input interface{}) dataprotection.BasicBasePolicyRule { + return dataprotection.AzureRetentionRule{ + Name: utils.String("Default"), + ObjectType: dataprotection.ObjectTypeAzureRetentionRule, + IsDefault: utils.Bool(true), + Lifecycles: &[]dataprotection.SourceLifeCycle{ + { + DeleteAfter: dataprotection.AbsoluteDeleteOption{ + Duration: utils.String(input.(string)), + ObjectType: dataprotection.ObjectTypeAbsoluteDeleteOption, + }, + SourceDataStore: &dataprotection.DataStoreInfoBase{ + DataStoreType: "OperationalStore", + ObjectType: utils.String("DataStoreInfoBase"), + }, + TargetDataStoreCopySettings: &[]dataprotection.TargetCopySetting{}, + }, + }, + } +} + +func expandBackupPolicyDiskTaggingCriteriaArray(input []interface{}) *[]dataprotection.TaggingCriteria { + results := []dataprotection.TaggingCriteria{ + { + Criteria: nil, + IsDefault: utils.Bool(true), + TaggingPriority: utils.Int64(99), + TagInfo: &dataprotection.RetentionTag{ + ID: utils.String("Default_"), + TagName: utils.String("Default"), + }, + }, + } + for _, item := range input { + v := item.(map[string]interface{}) + results = append(results, dataprotection.TaggingCriteria{ + Criteria: expandBackupPolicyDiskCriteriaArray(v["criteria"].([]interface{})), + IsDefault: utils.Bool(false), + TaggingPriority: utils.Int64(int64(v["priority"].(int))), + 
TagInfo: &dataprotection.RetentionTag{ + ID: utils.String(v["name"].(string) + "_"), + TagName: utils.String(v["name"].(string)), + }, + }) + } + return &results +} + +func expandBackupPolicyDiskCriteriaArray(input []interface{}) *[]dataprotection.BasicBackupCriteria { + results := make([]dataprotection.BasicBackupCriteria, 0) + for _, item := range input { + v := item.(map[string]interface{}) + var absoluteCriteria []dataprotection.AbsoluteMarker + if absoluteCriteriaRaw := v["absolute_criteria"].(string); len(absoluteCriteriaRaw) > 0 { + absoluteCriteria = []dataprotection.AbsoluteMarker{dataprotection.AbsoluteMarker(absoluteCriteriaRaw)} + } + results = append(results, dataprotection.ScheduleBasedBackupCriteria{ + AbsoluteCriteria: &absoluteCriteria, + ObjectType: dataprotection.ObjectTypeScheduleBasedBackupCriteria, + }) + } + return &results +} + +func flattenBackupPolicyDiskBackupRuleArray(input *[]dataprotection.BasicBasePolicyRule) []interface{} { + if input == nil { + return make([]interface{}, 0) + } + for _, item := range *input { + if backupRule, ok := item.AsAzureBackupRule(); ok { + if backupRule.Trigger != nil { + if scheduleBasedTrigger, ok := backupRule.Trigger.AsScheduleBasedTriggerContext(); ok { + if scheduleBasedTrigger.Schedule != nil { + return utils.FlattenStringSlice(scheduleBasedTrigger.Schedule.RepeatingTimeIntervals) + } + } + } + } + } + return make([]interface{}, 0) +} + +func flattenBackupPolicyDiskDefaultRetentionRuleDuration(input *[]dataprotection.BasicBasePolicyRule) interface{} { + if input == nil { + return nil + } + + for _, item := range *input { + if retentionRule, ok := item.AsAzureRetentionRule(); ok && retentionRule.IsDefault != nil && *retentionRule.IsDefault { + if retentionRule.Lifecycles != nil && len(*retentionRule.Lifecycles) > 0 { + if deleteOption, ok := (*retentionRule.Lifecycles)[0].DeleteAfter.AsAbsoluteDeleteOption(); ok { + return *deleteOption.Duration + } + } + } + } + return nil +} + +func 
flattenBackupPolicyDiskRetentionRuleArray(input *[]dataprotection.BasicBasePolicyRule) []interface{} { + results := make([]interface{}, 0) + if input == nil { + return results + } + + var taggingCriterias []dataprotection.TaggingCriteria + for _, item := range *input { + if backupRule, ok := item.AsAzureBackupRule(); ok { + if trigger, ok := backupRule.Trigger.AsScheduleBasedTriggerContext(); ok { + if trigger.TaggingCriteria != nil { + taggingCriterias = *trigger.TaggingCriteria + } + } + } + } + + for _, item := range *input { + if retentionRule, ok := item.AsAzureRetentionRule(); ok && (retentionRule.IsDefault == nil || !*retentionRule.IsDefault) { + var name string + if retentionRule.Name != nil { + name = *retentionRule.Name + } + var taggingPriority int64 + var taggingCriteria []interface{} + for _, criteria := range taggingCriterias { + if criteria.TagInfo != nil && criteria.TagInfo.TagName != nil && strings.EqualFold(*criteria.TagInfo.TagName, name) { + taggingPriority = *criteria.TaggingPriority + taggingCriteria = flattenBackupPolicyDiskBackupCriteriaArray(criteria.Criteria) + } + } + var duration string + if retentionRule.Lifecycles != nil && len(*retentionRule.Lifecycles) > 0 { + if deleteOption, ok := (*retentionRule.Lifecycles)[0].DeleteAfter.AsAbsoluteDeleteOption(); ok { + duration = *deleteOption.Duration + } + } + results = append(results, map[string]interface{}{ + "name": name, + "priority": taggingPriority, + "criteria": taggingCriteria, + "duration": duration, + }) + } + } + return results +} + +func flattenBackupPolicyDiskBackupCriteriaArray(input *[]dataprotection.BasicBackupCriteria) []interface{} { + results := make([]interface{}, 0) + if input == nil { + return results + } + + for _, item := range *input { + if criteria, ok := item.AsScheduleBasedBackupCriteria(); ok { + var absoluteCriteria string + if criteria.AbsoluteCriteria != nil && len(*criteria.AbsoluteCriteria) > 0 { + absoluteCriteria = string((*criteria.AbsoluteCriteria)[0]) + } 
+ + results = append(results, map[string]interface{}{ + "absolute_criteria": absoluteCriteria, + }) + } + } + return results +} diff --git a/azurerm/internal/services/dataprotection/data_protection_backup_policy_disk_resource_test.go b/azurerm/internal/services/dataprotection/data_protection_backup_policy_disk_resource_test.go new file mode 100644 index 000000000000..7e83b9aacec5 --- /dev/null +++ b/azurerm/internal/services/dataprotection/data_protection_backup_policy_disk_resource_test.go @@ -0,0 +1,182 @@ +package dataprotection_test + +import ( + "context" + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/dataprotection/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +type DataProtectionBackupPolicyDiskResource struct{} + +func TestAccDataProtectionBackupPolicyDisk_basic(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_data_protection_backup_policy_disk", "test") + r := DataProtectionBackupPolicyDiskResource{} + data.ResourceTest(t, r, []resource.TestStep{ + { + Config: r.basic(data), + Check: resource.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccDataProtectionBackupPolicyDisk_requiresImport(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_data_protection_backup_policy_disk", "test") + r := DataProtectionBackupPolicyDiskResource{} + data.ResourceTest(t, r, []resource.TestStep{ + { + Config: r.basic(data), + Check: resource.ComposeTestCheckFunc( + 
check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.RequiresImportErrorStep(r.requiresImport), + }) +} + +func TestAccDataProtectionBackupPolicyDisk_complete(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_data_protection_backup_policy_disk", "test") + r := DataProtectionBackupPolicyDiskResource{} + data.ResourceTest(t, r, []resource.TestStep{ + { + Config: r.complete(data), + Check: resource.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccDataProtectionBackupPolicyDisk_update(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_data_protection_backup_policy_disk", "test") + r := DataProtectionBackupPolicyDiskResource{} + data.ResourceTest(t, r, []resource.TestStep{ + { + Config: r.basic(data), + Check: resource.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.complete(data), + Check: resource.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.basic(data), + Check: resource.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func (r DataProtectionBackupPolicyDiskResource) Exists(ctx context.Context, client *clients.Client, state *terraform.InstanceState) (*bool, error) { + id, err := parse.BackupPolicyID(state.ID) + if err != nil { + return nil, err + } + resp, err := client.DataProtection.BackupPolicyClient.Get(ctx, id.BackupVaultName, id.ResourceGroup, id.Name) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + return utils.Bool(false), nil + } + return nil, fmt.Errorf("retrieving DataProtection BackupPolicy (%q): %+v", id, err) + } + return utils.Bool(true), nil +} + +func (r DataProtectionBackupPolicyDiskResource) template(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource 
"azurerm_resource_group" "test" { + name = "acctest-dataprotection-%d" + location = "%s" +} + +resource "azurerm_data_protection_backup_vault" "test" { + name = "acctest-dbv-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + datastore_type = "VaultStore" + redundancy = "LocallyRedundant" +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger) +} + +func (r DataProtectionBackupPolicyDiskResource) basic(data acceptance.TestData) string { + template := r.template(data) + return fmt.Sprintf(` +%s + +resource "azurerm_data_protection_backup_policy_disk" "test" { + name = "acctest-dbp-%d" + vault_id = azurerm_data_protection_backup_vault.test.id + backup_repeating_time_intervals = ["R/2021-05-19T06:33:16+00:00/PT4H"] + default_retention_duration = "P7D" +} +`, template, data.RandomInteger) +} + +func (r DataProtectionBackupPolicyDiskResource) requiresImport(data acceptance.TestData) string { + config := r.basic(data) + return fmt.Sprintf(` +%s + +resource "azurerm_data_protection_backup_policy_disk" "import" { + name = azurerm_data_protection_backup_policy_disk.test.name + vault_id = azurerm_data_protection_backup_policy_disk.test.vault_id + backup_repeating_time_intervals = ["R/2021-05-19T06:33:16+00:00/PT4H"] + default_retention_duration = "P7D" +} +`, config) +} + +func (r DataProtectionBackupPolicyDiskResource) complete(data acceptance.TestData) string { + template := r.template(data) + return fmt.Sprintf(` +%s +resource "azurerm_data_protection_backup_policy_disk" "test" { + name = "acctest-dbp-%d" + vault_id = azurerm_data_protection_backup_vault.test.id + backup_repeating_time_intervals = ["R/2021-05-19T06:33:16+00:00/PT4H"] + default_retention_duration = "P7D" + + retention_rule { + name = "Daily" + duration = "P7D" + priority = 25 + criteria { + absolute_criteria = "FirstOfDay" + } + } + + retention_rule { + name = "Weekly" + duration = "P7D" + priority = 20 + criteria { + 
absolute_criteria = "FirstOfWeek" + } + } +} +`, template, data.RandomInteger) +} diff --git a/azurerm/internal/services/dataprotection/data_protection_backup_policy_postgresql_resource.go b/azurerm/internal/services/dataprotection/data_protection_backup_policy_postgresql_resource.go new file mode 100644 index 000000000000..9c3e9168b4fe --- /dev/null +++ b/azurerm/internal/services/dataprotection/data_protection_backup_policy_postgresql_resource.go @@ -0,0 +1,569 @@ +package dataprotection + +import ( + "fmt" + "log" + "regexp" + "strings" + "time" + + "github.com/Azure/go-autorest/autorest/date" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/dataprotection/legacysdk/dataprotection" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/dataprotection/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func resourceDataProtectionBackupPolicyPostgreSQL() *pluginsdk.Resource { + return &pluginsdk.Resource{ + Create: resourceDataProtectionBackupPolicyPostgreSQLCreate, + Read: resourceDataProtectionBackupPolicyPostgreSQLRead, + Delete: resourceDataProtectionBackupPolicyPostgreSQLDelete, + + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: 
pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), + }, + + Importer: pluginsdk.ImporterValidatingResourceId(func(id string) error { + _, err := parse.BackupPolicyID(id) + return err + }), + + Schema: map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringMatch( + regexp.MustCompile("^[-a-zA-Z0-9]{3,150}$"), + "DataProtection BackupPolicy name must be 3 - 150 characters long, contain only letters, numbers and hyphens.", + ), + }, + + "resource_group_name": azure.SchemaResourceGroupName(), + + "vault_name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + }, + + "backup_repeating_time_intervals": { + Type: pluginsdk.TypeList, + Required: true, + ForceNew: true, + MinItems: 1, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + }, + }, + + "default_retention_duration": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.ISO8601Duration, + }, + + "retention_rule": { + Type: pluginsdk.TypeList, + Optional: true, + ForceNew: true, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + }, + + "duration": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.ISO8601Duration, + }, + + "criteria": { + Type: pluginsdk.TypeList, + Required: true, + ForceNew: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "absolute_criteria": { + Type: pluginsdk.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{ + string(dataprotection.AllBackup), + string(dataprotection.FirstOfDay), + string(dataprotection.FirstOfMonth), + string(dataprotection.FirstOfWeek), + string(dataprotection.FirstOfYear), + }, false), + }, + + "days_of_week": { + Type: pluginsdk.TypeSet, + Optional: 
true, + ForceNew: true, + MinItems: 1, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + ValidateFunc: validation.IsDayOfTheWeek(false), + }, + }, + + "months_of_year": { + Type: pluginsdk.TypeSet, + Optional: true, + ForceNew: true, + MinItems: 1, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + ValidateFunc: validation.IsMonth(false), + }, + }, + + "scheduled_backup_times": { + Type: pluginsdk.TypeSet, + Optional: true, + ForceNew: true, + MinItems: 1, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + ValidateFunc: validation.IsRFC3339Time, + }, + }, + + "weeks_of_month": { + Type: pluginsdk.TypeSet, + Optional: true, + ForceNew: true, + MinItems: 1, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + ValidateFunc: validation.StringInSlice([]string{ + string(dataprotection.First), + string(dataprotection.Second), + string(dataprotection.Third), + string(dataprotection.Fourth), + string(dataprotection.Last), + }, false), + }, + }, + }, + }, + }, + + "priority": { + Type: pluginsdk.TypeInt, + Required: true, + ForceNew: true, + }, + }, + }, + }, + }, + } +} + +func resourceDataProtectionBackupPolicyPostgreSQLCreate(d *pluginsdk.ResourceData, meta interface{}) error { + subscriptionId := meta.(*clients.Client).Account.SubscriptionId + client := meta.(*clients.Client).DataProtection.BackupPolicyClient + ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) + defer cancel() + + name := d.Get("name").(string) + resourceGroup := d.Get("resource_group_name").(string) + vaultName := d.Get("vault_name").(string) + + id := parse.NewBackupPolicyID(subscriptionId, resourceGroup, vaultName, name) + + existing, err := client.Get(ctx, id.BackupVaultName, id.ResourceGroup, id.Name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("checking for existing DataProtection BackupPolicy (%q): %+v", id, err) + } + } + if !utils.ResponseWasNotFound(existing.Response) { + return 
tf.ImportAsExistsError("azurerm_data_protection_backup_policy_postgresql", id.ID()) + } + + taggingCriteria := expandBackupPolicyPostgreSQLTaggingCriteriaArray(d.Get("retention_rule").([]interface{})) + policyRules := make([]dataprotection.BasicBasePolicyRule, 0) + policyRules = append(policyRules, expandBackupPolicyPostgreSQLAzureBackupRuleArray(d.Get("backup_repeating_time_intervals").([]interface{}), taggingCriteria)...) + policyRules = append(policyRules, expandBackupPolicyPostgreSQLDefaultAzureRetentionRule(d.Get("default_retention_duration"))) + policyRules = append(policyRules, expandBackupPolicyPostgreSQLAzureRetentionRuleArray(d.Get("retention_rule").([]interface{}))...) + parameters := dataprotection.BaseBackupPolicyResource{ + Properties: &dataprotection.BackupPolicy{ + PolicyRules: &policyRules, + DatasourceTypes: &[]string{"Microsoft.DBforPostgreSQL/servers/databases"}, + ObjectType: dataprotection.ObjectTypeBackupPolicy, + }, + } + + if _, err := client.CreateOrUpdate(ctx, id.BackupVaultName, id.ResourceGroup, id.Name, parameters); err != nil { + return fmt.Errorf("creating/updating DataProtection BackupPolicy (%q): %+v", id, err) + } + + d.SetId(id.ID()) + return resourceDataProtectionBackupPolicyPostgreSQLRead(d, meta) +} + +func resourceDataProtectionBackupPolicyPostgreSQLRead(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).DataProtection.BackupPolicyClient + ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.BackupPolicyID(d.Id()) + if err != nil { + return err + } + + resp, err := client.Get(ctx, id.BackupVaultName, id.ResourceGroup, id.Name) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + log.Printf("[INFO] dataprotection %q does not exist - removing from state", d.Id()) + d.SetId("") + return nil + } + return fmt.Errorf("retrieving DataProtection BackupPolicy (%q): %+v", id, err) + } + d.Set("name", id.Name) + 
d.Set("resource_group_name", id.ResourceGroup) + d.Set("vault_name", id.BackupVaultName) + if resp.Properties != nil { + if props, ok := resp.Properties.AsBackupPolicy(); ok { + if err := d.Set("backup_repeating_time_intervals", flattenBackupPolicyPostgreSQLBackupRuleArray(props.PolicyRules)); err != nil { + return fmt.Errorf("setting `backup_rule`: %+v", err) + } + if err := d.Set("default_retention_duration", flattenBackupPolicyPostgreSQLDefaultRetentionRuleDuration(props.PolicyRules)); err != nil { + return fmt.Errorf("setting `default_retention_duration`: %+v", err) + } + if err := d.Set("retention_rule", flattenBackupPolicyPostgreSQLRetentionRuleArray(props.PolicyRules)); err != nil { + return fmt.Errorf("setting `retention_rule`: %+v", err) + } + } + } + return nil +} + +func resourceDataProtectionBackupPolicyPostgreSQLDelete(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).DataProtection.BackupPolicyClient + ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.BackupPolicyID(d.Id()) + if err != nil { + return err + } + + if resp, err := client.Delete(ctx, id.BackupVaultName, id.ResourceGroup, id.Name); err != nil { + if utils.ResponseWasNotFound(resp) { + return nil + } + + return fmt.Errorf("deleting DataProtection BackupPolicy (%q): %+v", id, err) + } + return nil +} + +func expandBackupPolicyPostgreSQLAzureBackupRuleArray(input []interface{}, taggingCriteria *[]dataprotection.TaggingCriteria) []dataprotection.BasicBasePolicyRule { + results := make([]dataprotection.BasicBasePolicyRule, 0) + results = append(results, dataprotection.AzureBackupRule{ + Name: utils.String("BackupIntervals"), + ObjectType: dataprotection.ObjectTypeAzureBackupRule, + DataStore: &dataprotection.DataStoreInfoBase{ + DataStoreType: dataprotection.VaultStore, + ObjectType: utils.String("DataStoreInfoBase"), + }, + BackupParameters: &dataprotection.AzureBackupParams{ + BackupType: 
utils.String("Full"), + ObjectType: dataprotection.ObjectTypeAzureBackupParams, + }, + Trigger: dataprotection.ScheduleBasedTriggerContext{ + Schedule: &dataprotection.BackupSchedule{ + RepeatingTimeIntervals: utils.ExpandStringSlice(input), + }, + TaggingCriteria: taggingCriteria, + ObjectType: dataprotection.ObjectTypeScheduleBasedTriggerContext, + }, + }) + + return results +} + +func expandBackupPolicyPostgreSQLAzureRetentionRuleArray(input []interface{}) []dataprotection.BasicBasePolicyRule { + results := make([]dataprotection.BasicBasePolicyRule, 0) + for _, item := range input { + v := item.(map[string]interface{}) + results = append(results, dataprotection.AzureRetentionRule{ + Name: utils.String(v["name"].(string)), + ObjectType: dataprotection.ObjectTypeAzureRetentionRule, + IsDefault: utils.Bool(false), + Lifecycles: &[]dataprotection.SourceLifeCycle{ + { + DeleteAfter: dataprotection.AbsoluteDeleteOption{ + Duration: utils.String(v["duration"].(string)), + ObjectType: dataprotection.ObjectTypeAbsoluteDeleteOption, + }, + SourceDataStore: &dataprotection.DataStoreInfoBase{ + DataStoreType: "VaultStore", + ObjectType: utils.String("DataStoreInfoBase"), + }, + TargetDataStoreCopySettings: &[]dataprotection.TargetCopySetting{}, + }, + }, + }) + } + return results +} + +func expandBackupPolicyPostgreSQLDefaultAzureRetentionRule(input interface{}) dataprotection.BasicBasePolicyRule { + return dataprotection.AzureRetentionRule{ + Name: utils.String("Default"), + ObjectType: dataprotection.ObjectTypeAzureRetentionRule, + IsDefault: utils.Bool(true), + Lifecycles: &[]dataprotection.SourceLifeCycle{ + { + DeleteAfter: dataprotection.AbsoluteDeleteOption{ + Duration: utils.String(input.(string)), + ObjectType: dataprotection.ObjectTypeAbsoluteDeleteOption, + }, + SourceDataStore: &dataprotection.DataStoreInfoBase{ + DataStoreType: "VaultStore", + ObjectType: utils.String("DataStoreInfoBase"), + }, + TargetDataStoreCopySettings: 
&[]dataprotection.TargetCopySetting{}, + }, + }, + } +} + +func expandBackupPolicyPostgreSQLTaggingCriteriaArray(input []interface{}) *[]dataprotection.TaggingCriteria { + results := []dataprotection.TaggingCriteria{ + { + Criteria: nil, + IsDefault: utils.Bool(true), + TaggingPriority: utils.Int64(99), + TagInfo: &dataprotection.RetentionTag{ + ID: utils.String("Default_"), + TagName: utils.String("Default"), + }, + }, + } + for _, item := range input { + v := item.(map[string]interface{}) + results = append(results, dataprotection.TaggingCriteria{ + Criteria: expandBackupPolicyPostgreSQLCriteriaArray(v["criteria"].([]interface{})), + IsDefault: utils.Bool(false), + TaggingPriority: utils.Int64(int64(v["priority"].(int))), + TagInfo: &dataprotection.RetentionTag{ + ID: utils.String(v["name"].(string) + "_"), + TagName: utils.String(v["name"].(string)), + }, + }) + } + return &results +} + +func expandBackupPolicyPostgreSQLCriteriaArray(input []interface{}) *[]dataprotection.BasicBackupCriteria { + results := make([]dataprotection.BasicBackupCriteria, 0) + for _, item := range input { + v := item.(map[string]interface{}) + var absoluteCriteria []dataprotection.AbsoluteMarker + if absoluteCriteriaRaw := v["absolute_criteria"].(string); len(absoluteCriteriaRaw) > 0 { + absoluteCriteria = []dataprotection.AbsoluteMarker{dataprotection.AbsoluteMarker(absoluteCriteriaRaw)} + } + + var daysOfWeek []dataprotection.DayOfWeek + if v["days_of_week"].(*pluginsdk.Set).Len() > 0 { + daysOfWeek = make([]dataprotection.DayOfWeek, 0) + for _, value := range v["days_of_week"].(*pluginsdk.Set).List() { + daysOfWeek = append(daysOfWeek, dataprotection.DayOfWeek(value.(string))) + } + } + + var monthsOfYear []dataprotection.Month + if v["months_of_year"].(*pluginsdk.Set).Len() > 0 { + monthsOfYear = make([]dataprotection.Month, 0) + for _, value := range v["months_of_year"].(*pluginsdk.Set).List() { + monthsOfYear = append(monthsOfYear, dataprotection.Month(value.(string))) + } + } + 
+ var weeksOfMonth []dataprotection.WeekNumber + if v["weeks_of_month"].(*pluginsdk.Set).Len() > 0 { + weeksOfMonth = make([]dataprotection.WeekNumber, 0) + for _, value := range v["weeks_of_month"].(*pluginsdk.Set).List() { + weeksOfMonth = append(weeksOfMonth, dataprotection.WeekNumber(value.(string))) + } + } + + var scheduleTimes []date.Time + if v["scheduled_backup_times"].(*pluginsdk.Set).Len() > 0 { + scheduleTimes = make([]date.Time, 0) + for _, value := range v["scheduled_backup_times"].(*pluginsdk.Set).List() { + t, _ := time.Parse(time.RFC3339, value.(string)) + scheduleTimes = append(scheduleTimes, date.Time{Time: t}) + } + } + results = append(results, dataprotection.ScheduleBasedBackupCriteria{ + AbsoluteCriteria: &absoluteCriteria, + DaysOfMonth: nil, + DaysOfTheWeek: &daysOfWeek, + MonthsOfYear: &monthsOfYear, + ScheduleTimes: &scheduleTimes, + WeeksOfTheMonth: &weeksOfMonth, + ObjectType: dataprotection.ObjectTypeScheduleBasedBackupCriteria, + }) + } + return &results +} + +func flattenBackupPolicyPostgreSQLBackupRuleArray(input *[]dataprotection.BasicBasePolicyRule) []interface{} { + if input == nil { + return make([]interface{}, 0) + } + for _, item := range *input { + if backupRule, ok := item.AsAzureBackupRule(); ok { + if backupRule.Trigger != nil { + if scheduleBasedTrigger, ok := backupRule.Trigger.AsScheduleBasedTriggerContext(); ok { + if scheduleBasedTrigger.Schedule != nil { + return utils.FlattenStringSlice(scheduleBasedTrigger.Schedule.RepeatingTimeIntervals) + } + } + } + } + } + return make([]interface{}, 0) +} + +func flattenBackupPolicyPostgreSQLDefaultRetentionRuleDuration(input *[]dataprotection.BasicBasePolicyRule) interface{} { + if input == nil { + return nil + } + + for _, item := range *input { + if retentionRule, ok := item.AsAzureRetentionRule(); ok && retentionRule.IsDefault != nil && *retentionRule.IsDefault { + if retentionRule.Lifecycles != nil && len(*retentionRule.Lifecycles) > 0 { + if deleteOption, ok := 
(*retentionRule.Lifecycles)[0].DeleteAfter.AsAbsoluteDeleteOption(); ok { + return *deleteOption.Duration + } + } + } + } + return nil +} + +func flattenBackupPolicyPostgreSQLRetentionRuleArray(input *[]dataprotection.BasicBasePolicyRule) []interface{} { + results := make([]interface{}, 0) + if input == nil { + return results + } + + var taggingCriterias []dataprotection.TaggingCriteria + for _, item := range *input { + if backupRule, ok := item.AsAzureBackupRule(); ok { + if trigger, ok := backupRule.Trigger.AsScheduleBasedTriggerContext(); ok { + if trigger.TaggingCriteria != nil { + taggingCriterias = *trigger.TaggingCriteria + } + } + } + } + + for _, item := range *input { + if retentionRule, ok := item.AsAzureRetentionRule(); ok && (retentionRule.IsDefault == nil || !*retentionRule.IsDefault) { + var name string + if retentionRule.Name != nil { + name = *retentionRule.Name + } + var taggingPriority int64 + var taggingCriteria []interface{} + for _, criteria := range taggingCriterias { + if criteria.TagInfo != nil && criteria.TagInfo.TagName != nil && strings.EqualFold(*criteria.TagInfo.TagName, name) { + taggingPriority = *criteria.TaggingPriority + taggingCriteria = flattenBackupPolicyPostgreSQLBackupCriteriaArray(criteria.Criteria) + } + } + var duration string + if retentionRule.Lifecycles != nil && len(*retentionRule.Lifecycles) > 0 { + if deleteOption, ok := (*retentionRule.Lifecycles)[0].DeleteAfter.AsAbsoluteDeleteOption(); ok { + duration = *deleteOption.Duration + } + } + results = append(results, map[string]interface{}{ + "name": name, + "priority": taggingPriority, + "criteria": taggingCriteria, + "duration": duration, + }) + } + } + return results +} + +func flattenBackupPolicyPostgreSQLBackupCriteriaArray(input *[]dataprotection.BasicBackupCriteria) []interface{} { + results := make([]interface{}, 0) + if input == nil { + return results + } + + for _, item := range *input { + if criteria, ok := item.AsScheduleBasedBackupCriteria(); ok { + var 
absoluteCriteria string + if criteria.AbsoluteCriteria != nil && len(*criteria.AbsoluteCriteria) > 0 { + absoluteCriteria = string((*criteria.AbsoluteCriteria)[0]) + } + var daysOfWeek []string + if criteria.DaysOfTheWeek != nil { + daysOfWeek = make([]string, 0) + for _, item := range *criteria.DaysOfTheWeek { + daysOfWeek = append(daysOfWeek, (string)(item)) + } + } + var monthsOfYear []string + if criteria.MonthsOfYear != nil { + monthsOfYear = make([]string, 0) + for _, item := range *criteria.MonthsOfYear { + monthsOfYear = append(monthsOfYear, (string)(item)) + } + } + var weeksOfMonth []string + if criteria.WeeksOfTheMonth != nil { + weeksOfMonth = make([]string, 0) + for _, item := range *criteria.WeeksOfTheMonth { + weeksOfMonth = append(weeksOfMonth, (string)(item)) + } + } + var scheduleTimes []string + if criteria.ScheduleTimes != nil { + scheduleTimes = make([]string, 0) + for _, item := range *criteria.ScheduleTimes { + scheduleTimes = append(scheduleTimes, item.String()) + } + } + + results = append(results, map[string]interface{}{ + "absolute_criteria": absoluteCriteria, + "days_of_week": daysOfWeek, + "months_of_year": monthsOfYear, + "weeks_of_month": weeksOfMonth, + "scheduled_backup_times": scheduleTimes, + }) + } + } + return results +} diff --git a/azurerm/internal/services/dataprotection/data_protection_backup_policy_postgresql_resource_test.go b/azurerm/internal/services/dataprotection/data_protection_backup_policy_postgresql_resource_test.go new file mode 100644 index 000000000000..0dc8b6e703ea --- /dev/null +++ b/azurerm/internal/services/dataprotection/data_protection_backup_policy_postgresql_resource_test.go @@ -0,0 +1,199 @@ +package dataprotection_test + +import ( + "context" + "fmt" + "testing" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/dataprotection/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +type DataProtectionBackupPolicyPostgreSQLResource struct{} + +func TestAccDataProtectionBackupPolicyPostgreSQL_basic(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_data_protection_backup_policy_postgresql", "test") + r := DataProtectionBackupPolicyPostgreSQLResource{} + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccDataProtectionBackupPolicyPostgreSQL_requiresImport(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_data_protection_backup_policy_postgresql", "test") + r := DataProtectionBackupPolicyPostgreSQLResource{} + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.RequiresImportErrorStep(r.requiresImport), + }) +} + +func TestAccDataProtectionBackupPolicyPostgreSQL_complete(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_data_protection_backup_policy_postgresql", "test") + r := DataProtectionBackupPolicyPostgreSQLResource{} + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.complete(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccDataProtectionBackupPolicyPostgreSQL_update(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_data_protection_backup_policy_postgresql", "test") + r := 
DataProtectionBackupPolicyPostgreSQLResource{} + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.complete(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func (r DataProtectionBackupPolicyPostgreSQLResource) Exists(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { + id, err := parse.BackupPolicyID(state.ID) + if err != nil { + return nil, err + } + resp, err := client.DataProtection.BackupPolicyClient.Get(ctx, id.BackupVaultName, id.ResourceGroup, id.Name) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + return utils.Bool(false), nil + } + return nil, fmt.Errorf("retrieving DataProtection BackupPolicy (%q): %+v", id, err) + } + return utils.Bool(true), nil +} + +func (r DataProtectionBackupPolicyPostgreSQLResource) template(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctest-dataprotection-%d" + location = "%s" +} + +resource "azurerm_data_protection_backup_vault" "test" { + name = "acctest-dbv-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + datastore_type = "VaultStore" + redundancy = "LocallyRedundant" +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger) +} + +func (r DataProtectionBackupPolicyPostgreSQLResource) basic(data acceptance.TestData) string { + template := r.template(data) + return fmt.Sprintf(` +%s + +resource "azurerm_data_protection_backup_policy_postgresql" "test" { + name = "acctest-dbp-%d" + 
resource_group_name = azurerm_resource_group.test.name + vault_name = azurerm_data_protection_backup_vault.test.name + + backup_repeating_time_intervals = ["R/2021-05-23T02:30:00+00:00/P1W"] + default_retention_duration = "P4M" +} +`, template, data.RandomInteger) +} + +func (r DataProtectionBackupPolicyPostgreSQLResource) requiresImport(data acceptance.TestData) string { + config := r.basic(data) + return fmt.Sprintf(` +%s + +resource "azurerm_data_protection_backup_policy_postgresql" "import" { + name = azurerm_data_protection_backup_policy_postgresql.test.name + resource_group_name = azurerm_data_protection_backup_policy_postgresql.test.resource_group_name + vault_name = azurerm_data_protection_backup_policy_postgresql.test.vault_name + + backup_repeating_time_intervals = ["R/2021-05-23T02:30:00+00:00/P1W"] + default_retention_duration = "P4M" +} +`, config) +} + +func (r DataProtectionBackupPolicyPostgreSQLResource) complete(data acceptance.TestData) string { + template := r.template(data) + return fmt.Sprintf(` +%s + +resource "azurerm_data_protection_backup_policy_postgresql" "test" { + name = "acctest-dbp-%d" + resource_group_name = azurerm_resource_group.test.name + vault_name = azurerm_data_protection_backup_vault.test.name + + backup_repeating_time_intervals = ["R/2021-05-23T02:30:00+00:00/P1W"] + default_retention_duration = "P4M" + retention_rule { + name = "weekly" + duration = "P6M" + priority = 20 + criteria { + absolute_criteria = "FirstOfWeek" + } + } + + retention_rule { + name = "thursday" + duration = "P1W" + priority = 25 + criteria { + days_of_week = ["Thursday"] + scheduled_backup_times = ["2021-05-23T02:30:00Z"] + } + } + + retention_rule { + name = "monthly" + duration = "P1D" + priority = 30 + criteria { + weeks_of_month = ["First", "Last"] + days_of_week = ["Tuesday"] + scheduled_backup_times = ["2021-05-23T02:30:00Z"] + } + } +} +`, template, data.RandomInteger) +} diff --git 
a/azurerm/internal/services/dataprotection/data_protection_backup_vault_resource.go b/azurerm/internal/services/dataprotection/data_protection_backup_vault_resource.go new file mode 100644 index 000000000000..a7780b476e02 --- /dev/null +++ b/azurerm/internal/services/dataprotection/data_protection_backup_vault_resource.go @@ -0,0 +1,229 @@ +package dataprotection + +import ( + "fmt" + "log" + "regexp" + "time" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/identity" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/location" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/dataprotection/legacysdk/dataprotection" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/dataprotection/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func resourceDataProtectionBackupVault() *pluginsdk.Resource { + return &pluginsdk.Resource{ + Create: resourceDataProtectionBackupVaultCreate, + Read: resourceDataProtectionBackupVaultRead, + Update: resourceDataProtectionBackupVaultUpdate, + Delete: resourceDataProtectionBackupVaultDelete, + + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * 
time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), + }, + + Importer: pluginsdk.ImporterValidatingResourceId(func(id string) error { + _, err := parse.BackupVaultID(id) + return err + }), + + Schema: map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringMatch( + regexp.MustCompile("^[-a-zA-Z0-9]{2,50}$"), + "DataProtection BackupVault name must be 2 - 50 characters long, contain only letters, numbers and hyphens.).", + ), + }, + + "resource_group_name": azure.SchemaResourceGroupName(), + + "location": azure.SchemaLocation(), + + "datastore_type": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{ + string(dataprotection.StorageSettingStoreTypesArchiveStore), + string(dataprotection.StorageSettingStoreTypesSnapshotStore), + string(dataprotection.StorageSettingStoreTypesVaultStore), + }, false), + }, + + "redundancy": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{ + string(dataprotection.GeoRedundant), + string(dataprotection.LocallyRedundant), + }, false), + }, + + "identity": identity.SystemAssigned{}.Schema(), + + "tags": tags.Schema(), + }, + } +} +func resourceDataProtectionBackupVaultCreate(d *pluginsdk.ResourceData, meta interface{}) error { + subscriptionId := meta.(*clients.Client).Account.SubscriptionId + client := meta.(*clients.Client).DataProtection.BackupVaultClient + ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) + defer cancel() + + name := d.Get("name").(string) + resourceGroup := d.Get("resource_group_name").(string) + + id := parse.NewBackupVaultID(subscriptionId, resourceGroup, name) + + existing, err := client.Get(ctx, id.Name, id.ResourceGroup) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("checking for existing DataProtection 
BackupVault (%q): %+v", id, err) + } + } + if !utils.ResponseWasNotFound(existing.Response) { + return tf.ImportAsExistsError("azurerm_data_protection_backup_vault", id.ID()) + } + + parameters := dataprotection.BackupVaultResource{ + Location: utils.String(location.Normalize(d.Get("location").(string))), + Properties: &dataprotection.BackupVault{ + StorageSettings: &[]dataprotection.StorageSetting{ + { + DatastoreType: dataprotection.StorageSettingStoreTypes(d.Get("datastore_type").(string)), + Type: dataprotection.StorageSettingTypes(d.Get("redundancy").(string)), + }}, + }, + Identity: expandBackupVaultDppIdentityDetails(d.Get("identity").([]interface{})), + Tags: tags.Expand(d.Get("tags").(map[string]interface{})), + } + future, err := client.CreateOrUpdate(ctx, id.Name, id.ResourceGroup, parameters) + if err != nil { + return fmt.Errorf("creating DataProtection BackupVault (%q): %+v", id, err) + } + + if err := future.WaitForCompletionRef(ctx, client.Client); err != nil { + return fmt.Errorf("waiting for creation of the DataProtection BackupVault (%q): %+v", id, err) + } + + d.SetId(id.ID()) + return resourceDataProtectionBackupVaultRead(d, meta) +} + +func resourceDataProtectionBackupVaultRead(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).DataProtection.BackupVaultClient + ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.BackupVaultID(d.Id()) + if err != nil { + return err + } + + resp, err := client.Get(ctx, id.Name, id.ResourceGroup) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + log.Printf("[INFO] DataProtection BackupVault %q does not exist - removing from state", d.Id()) + d.SetId("") + return nil + } + return fmt.Errorf("retrieving DataProtection BackupVault (%q): %+v", id, err) + } + d.Set("name", id.Name) + d.Set("resource_group_name", id.ResourceGroup) + d.Set("location", location.NormalizeNilable(resp.Location)) + if props := 
resp.Properties; props != nil { + if props.StorageSettings != nil && len(*props.StorageSettings) > 0 { + d.Set("datastore_type", (*props.StorageSettings)[0].DatastoreType) + d.Set("redundancy", (*props.StorageSettings)[0].Type) + } + } + if err := d.Set("identity", flattenBackupVaultDppIdentityDetails(resp.Identity)); err != nil { + return fmt.Errorf("setting `identity`: %+v", err) + } + return tags.FlattenAndSet(d, resp.Tags) +} + +func resourceDataProtectionBackupVaultUpdate(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).DataProtection.BackupVaultClient + ctx, cancel := timeouts.ForUpdate(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.BackupVaultID(d.Id()) + if err != nil { + return err + } + + parameters := dataprotection.PatchResourceRequestInput{} + if d.HasChange("identity") { + parameters.Identity = expandBackupVaultDppIdentityDetails(d.Get("identity").([]interface{})) + } + if d.HasChange("tags") { + parameters.Tags = tags.Expand(d.Get("tags").(map[string]interface{})) + } + + future, err := client.Patch(ctx, id.Name, id.ResourceGroup, parameters) + if err != nil { + return fmt.Errorf("updating DataProtection BackupVault (%q): %+v", id, err) + } + + if err := future.WaitForCompletionRef(ctx, client.Client); err != nil { + return fmt.Errorf("waiting for update of the DataProtection BackupVault %q: %+v", id, err) + } + return resourceDataProtectionBackupVaultRead(d, meta) +} + +func resourceDataProtectionBackupVaultDelete(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).DataProtection.BackupVaultClient + ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.BackupVaultID(d.Id()) + if err != nil { + return err + } + + if resp, err := client.Delete(ctx, id.Name, id.ResourceGroup); err != nil { + if utils.ResponseWasNotFound(resp) { + return nil + } + return fmt.Errorf("deleting DataProtection 
BackupVault (%q): %+v", id, err) + } + return nil +} + +func expandBackupVaultDppIdentityDetails(input []interface{}) *dataprotection.DppIdentityDetails { + config, _ := identity.SystemAssigned{}.Expand(input) + return &dataprotection.DppIdentityDetails{ + Type: utils.String(config.Type), + } +} + +func flattenBackupVaultDppIdentityDetails(input *dataprotection.DppIdentityDetails) []interface{} { + var config *identity.ExpandedConfig + if input != nil { + config = &identity.ExpandedConfig{ + Type: *input.Type, + PrincipalId: input.PrincipalID, + TenantId: input.TenantID, + } + } + return identity.SystemAssigned{}.Flatten(config) +} diff --git a/azurerm/internal/services/dataprotection/data_protection_backup_vault_resource_test.go b/azurerm/internal/services/dataprotection/data_protection_backup_vault_resource_test.go new file mode 100644 index 000000000000..8a7650abd757 --- /dev/null +++ b/azurerm/internal/services/dataprotection/data_protection_backup_vault_resource_test.go @@ -0,0 +1,210 @@ +package dataprotection_test + +import ( + "context" + "fmt" + "testing" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/dataprotection/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +type DataProtectionBackupVaultResource struct{} + +func TestAccDataProtectionBackupVault_basic(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_data_protection_backup_vault", "test") + r := DataProtectionBackupVaultResource{} + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( 
+ check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccDataProtectionBackupVault_requiresImport(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_data_protection_backup_vault", "test") + r := DataProtectionBackupVaultResource{} + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.RequiresImportErrorStep(r.requiresImport), + }) +} + +func TestAccDataProtectionBackupVault_complete(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_data_protection_backup_vault", "test") + r := DataProtectionBackupVaultResource{} + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.complete(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccDataProtectionBackupVault_update(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_data_protection_backup_vault", "test") + r := DataProtectionBackupVaultResource{} + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.complete(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccDataProtectionBackupVault_updateIdentity(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_data_protection_backup_vault", "test") + r := DataProtectionBackupVaultResource{} + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.complete(data), + Check: acceptance.ComposeTestCheckFunc( + 
check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("identity.0.principal_id").Exists(), + check.That(data.ResourceName).Key("identity.0.tenant_id").Exists(), + ), + }, + data.ImportStep(), + { + Config: r.updateIdentity(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("identity.0.principal_id").DoesNotExist(), + check.That(data.ResourceName).Key("identity.0.tenant_id").DoesNotExist(), + ), + }, + data.ImportStep(), + }) +} + +func (r DataProtectionBackupVaultResource) Exists(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { + id, err := parse.BackupVaultID(state.ID) + if err != nil { + return nil, err + } + resp, err := client.DataProtection.BackupVaultClient.Get(ctx, id.Name, id.ResourceGroup) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + return utils.Bool(false), nil + } + return nil, fmt.Errorf("retrieving DataProtection BackupVault (%q): %+v", id, err) + } + return utils.Bool(true), nil +} + +func (r DataProtectionBackupVaultResource) template(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctest-dataprotection-%d" + location = "%s" +} +`, data.RandomInteger, data.Locations.Primary) +} + +func (r DataProtectionBackupVaultResource) basic(data acceptance.TestData) string { + template := r.template(data) + return fmt.Sprintf(` +%s + +resource "azurerm_data_protection_backup_vault" "test" { + name = "acctest-bv-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + datastore_type = "VaultStore" + redundancy = "LocallyRedundant" +} +`, template, data.RandomInteger) +} + +func (r DataProtectionBackupVaultResource) requiresImport(data acceptance.TestData) string { + config := r.basic(data) + return fmt.Sprintf(` +%s + 
+resource "azurerm_data_protection_backup_vault" "import" { + name = azurerm_data_protection_backup_vault.test.name + resource_group_name = azurerm_data_protection_backup_vault.test.resource_group_name + location = azurerm_data_protection_backup_vault.test.location + datastore_type = azurerm_data_protection_backup_vault.test.datastore_type + redundancy = azurerm_data_protection_backup_vault.test.redundancy +} +`, config) +} + +func (r DataProtectionBackupVaultResource) complete(data acceptance.TestData) string { + template := r.template(data) + return fmt.Sprintf(` +%s + +resource "azurerm_data_protection_backup_vault" "test" { + name = "acctest-bv-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + datastore_type = "VaultStore" + redundancy = "LocallyRedundant" + identity { + type = "SystemAssigned" + } + + tags = { + ENV = "Test" + } +} +`, template, data.RandomInteger) +} + +func (r DataProtectionBackupVaultResource) updateIdentity(data acceptance.TestData) string { + template := r.template(data) + return fmt.Sprintf(` +%s + +resource "azurerm_data_protection_backup_vault" "test" { + name = "acctest-bv-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + datastore_type = "VaultStore" + redundancy = "LocallyRedundant" + + tags = { + ENV = "Test" + } +} +`, template, data.RandomInteger) +} diff --git a/azurerm/internal/services/dataprotection/legacysdk/dataprotection/backupinstances.go b/azurerm/internal/services/dataprotection/legacysdk/dataprotection/backupinstances.go new file mode 100644 index 000000000000..ca796c80cfc3 --- /dev/null +++ b/azurerm/internal/services/dataprotection/legacysdk/dataprotection/backupinstances.go @@ -0,0 +1,866 @@ +package dataprotection + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" +) + +// BackupInstancesClient is the open API 2.0 Specs for Azure Data Protection service +type BackupInstancesClient struct { + BaseClient +} + +// NewBackupInstancesClient creates an instance of the BackupInstancesClient client. +func NewBackupInstancesClient(subscriptionID string) BackupInstancesClient { + return NewBackupInstancesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewBackupInstancesClientWithBaseURI creates an instance of the BackupInstancesClient client using a custom endpoint. +// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewBackupInstancesClientWithBaseURI(baseURI string, subscriptionID string) BackupInstancesClient { + return BackupInstancesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// AdhocBackup trigger adhoc backup +// Parameters: +// vaultName - the name of the backup vault. +// resourceGroupName - the name of the resource group where the backup vault is present. 
+// backupInstanceName - the name of the backup instance +// parameters - request body for operation +func (client BackupInstancesClient) AdhocBackup(ctx context.Context, vaultName string, resourceGroupName string, backupInstanceName string, parameters TriggerBackupRequest) (result BackupInstancesAdhocBackupFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BackupInstancesClient.AdhocBackup") + defer func() { + sc := -1 + if result.FutureAPI != nil && result.FutureAPI.Response() != nil { + sc = result.FutureAPI.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.BackupRuleOptions", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.BackupRuleOptions.RuleName", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.BackupRuleOptions.TriggerOption", Name: validation.Null, Rule: true, Chain: nil}, + }}}}}); err != nil { + return result, validation.NewError("dataprotection.BackupInstancesClient", "AdhocBackup", err.Error()) + } + + req, err := client.AdhocBackupPreparer(ctx, vaultName, resourceGroupName, backupInstanceName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BackupInstancesClient", "AdhocBackup", nil, "Failure preparing request") + return + } + + result, err = client.AdhocBackupSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BackupInstancesClient", "AdhocBackup", nil, "Failure sending request") + return + } + + return +} + +// AdhocBackupPreparer prepares the AdhocBackup request. 
+func (client BackupInstancesClient) AdhocBackupPreparer(ctx context.Context, vaultName string, resourceGroupName string, backupInstanceName string, parameters TriggerBackupRequest) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "backupInstanceName": autorest.Encode("path", backupInstanceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vaultName": autorest.Encode("path", vaultName), + } + + const APIVersion = "2021-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataProtection/backupVaults/{vaultName}/backupInstances/{backupInstanceName}/backup", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// AdhocBackupSender sends the AdhocBackup request. The method will close the +// http.Response Body if it receives an error. +func (client BackupInstancesClient) AdhocBackupSender(req *http.Request) (future BackupInstancesAdhocBackupFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + var azf azure.Future + azf, err = azure.NewFutureFromResponse(resp) + future.FutureAPI = &azf + future.Result = future.result + return +} + +// AdhocBackupResponder handles the response to the AdhocBackup request. The method always +// closes the http.Response Body. 
func (client BackupInstancesClient) AdhocBackupResponder(resp *http.Response) (result OperationJobExtendedInfo, err error) {
	err = autorest.Respond(
		resp,
		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return
}

// CreateOrUpdate create or update a backup instance in a backup vault
// Parameters:
// vaultName - the name of the backup vault.
// resourceGroupName - the name of the resource group where the backup vault is present.
// backupInstanceName - the name of the backup instance
// parameters - request body for operation
//
// NOTE(review): AutoRest-generated client code — regenerate rather than hand-edit.
func (client BackupInstancesClient) CreateOrUpdate(ctx context.Context, vaultName string, resourceGroupName string, backupInstanceName string, parameters BackupInstanceResource) (result BackupInstancesCreateOrUpdateFuture, err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/BackupInstancesClient.CreateOrUpdate")
		defer func() {
			sc := -1
			if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
				sc = result.FutureAPI.Response().StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	// Client-side validation mirrors the Swagger "required" constraints:
	// DataSourceInfo.ResourceID, PolicyInfo.PolicyID and ObjectType are
	// mandatory when Properties is set; the remaining targets are optional.
	if err := validation.Validate([]validation.Validation{
		{TargetValue: parameters,
			Constraints: []validation.Constraint{{Target: "parameters.Properties", Name: validation.Null, Rule: false,
				Chain: []validation.Constraint{{Target: "parameters.Properties.DataSourceInfo", Name: validation.Null, Rule: true,
					Chain: []validation.Constraint{{Target: "parameters.Properties.DataSourceInfo.ResourceID", Name: validation.Null, Rule: true, Chain: nil}}},
					{Target: "parameters.Properties.DataSourceSetInfo", Name: validation.Null, Rule: false,
						Chain: []validation.Constraint{{Target: "parameters.Properties.DataSourceSetInfo.ResourceID", Name: validation.Null, Rule: true, Chain: nil}}},
					{Target: "parameters.Properties.PolicyInfo", Name: validation.Null, Rule: true,
						Chain: []validation.Constraint{{Target: "parameters.Properties.PolicyInfo.PolicyID", Name: validation.Null, Rule: true, Chain: nil}}},
					{Target: "parameters.Properties.ProtectionStatus", Name: validation.Null, Rule: false,
						Chain: []validation.Constraint{{Target: "parameters.Properties.ProtectionStatus.ErrorDetails", Name: validation.Null, Rule: false,
							Chain: []validation.Constraint{{Target: "parameters.Properties.ProtectionStatus.ErrorDetails.InnerError", Name: validation.Null, Rule: false,
								Chain: []validation.Constraint{{Target: "parameters.Properties.ProtectionStatus.ErrorDetails.InnerError.EmbeddedInnerError", Name: validation.Null, Rule: false, Chain: nil}}},
							}},
						}},
					{Target: "parameters.Properties.ProtectionErrorDetails", Name: validation.Null, Rule: false,
						Chain: []validation.Constraint{{Target: "parameters.Properties.ProtectionErrorDetails.InnerError", Name: validation.Null, Rule: false,
							Chain: []validation.Constraint{{Target: "parameters.Properties.ProtectionErrorDetails.InnerError.EmbeddedInnerError", Name: validation.Null, Rule: false, Chain: nil}}},
						}},
					{Target: "parameters.Properties.ObjectType", Name: validation.Null, Rule: true, Chain: nil},
				}}}}}); err != nil {
		return result, validation.NewError("dataprotection.BackupInstancesClient", "CreateOrUpdate", err.Error())
	}

	req, err := client.CreateOrUpdatePreparer(ctx, vaultName, resourceGroupName, backupInstanceName, parameters)
	if err != nil {
		err = autorest.NewErrorWithError(err, "dataprotection.BackupInstancesClient", "CreateOrUpdate", nil, "Failure preparing request")
		return
	}

	result, err = client.CreateOrUpdateSender(req)
	if err != nil {
		err = autorest.NewErrorWithError(err, "dataprotection.BackupInstancesClient", "CreateOrUpdate", nil, "Failure sending request")
		return
	}

	return
}

// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
func (client BackupInstancesClient) CreateOrUpdatePreparer(ctx context.Context, vaultName string, resourceGroupName string, backupInstanceName string, parameters BackupInstanceResource) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"backupInstanceName": autorest.Encode("path", backupInstanceName),
		"resourceGroupName":  autorest.Encode("path", resourceGroupName),
		"subscriptionId":     autorest.Encode("path", client.SubscriptionID),
		"vaultName":          autorest.Encode("path", vaultName),
	}

	// api-version is pinned by the code generator for this service release.
	const APIVersion = "2021-01-01"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}

	preparer := autorest.CreatePreparer(
		autorest.AsContentType("application/json; charset=utf-8"),
		autorest.AsPut(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataProtection/backupVaults/{vaultName}/backupInstances/{backupInstanceName}", pathParameters),
		autorest.WithJSON(parameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}

// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
// http.Response Body if it receives an error.
func (client BackupInstancesClient) CreateOrUpdateSender(req *http.Request) (future BackupInstancesCreateOrUpdateFuture, err error) {
	var resp *http.Response
	resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
	if err != nil {
		return
	}
	// Wrap the initial response in an azure.Future so callers can poll the
	// long-running create/update operation to completion.
	var azf azure.Future
	azf, err = azure.NewFutureFromResponse(resp)
	future.FutureAPI = &azf
	future.Result = future.result
	return
}

// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
// closes the http.Response Body.
func (client BackupInstancesClient) CreateOrUpdateResponder(resp *http.Response) (result BackupInstanceResource, err error) {
	err = autorest.Respond(
		resp,
		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return
}

// Delete delete a backup instance in a backup vault
// Parameters:
// vaultName - the name of the backup vault.
// resourceGroupName - the name of the resource group where the backup vault is present.
// backupInstanceName - the name of the backup instance
//
// NOTE(review): AutoRest-generated client code — regenerate rather than hand-edit.
func (client BackupInstancesClient) Delete(ctx context.Context, vaultName string, resourceGroupName string, backupInstanceName string) (result BackupInstancesDeleteFuture, err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/BackupInstancesClient.Delete")
		defer func() {
			sc := -1
			if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
				sc = result.FutureAPI.Response().StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	req, err := client.DeletePreparer(ctx, vaultName, resourceGroupName, backupInstanceName)
	if err != nil {
		err = autorest.NewErrorWithError(err, "dataprotection.BackupInstancesClient", "Delete", nil, "Failure preparing request")
		return
	}

	result, err = client.DeleteSender(req)
	if err != nil {
		err = autorest.NewErrorWithError(err, "dataprotection.BackupInstancesClient", "Delete", nil, "Failure sending request")
		return
	}

	return
}

// DeletePreparer prepares the Delete request.
func (client BackupInstancesClient) DeletePreparer(ctx context.Context, vaultName string, resourceGroupName string, backupInstanceName string) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"backupInstanceName": autorest.Encode("path", backupInstanceName),
		"resourceGroupName":  autorest.Encode("path", resourceGroupName),
		"subscriptionId":     autorest.Encode("path", client.SubscriptionID),
		"vaultName":          autorest.Encode("path", vaultName),
	}

	// api-version is pinned by the code generator for this service release.
	const APIVersion = "2021-01-01"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}

	preparer := autorest.CreatePreparer(
		autorest.AsDelete(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataProtection/backupVaults/{vaultName}/backupInstances/{backupInstanceName}", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}

// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client BackupInstancesClient) DeleteSender(req *http.Request) (future BackupInstancesDeleteFuture, err error) {
	var resp *http.Response
	resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
	if err != nil {
		return
	}
	// Wrap the initial response in an azure.Future so callers can poll the
	// long-running delete operation to completion.
	var azf azure.Future
	azf, err = azure.NewFutureFromResponse(resp)
	future.FutureAPI = &azf
	future.Result = future.result
	return
}

// DeleteResponder handles the response to the Delete request. The method always
// closes the http.Response Body.
func (client BackupInstancesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
	err = autorest.Respond(
		resp,
		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
		autorest.ByClosing())
	result.Response = resp
	return
}

// Get gets a backup instance with name in a backup vault
// Parameters:
// vaultName - the name of the backup vault.
// resourceGroupName - the name of the resource group where the backup vault is present.
// backupInstanceName - the name of the backup instance
//
// NOTE(review): AutoRest-generated client code — regenerate rather than hand-edit.
func (client BackupInstancesClient) Get(ctx context.Context, vaultName string, resourceGroupName string, backupInstanceName string) (result BackupInstanceResource, err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/BackupInstancesClient.Get")
		defer func() {
			sc := -1
			if result.Response.Response != nil {
				sc = result.Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	req, err := client.GetPreparer(ctx, vaultName, resourceGroupName, backupInstanceName)
	if err != nil {
		err = autorest.NewErrorWithError(err, "dataprotection.BackupInstancesClient", "Get", nil, "Failure preparing request")
		return
	}

	resp, err := client.GetSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "dataprotection.BackupInstancesClient", "Get", resp, "Failure sending request")
		return
	}

	result, err = client.GetResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "dataprotection.BackupInstancesClient", "Get", resp, "Failure responding to request")
		return
	}

	return
}

// GetPreparer prepares the Get request.
func (client BackupInstancesClient) GetPreparer(ctx context.Context, vaultName string, resourceGroupName string, backupInstanceName string) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"backupInstanceName": autorest.Encode("path", backupInstanceName),
		"resourceGroupName":  autorest.Encode("path", resourceGroupName),
		"subscriptionId":     autorest.Encode("path", client.SubscriptionID),
		"vaultName":          autorest.Encode("path", vaultName),
	}

	// api-version is pinned by the code generator for this service release.
	const APIVersion = "2021-01-01"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}

	preparer := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataProtection/backupVaults/{vaultName}/backupInstances/{backupInstanceName}", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}

// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client BackupInstancesClient) GetSender(req *http.Request) (*http.Response, error) {
	return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}

// GetResponder handles the response to the Get request. The method always
// closes the http.Response Body.
func (client BackupInstancesClient) GetResponder(resp *http.Response) (result BackupInstanceResource, err error) {
	err = autorest.Respond(
		resp,
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return
}

// List gets a backup instances belonging to a backup vault
// Parameters:
// vaultName - the name of the backup vault.
// resourceGroupName - the name of the resource group where the backup vault is present.
//
// NOTE(review): AutoRest-generated client code — regenerate rather than hand-edit.
func (client BackupInstancesClient) List(ctx context.Context, vaultName string, resourceGroupName string) (result BackupInstanceResourceListPage, err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/BackupInstancesClient.List")
		defer func() {
			sc := -1
			if result.birl.Response.Response != nil {
				sc = result.birl.Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	// Wire up the pager so NextWithContext can fetch subsequent pages.
	result.fn = client.listNextResults
	req, err := client.ListPreparer(ctx, vaultName, resourceGroupName)
	if err != nil {
		err = autorest.NewErrorWithError(err, "dataprotection.BackupInstancesClient", "List", nil, "Failure preparing request")
		return
	}

	resp, err := client.ListSender(req)
	if err != nil {
		result.birl.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "dataprotection.BackupInstancesClient", "List", resp, "Failure sending request")
		return
	}

	result.birl, err = client.ListResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "dataprotection.BackupInstancesClient", "List", resp, "Failure responding to request")
		return
	}
	// Skip past an empty first page when the service indicates more results.
	if result.birl.hasNextLink() && result.birl.IsEmpty() {
		err = result.NextWithContext(ctx)
		return
	}

	return
}

// ListPreparer prepares the List request.
func (client BackupInstancesClient) ListPreparer(ctx context.Context, vaultName string, resourceGroupName string) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"resourceGroupName": autorest.Encode("path", resourceGroupName),
		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
		"vaultName":         autorest.Encode("path", vaultName),
	}

	// api-version is pinned by the code generator for this service release.
	const APIVersion = "2021-01-01"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}

	preparer := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataProtection/backupVaults/{vaultName}/backupInstances", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}

// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client BackupInstancesClient) ListSender(req *http.Request) (*http.Response, error) {
	return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}

// ListResponder handles the response to the List request. The method always
// closes the http.Response Body.
func (client BackupInstancesClient) ListResponder(resp *http.Response) (result BackupInstanceResourceList, err error) {
	err = autorest.Respond(
		resp,
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return
}

// listNextResults retrieves the next set of results, if any.
func (client BackupInstancesClient) listNextResults(ctx context.Context, lastResults BackupInstanceResourceList) (result BackupInstanceResourceList, err error) {
	req, err := lastResults.backupInstanceResourceListPreparer(ctx)
	if err != nil {
		return result, autorest.NewErrorWithError(err, "dataprotection.BackupInstancesClient", "listNextResults", nil, "Failure preparing next results request")
	}
	// A nil request means there is no next link — pagination is finished.
	if req == nil {
		return
	}
	resp, err := client.ListSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		return result, autorest.NewErrorWithError(err, "dataprotection.BackupInstancesClient", "listNextResults", resp, "Failure sending next results request")
	}
	result, err = client.ListResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "dataprotection.BackupInstancesClient", "listNextResults", resp, "Failure responding to next results request")
	}
	return
}

// ListComplete enumerates all values, automatically crossing page boundaries as required.
func (client BackupInstancesClient) ListComplete(ctx context.Context, vaultName string, resourceGroupName string) (result BackupInstanceResourceListIterator, err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/BackupInstancesClient.List")
		defer func() {
			sc := -1
			// NOTE(review): the guard reads result.Response() while the body reads
			// result.page.Response() — presumably equivalent accessors; generated
			// code quirk, confirm against the iterator type before relying on it.
			if result.Response().Response.Response != nil {
				sc = result.page.Response().Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	result.page, err = client.List(ctx, vaultName, resourceGroupName)
	return
}

// TriggerRehydrate rehydrate recovery point for restore for a BackupInstance
// Parameters:
// resourceGroupName - the name of the resource group where the backup vault is present.
// vaultName - the name of the backup vault.
// parameters - request body for operation
//
// NOTE(review): AutoRest-generated client code — regenerate rather than hand-edit.
func (client BackupInstancesClient) TriggerRehydrate(ctx context.Context, resourceGroupName string, vaultName string, parameters AzureBackupRehydrationRequest, backupInstanceName string) (result BackupInstancesTriggerRehydrateFuture, err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/BackupInstancesClient.TriggerRehydrate")
		defer func() {
			sc := -1
			if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
				sc = result.FutureAPI.Response().StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	// Client-side validation: RecoveryPointID and RehydrationRetentionDuration are required.
	if err := validation.Validate([]validation.Validation{
		{TargetValue: parameters,
			Constraints: []validation.Constraint{{Target: "parameters.RecoveryPointID", Name: validation.Null, Rule: true, Chain: nil},
				{Target: "parameters.RehydrationRetentionDuration", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
		return result, validation.NewError("dataprotection.BackupInstancesClient", "TriggerRehydrate", err.Error())
	}

	req, err := client.TriggerRehydratePreparer(ctx, resourceGroupName, vaultName, parameters, backupInstanceName)
	if err != nil {
		err = autorest.NewErrorWithError(err, "dataprotection.BackupInstancesClient", "TriggerRehydrate", nil, "Failure preparing request")
		return
	}

	result, err = client.TriggerRehydrateSender(req)
	if err != nil {
		err = autorest.NewErrorWithError(err, "dataprotection.BackupInstancesClient", "TriggerRehydrate", nil, "Failure sending request")
		return
	}

	return
}

// TriggerRehydratePreparer prepares the TriggerRehydrate request.
func (client BackupInstancesClient) TriggerRehydratePreparer(ctx context.Context, resourceGroupName string, vaultName string, parameters AzureBackupRehydrationRequest, backupInstanceName string) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"backupInstanceName": autorest.Encode("path", backupInstanceName),
		"resourceGroupName":  autorest.Encode("path", resourceGroupName),
		"subscriptionId":     autorest.Encode("path", client.SubscriptionID),
		"vaultName":          autorest.Encode("path", vaultName),
	}

	// api-version is pinned by the code generator for this service release.
	const APIVersion = "2021-01-01"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}

	preparer := autorest.CreatePreparer(
		autorest.AsContentType("application/json; charset=utf-8"),
		autorest.AsPost(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataProtection/backupVaults/{vaultName}/backupInstances/{backupInstanceName}/rehydrate", pathParameters),
		autorest.WithJSON(parameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}

// TriggerRehydrateSender sends the TriggerRehydrate request. The method will close the
// http.Response Body if it receives an error.
func (client BackupInstancesClient) TriggerRehydrateSender(req *http.Request) (future BackupInstancesTriggerRehydrateFuture, err error) {
	var resp *http.Response
	resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
	if err != nil {
		return
	}
	// Wrap the initial response in an azure.Future so callers can poll the
	// long-running rehydrate operation to completion.
	var azf azure.Future
	azf, err = azure.NewFutureFromResponse(resp)
	future.FutureAPI = &azf
	future.Result = future.result
	return
}

// TriggerRehydrateResponder handles the response to the TriggerRehydrate request. The method always
// closes the http.Response Body.
+func (client BackupInstancesClient) TriggerRehydrateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// TriggerRestore triggers restore for a BackupInstance +// Parameters: +// vaultName - the name of the backup vault. +// resourceGroupName - the name of the resource group where the backup vault is present. +// backupInstanceName - the name of the backup instance +// parameters - request body for operation +func (client BackupInstancesClient) TriggerRestore(ctx context.Context, vaultName string, resourceGroupName string, backupInstanceName string, parameters BasicAzureBackupRestoreRequest) (result BackupInstancesTriggerRestoreFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BackupInstancesClient.TriggerRestore") + defer func() { + sc := -1 + if result.FutureAPI != nil && result.FutureAPI.Response() != nil { + sc = result.FutureAPI.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.TriggerRestorePreparer(ctx, vaultName, resourceGroupName, backupInstanceName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BackupInstancesClient", "TriggerRestore", nil, "Failure preparing request") + return + } + + result, err = client.TriggerRestoreSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BackupInstancesClient", "TriggerRestore", nil, "Failure sending request") + return + } + + return +} + +// TriggerRestorePreparer prepares the TriggerRestore request. 
+func (client BackupInstancesClient) TriggerRestorePreparer(ctx context.Context, vaultName string, resourceGroupName string, backupInstanceName string, parameters BasicAzureBackupRestoreRequest) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "backupInstanceName": autorest.Encode("path", backupInstanceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vaultName": autorest.Encode("path", vaultName), + } + + const APIVersion = "2021-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataProtection/backupVaults/{vaultName}/backupInstances/{backupInstanceName}/restore", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// TriggerRestoreSender sends the TriggerRestore request. The method will close the +// http.Response Body if it receives an error. +func (client BackupInstancesClient) TriggerRestoreSender(req *http.Request) (future BackupInstancesTriggerRestoreFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + var azf azure.Future + azf, err = azure.NewFutureFromResponse(resp) + future.FutureAPI = &azf + future.Result = future.result + return +} + +// TriggerRestoreResponder handles the response to the TriggerRestore request. The method always +// closes the http.Response Body. 
+func (client BackupInstancesClient) TriggerRestoreResponder(resp *http.Response) (result OperationJobExtendedInfo, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ValidateForBackup validate whether adhoc backup will be successful or not +// Parameters: +// vaultName - the name of the backup vault. +// resourceGroupName - the name of the resource group where the backup vault is present. +// parameters - request body for operation +func (client BackupInstancesClient) ValidateForBackup(ctx context.Context, vaultName string, resourceGroupName string, parameters ValidateForBackupRequest) (result BackupInstancesValidateForBackupFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BackupInstancesClient.ValidateForBackup") + defer func() { + sc := -1 + if result.FutureAPI != nil && result.FutureAPI.Response() != nil { + sc = result.FutureAPI.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.BackupInstance", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.BackupInstance.DataSourceInfo", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.BackupInstance.DataSourceInfo.ResourceID", Name: validation.Null, Rule: true, Chain: nil}}}, + {Target: "parameters.BackupInstance.DataSourceSetInfo", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.BackupInstance.DataSourceSetInfo.ResourceID", Name: validation.Null, Rule: true, Chain: nil}}}, + {Target: "parameters.BackupInstance.PolicyInfo", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: 
"parameters.BackupInstance.PolicyInfo.PolicyID", Name: validation.Null, Rule: true, Chain: nil}}}, + {Target: "parameters.BackupInstance.ProtectionStatus", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.BackupInstance.ProtectionStatus.ErrorDetails", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.BackupInstance.ProtectionStatus.ErrorDetails.InnerError", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.BackupInstance.ProtectionStatus.ErrorDetails.InnerError.EmbeddedInnerError", Name: validation.Null, Rule: false, Chain: nil}}}, + }}, + }}, + {Target: "parameters.BackupInstance.ProtectionErrorDetails", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.BackupInstance.ProtectionErrorDetails.InnerError", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.BackupInstance.ProtectionErrorDetails.InnerError.EmbeddedInnerError", Name: validation.Null, Rule: false, Chain: nil}}}, + }}, + {Target: "parameters.BackupInstance.ObjectType", Name: validation.Null, Rule: true, Chain: nil}, + }}}}}); err != nil { + return result, validation.NewError("dataprotection.BackupInstancesClient", "ValidateForBackup", err.Error()) + } + + req, err := client.ValidateForBackupPreparer(ctx, vaultName, resourceGroupName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BackupInstancesClient", "ValidateForBackup", nil, "Failure preparing request") + return + } + + result, err = client.ValidateForBackupSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BackupInstancesClient", "ValidateForBackup", nil, "Failure sending request") + return + } + + return +} + +// ValidateForBackupPreparer prepares the ValidateForBackup request. 
+func (client BackupInstancesClient) ValidateForBackupPreparer(ctx context.Context, vaultName string, resourceGroupName string, parameters ValidateForBackupRequest) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vaultName": autorest.Encode("path", vaultName), + } + + const APIVersion = "2021-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataProtection/backupVaults/{vaultName}/validateForBackup", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ValidateForBackupSender sends the ValidateForBackup request. The method will close the +// http.Response Body if it receives an error. +func (client BackupInstancesClient) ValidateForBackupSender(req *http.Request) (future BackupInstancesValidateForBackupFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + var azf azure.Future + azf, err = azure.NewFutureFromResponse(resp) + future.FutureAPI = &azf + future.Result = future.result + return +} + +// ValidateForBackupResponder handles the response to the ValidateForBackup request. The method always +// closes the http.Response Body. 
+func (client BackupInstancesClient) ValidateForBackupResponder(resp *http.Response) (result OperationJobExtendedInfo, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ValidateRestore validates if Restore can be triggered for a DataSource +// Parameters: +// vaultName - the name of the backup vault. +// resourceGroupName - the name of the resource group where the backup vault is present. +// backupInstanceName - the name of the backup instance +// parameters - request body for operation +func (client BackupInstancesClient) ValidateRestore(ctx context.Context, vaultName string, resourceGroupName string, backupInstanceName string, parameters ValidateRestoreRequestObject) (result BackupInstancesValidateRestoreFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BackupInstancesClient.ValidateRestore") + defer func() { + sc := -1 + if result.FutureAPI != nil && result.FutureAPI.Response() != nil { + sc = result.FutureAPI.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.ValidateRestorePreparer(ctx, vaultName, resourceGroupName, backupInstanceName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BackupInstancesClient", "ValidateRestore", nil, "Failure preparing request") + return + } + + result, err = client.ValidateRestoreSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BackupInstancesClient", "ValidateRestore", nil, "Failure sending request") + return + } + + return +} + +// ValidateRestorePreparer prepares the ValidateRestore request. 
+func (client BackupInstancesClient) ValidateRestorePreparer(ctx context.Context, vaultName string, resourceGroupName string, backupInstanceName string, parameters ValidateRestoreRequestObject) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "backupInstanceName": autorest.Encode("path", backupInstanceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vaultName": autorest.Encode("path", vaultName), + } + + const APIVersion = "2021-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataProtection/backupVaults/{vaultName}/backupInstances/{backupInstanceName}/validateRestore", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ValidateRestoreSender sends the ValidateRestore request. The method will close the +// http.Response Body if it receives an error. +func (client BackupInstancesClient) ValidateRestoreSender(req *http.Request) (future BackupInstancesValidateRestoreFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + var azf azure.Future + azf, err = azure.NewFutureFromResponse(resp) + future.FutureAPI = &azf + future.Result = future.result + return +} + +// ValidateRestoreResponder handles the response to the ValidateRestore request. The method always +// closes the http.Response Body. 
+func (client BackupInstancesClient) ValidateRestoreResponder(resp *http.Response) (result OperationJobExtendedInfo, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/azurerm/internal/services/dataprotection/legacysdk/dataprotection/backuppolicies.go b/azurerm/internal/services/dataprotection/legacysdk/dataprotection/backuppolicies.go new file mode 100644 index 000000000000..e26ad7d98233 --- /dev/null +++ b/azurerm/internal/services/dataprotection/legacysdk/dataprotection/backuppolicies.go @@ -0,0 +1,383 @@ +package dataprotection + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" +) + +// BackupPoliciesClient is the open API 2.0 Specs for Azure Data Protection service +type BackupPoliciesClient struct { + BaseClient +} + +// NewBackupPoliciesClient creates an instance of the BackupPoliciesClient client. +func NewBackupPoliciesClient(subscriptionID string) BackupPoliciesClient { + return NewBackupPoliciesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewBackupPoliciesClientWithBaseURI creates an instance of the BackupPoliciesClient client using a custom endpoint. +// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). 
+func NewBackupPoliciesClientWithBaseURI(baseURI string, subscriptionID string) BackupPoliciesClient { + return BackupPoliciesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate sends the create or update request. +// Parameters: +// vaultName - the name of the backup vault. +// resourceGroupName - the name of the resource group where the backup vault is present. +// parameters - request body for operation +func (client BackupPoliciesClient) CreateOrUpdate(ctx context.Context, vaultName string, resourceGroupName string, backupPolicyName string, parameters BaseBackupPolicyResource) (result BaseBackupPolicyResource, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BackupPoliciesClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.CreateOrUpdatePreparer(ctx, vaultName, resourceGroupName, backupPolicyName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BackupPoliciesClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "dataprotection.BackupPoliciesClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BackupPoliciesClient", "CreateOrUpdate", resp, "Failure responding to request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client BackupPoliciesClient) CreateOrUpdatePreparer(ctx context.Context, vaultName string, resourceGroupName string, backupPolicyName string, parameters BaseBackupPolicyResource) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "backupPolicyName": autorest.Encode("path", backupPolicyName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vaultName": autorest.Encode("path", vaultName), + } + + const APIVersion = "2021-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataProtection/backupVaults/{vaultName}/backupPolicies/{backupPolicyName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client BackupPoliciesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client BackupPoliciesClient) CreateOrUpdateResponder(resp *http.Response) (result BaseBackupPolicyResource, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete sends the delete request. 
+// Parameters: +// vaultName - the name of the backup vault. +// resourceGroupName - the name of the resource group where the backup vault is present. +func (client BackupPoliciesClient) Delete(ctx context.Context, vaultName string, resourceGroupName string, backupPolicyName string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BackupPoliciesClient.Delete") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, vaultName, resourceGroupName, backupPolicyName) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BackupPoliciesClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "dataprotection.BackupPoliciesClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BackupPoliciesClient", "Delete", resp, "Failure responding to request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client BackupPoliciesClient) DeletePreparer(ctx context.Context, vaultName string, resourceGroupName string, backupPolicyName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "backupPolicyName": autorest.Encode("path", backupPolicyName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vaultName": autorest.Encode("path", vaultName), + } + + const APIVersion = "2021-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataProtection/backupVaults/{vaultName}/backupPolicies/{backupPolicyName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client BackupPoliciesClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client BackupPoliciesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets a backup policy belonging to a backup vault +// Parameters: +// vaultName - the name of the backup vault. +// resourceGroupName - the name of the resource group where the backup vault is present. 
+func (client BackupPoliciesClient) Get(ctx context.Context, vaultName string, resourceGroupName string, backupPolicyName string) (result BaseBackupPolicyResource, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BackupPoliciesClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, vaultName, resourceGroupName, backupPolicyName) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BackupPoliciesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "dataprotection.BackupPoliciesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BackupPoliciesClient", "Get", resp, "Failure responding to request") + return + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client BackupPoliciesClient) GetPreparer(ctx context.Context, vaultName string, resourceGroupName string, backupPolicyName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "backupPolicyName": autorest.Encode("path", backupPolicyName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vaultName": autorest.Encode("path", vaultName), + } + + const APIVersion = "2021-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataProtection/backupVaults/{vaultName}/backupPolicies/{backupPolicyName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client BackupPoliciesClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client BackupPoliciesClient) GetResponder(resp *http.Response) (result BaseBackupPolicyResource, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List returns list of backup policies belonging to a backup vault +// Parameters: +// vaultName - the name of the backup vault. +// resourceGroupName - the name of the resource group where the backup vault is present. 
+func (client BackupPoliciesClient) List(ctx context.Context, vaultName string, resourceGroupName string) (result BaseBackupPolicyResourceListPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BackupPoliciesClient.List") + defer func() { + sc := -1 + if result.bbprl.Response.Response != nil { + sc = result.bbprl.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, vaultName, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BackupPoliciesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.bbprl.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "dataprotection.BackupPoliciesClient", "List", resp, "Failure sending request") + return + } + + result.bbprl, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BackupPoliciesClient", "List", resp, "Failure responding to request") + return + } + if result.bbprl.hasNextLink() && result.bbprl.IsEmpty() { + err = result.NextWithContext(ctx) + return + } + + return +} + +// ListPreparer prepares the List request. 
+func (client BackupPoliciesClient) ListPreparer(ctx context.Context, vaultName string, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vaultName": autorest.Encode("path", vaultName), + } + + const APIVersion = "2021-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataProtection/backupVaults/{vaultName}/backupPolicies", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client BackupPoliciesClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client BackupPoliciesClient) ListResponder(resp *http.Response) (result BaseBackupPolicyResourceList, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. 
+func (client BackupPoliciesClient) listNextResults(ctx context.Context, lastResults BaseBackupPolicyResourceList) (result BaseBackupPolicyResourceList, err error) { + req, err := lastResults.baseBackupPolicyResourceListPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "dataprotection.BackupPoliciesClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "dataprotection.BackupPoliciesClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BackupPoliciesClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client BackupPoliciesClient) ListComplete(ctx context.Context, vaultName string, resourceGroupName string) (result BaseBackupPolicyResourceListIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BackupPoliciesClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, vaultName, resourceGroupName) + return +} diff --git a/azurerm/internal/services/dataprotection/legacysdk/dataprotection/backupvaults.go b/azurerm/internal/services/dataprotection/legacysdk/dataprotection/backupvaults.go new file mode 100644 index 000000000000..e30eaee8621b --- /dev/null +++ b/azurerm/internal/services/dataprotection/legacysdk/dataprotection/backupvaults.go @@ -0,0 +1,661 @@ +package dataprotection + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" +) + +// BackupVaultsClient is the open API 2.0 Specs for Azure Data Protection service +type BackupVaultsClient struct { + BaseClient +} + +// NewBackupVaultsClient creates an instance of the BackupVaultsClient client. +func NewBackupVaultsClient(subscriptionID string) BackupVaultsClient { + return NewBackupVaultsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewBackupVaultsClientWithBaseURI creates an instance of the BackupVaultsClient client using a custom endpoint. Use +// this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewBackupVaultsClientWithBaseURI(baseURI string, subscriptionID string) BackupVaultsClient { + return BackupVaultsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CheckNameAvailability sends the check name availability request. +// Parameters: +// resourceGroupName - the name of the resource group where the backup vault is present. +// location - the location in which uniqueness will be verified. 
+// parameters - check name availability request +func (client BackupVaultsClient) CheckNameAvailability(ctx context.Context, resourceGroupName string, location string, parameters CheckNameAvailabilityRequest) (result CheckNameAvailabilityResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BackupVaultsClient.CheckNameAvailability") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.CheckNameAvailabilityPreparer(ctx, resourceGroupName, location, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BackupVaultsClient", "CheckNameAvailability", nil, "Failure preparing request") + return + } + + resp, err := client.CheckNameAvailabilitySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "dataprotection.BackupVaultsClient", "CheckNameAvailability", resp, "Failure sending request") + return + } + + result, err = client.CheckNameAvailabilityResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BackupVaultsClient", "CheckNameAvailability", resp, "Failure responding to request") + return + } + + return +} + +// CheckNameAvailabilityPreparer prepares the CheckNameAvailability request. 
+func (client BackupVaultsClient) CheckNameAvailabilityPreparer(ctx context.Context, resourceGroupName string, location string, parameters CheckNameAvailabilityRequest) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": autorest.Encode("path", location), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataProtection/locations/{location}/checkNameAvailability", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CheckNameAvailabilitySender sends the CheckNameAvailability request. The method will close the +// http.Response Body if it receives an error. +func (client BackupVaultsClient) CheckNameAvailabilitySender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// CheckNameAvailabilityResponder handles the response to the CheckNameAvailability request. The method always +// closes the http.Response Body. +func (client BackupVaultsClient) CheckNameAvailabilityResponder(resp *http.Response) (result CheckNameAvailabilityResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// CreateOrUpdate creates or updates a BackupVault resource belonging to a resource group. 
+// Parameters: +// vaultName - the name of the backup vault. +// resourceGroupName - the name of the resource group where the backup vault is present. +// parameters - request body for operation +func (client BackupVaultsClient) CreateOrUpdate(ctx context.Context, vaultName string, resourceGroupName string, parameters BackupVaultResource) (result BackupVaultsCreateOrUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BackupVaultsClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.FutureAPI != nil && result.FutureAPI.Response() != nil { + sc = result.FutureAPI.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.Properties", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.Properties.StorageSettings", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil { + return result, validation.NewError("dataprotection.BackupVaultsClient", "CreateOrUpdate", err.Error()) + } + + req, err := client.CreateOrUpdatePreparer(ctx, vaultName, resourceGroupName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BackupVaultsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BackupVaultsClient", "CreateOrUpdate", nil, "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client BackupVaultsClient) CreateOrUpdatePreparer(ctx context.Context, vaultName string, resourceGroupName string, parameters BackupVaultResource) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vaultName": autorest.Encode("path", vaultName), + } + + const APIVersion = "2021-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataProtection/backupVaults/{vaultName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client BackupVaultsClient) CreateOrUpdateSender(req *http.Request) (future BackupVaultsCreateOrUpdateFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + var azf azure.Future + azf, err = azure.NewFutureFromResponse(resp) + future.FutureAPI = &azf + future.Result = future.result + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. 
+func (client BackupVaultsClient) CreateOrUpdateResponder(resp *http.Response) (result BackupVaultResource, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a BackupVault resource from the resource group. +// Parameters: +// vaultName - the name of the backup vault. +// resourceGroupName - the name of the resource group where the backup vault is present. +func (client BackupVaultsClient) Delete(ctx context.Context, vaultName string, resourceGroupName string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BackupVaultsClient.Delete") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, vaultName, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BackupVaultsClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "dataprotection.BackupVaultsClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BackupVaultsClient", "Delete", resp, "Failure responding to request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client BackupVaultsClient) DeletePreparer(ctx context.Context, vaultName string, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vaultName": autorest.Encode("path", vaultName), + } + + const APIVersion = "2021-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataProtection/backupVaults/{vaultName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client BackupVaultsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client BackupVaultsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get returns a resource belonging to a resource group. +// Parameters: +// vaultName - the name of the backup vault. +// resourceGroupName - the name of the resource group where the backup vault is present. 
+func (client BackupVaultsClient) Get(ctx context.Context, vaultName string, resourceGroupName string) (result BackupVaultResource, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BackupVaultsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, vaultName, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BackupVaultsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "dataprotection.BackupVaultsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BackupVaultsClient", "Get", resp, "Failure responding to request") + return + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client BackupVaultsClient) GetPreparer(ctx context.Context, vaultName string, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vaultName": autorest.Encode("path", vaultName), + } + + const APIVersion = "2021-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataProtection/backupVaults/{vaultName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client BackupVaultsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client BackupVaultsClient) GetResponder(resp *http.Response) (result BackupVaultResource, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetResourcesInResourceGroup returns resource collection belonging to a resource group. +// Parameters: +// resourceGroupName - the name of the resource group where the backup vault is present. 
+func (client BackupVaultsClient) GetResourcesInResourceGroup(ctx context.Context, resourceGroupName string) (result BackupVaultResourceListPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BackupVaultsClient.GetResourcesInResourceGroup") + defer func() { + sc := -1 + if result.bvrl.Response.Response != nil { + sc = result.bvrl.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.getResourcesInResourceGroupNextResults + req, err := client.GetResourcesInResourceGroupPreparer(ctx, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BackupVaultsClient", "GetResourcesInResourceGroup", nil, "Failure preparing request") + return + } + + resp, err := client.GetResourcesInResourceGroupSender(req) + if err != nil { + result.bvrl.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "dataprotection.BackupVaultsClient", "GetResourcesInResourceGroup", resp, "Failure sending request") + return + } + + result.bvrl, err = client.GetResourcesInResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BackupVaultsClient", "GetResourcesInResourceGroup", resp, "Failure responding to request") + return + } + if result.bvrl.hasNextLink() && result.bvrl.IsEmpty() { + err = result.NextWithContext(ctx) + return + } + + return +} + +// GetResourcesInResourceGroupPreparer prepares the GetResourcesInResourceGroup request. 
+func (client BackupVaultsClient) GetResourcesInResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataProtection/backupVaults", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetResourcesInResourceGroupSender sends the GetResourcesInResourceGroup request. The method will close the +// http.Response Body if it receives an error. +func (client BackupVaultsClient) GetResourcesInResourceGroupSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResourcesInResourceGroupResponder handles the response to the GetResourcesInResourceGroup request. The method always +// closes the http.Response Body. +func (client BackupVaultsClient) GetResourcesInResourceGroupResponder(resp *http.Response) (result BackupVaultResourceList, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// getResourcesInResourceGroupNextResults retrieves the next set of results, if any. 
+func (client BackupVaultsClient) getResourcesInResourceGroupNextResults(ctx context.Context, lastResults BackupVaultResourceList) (result BackupVaultResourceList, err error) { + req, err := lastResults.backupVaultResourceListPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "dataprotection.BackupVaultsClient", "getResourcesInResourceGroupNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.GetResourcesInResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "dataprotection.BackupVaultsClient", "getResourcesInResourceGroupNextResults", resp, "Failure sending next results request") + } + result, err = client.GetResourcesInResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BackupVaultsClient", "getResourcesInResourceGroupNextResults", resp, "Failure responding to next results request") + } + return +} + +// GetResourcesInResourceGroupComplete enumerates all values, automatically crossing page boundaries as required. +func (client BackupVaultsClient) GetResourcesInResourceGroupComplete(ctx context.Context, resourceGroupName string) (result BackupVaultResourceListIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BackupVaultsClient.GetResourcesInResourceGroup") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.GetResourcesInResourceGroup(ctx, resourceGroupName) + return +} + +// GetResourcesInSubscription returns resource collection belonging to a subscription. 
+func (client BackupVaultsClient) GetResourcesInSubscription(ctx context.Context) (result BackupVaultResourceListPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BackupVaultsClient.GetResourcesInSubscription") + defer func() { + sc := -1 + if result.bvrl.Response.Response != nil { + sc = result.bvrl.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.getResourcesInSubscriptionNextResults + req, err := client.GetResourcesInSubscriptionPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BackupVaultsClient", "GetResourcesInSubscription", nil, "Failure preparing request") + return + } + + resp, err := client.GetResourcesInSubscriptionSender(req) + if err != nil { + result.bvrl.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "dataprotection.BackupVaultsClient", "GetResourcesInSubscription", resp, "Failure sending request") + return + } + + result.bvrl, err = client.GetResourcesInSubscriptionResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BackupVaultsClient", "GetResourcesInSubscription", resp, "Failure responding to request") + return + } + if result.bvrl.hasNextLink() && result.bvrl.IsEmpty() { + err = result.NextWithContext(ctx) + return + } + + return +} + +// GetResourcesInSubscriptionPreparer prepares the GetResourcesInSubscription request. 
+func (client BackupVaultsClient) GetResourcesInSubscriptionPreparer(ctx context.Context) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.DataProtection/backupVaults", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetResourcesInSubscriptionSender sends the GetResourcesInSubscription request. The method will close the +// http.Response Body if it receives an error. +func (client BackupVaultsClient) GetResourcesInSubscriptionSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResourcesInSubscriptionResponder handles the response to the GetResourcesInSubscription request. The method always +// closes the http.Response Body. +func (client BackupVaultsClient) GetResourcesInSubscriptionResponder(resp *http.Response) (result BackupVaultResourceList, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// getResourcesInSubscriptionNextResults retrieves the next set of results, if any. 
+func (client BackupVaultsClient) getResourcesInSubscriptionNextResults(ctx context.Context, lastResults BackupVaultResourceList) (result BackupVaultResourceList, err error) { + req, err := lastResults.backupVaultResourceListPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "dataprotection.BackupVaultsClient", "getResourcesInSubscriptionNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.GetResourcesInSubscriptionSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "dataprotection.BackupVaultsClient", "getResourcesInSubscriptionNextResults", resp, "Failure sending next results request") + } + result, err = client.GetResourcesInSubscriptionResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BackupVaultsClient", "getResourcesInSubscriptionNextResults", resp, "Failure responding to next results request") + } + return +} + +// GetResourcesInSubscriptionComplete enumerates all values, automatically crossing page boundaries as required. +func (client BackupVaultsClient) GetResourcesInSubscriptionComplete(ctx context.Context) (result BackupVaultResourceListIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BackupVaultsClient.GetResourcesInSubscription") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.GetResourcesInSubscription(ctx) + return +} + +// Patch updates a BackupVault resource belonging to a resource group. For example, updating tags for a resource. +// Parameters: +// vaultName - the name of the backup vault. +// resourceGroupName - the name of the resource group where the backup vault is present. 
+// parameters - request body for operation +func (client BackupVaultsClient) Patch(ctx context.Context, vaultName string, resourceGroupName string, parameters PatchResourceRequestInput) (result BackupVaultsPatchFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BackupVaultsClient.Patch") + defer func() { + sc := -1 + if result.FutureAPI != nil && result.FutureAPI.Response() != nil { + sc = result.FutureAPI.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.PatchPreparer(ctx, vaultName, resourceGroupName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BackupVaultsClient", "Patch", nil, "Failure preparing request") + return + } + + result, err = client.PatchSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BackupVaultsClient", "Patch", nil, "Failure sending request") + return + } + + return +} + +// PatchPreparer prepares the Patch request. +func (client BackupVaultsClient) PatchPreparer(ctx context.Context, vaultName string, resourceGroupName string, parameters PatchResourceRequestInput) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vaultName": autorest.Encode("path", vaultName), + } + + const APIVersion = "2021-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataProtection/backupVaults/{vaultName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return 
preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// PatchSender sends the Patch request. The method will close the +// http.Response Body if it receives an error. +func (client BackupVaultsClient) PatchSender(req *http.Request) (future BackupVaultsPatchFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + var azf azure.Future + azf, err = azure.NewFutureFromResponse(resp) + future.FutureAPI = &azf + future.Result = future.result + return +} + +// PatchResponder handles the response to the Patch request. The method always +// closes the http.Response Body. +func (client BackupVaultsClient) PatchResponder(resp *http.Response) (result BackupVaultResource, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/azurerm/internal/services/dataprotection/legacysdk/dataprotection/client.go b/azurerm/internal/services/dataprotection/legacysdk/dataprotection/client.go new file mode 100644 index 000000000000..212d8c740cb5 --- /dev/null +++ b/azurerm/internal/services/dataprotection/legacysdk/dataprotection/client.go @@ -0,0 +1,272 @@ +// Package dataprotection implements the Azure ARM Dataprotection service API version 2021-01-01. +// +// Open API 2.0 Specs for Azure Data Protection service +package dataprotection + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" +) + +const ( + // DefaultBaseURI is the default URI used for the service Dataprotection + DefaultBaseURI = "https://management.azure.com" +) + +// BaseClient is the base client for Dataprotection. +type BaseClient struct { + autorest.Client + BaseURI string + SubscriptionID string +} + +// New creates an instance of the BaseClient client. +func New(subscriptionID string) BaseClient { + return NewWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWithBaseURI creates an instance of the BaseClient client using a custom endpoint. Use this when interacting with +// an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient { + return BaseClient{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + BaseURI: baseURI, + SubscriptionID: subscriptionID, + } +} + +// CheckFeatureSupport sends the check feature support request. 
+// Parameters: +// parameters - feature support request object +func (client BaseClient) CheckFeatureSupport(ctx context.Context, location string, parameters BasicFeatureValidationRequestBase) (result FeatureValidationResponseBaseModel, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.CheckFeatureSupport") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.CheckFeatureSupportPreparer(ctx, location, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BaseClient", "CheckFeatureSupport", nil, "Failure preparing request") + return + } + + resp, err := client.CheckFeatureSupportSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "dataprotection.BaseClient", "CheckFeatureSupport", resp, "Failure sending request") + return + } + + result, err = client.CheckFeatureSupportResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BaseClient", "CheckFeatureSupport", resp, "Failure responding to request") + return + } + + return +} + +// CheckFeatureSupportPreparer prepares the CheckFeatureSupport request. 
+func (client BaseClient) CheckFeatureSupportPreparer(ctx context.Context, location string, parameters BasicFeatureValidationRequestBase) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": autorest.Encode("path", location), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.DataProtection/locations/{location}/checkFeatureSupport", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CheckFeatureSupportSender sends the CheckFeatureSupport request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) CheckFeatureSupportSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// CheckFeatureSupportResponder handles the response to the CheckFeatureSupport request. The method always +// closes the http.Response Body. +func (client BaseClient) CheckFeatureSupportResponder(resp *http.Response) (result FeatureValidationResponseBaseModel, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetOperationResultPatch sends the get operation result patch request. +// Parameters: +// vaultName - the name of the backup vault. +// resourceGroupName - the name of the resource group where the backup vault is present. 
+func (client BaseClient) GetOperationResultPatch(ctx context.Context, vaultName string, resourceGroupName string, operationID string) (result BackupVaultResource, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetOperationResultPatch") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetOperationResultPatchPreparer(ctx, vaultName, resourceGroupName, operationID) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BaseClient", "GetOperationResultPatch", nil, "Failure preparing request") + return + } + + resp, err := client.GetOperationResultPatchSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "dataprotection.BaseClient", "GetOperationResultPatch", resp, "Failure sending request") + return + } + + result, err = client.GetOperationResultPatchResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BaseClient", "GetOperationResultPatch", resp, "Failure responding to request") + return + } + + return +} + +// GetOperationResultPatchPreparer prepares the GetOperationResultPatch request. 
+func (client BaseClient) GetOperationResultPatchPreparer(ctx context.Context, vaultName string, resourceGroupName string, operationID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "operationId": autorest.Encode("path", operationID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vaultName": autorest.Encode("path", vaultName), + } + + const APIVersion = "2021-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataProtection/backupVaults/{vaultName}/operationResults/{operationId}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetOperationResultPatchSender sends the GetOperationResultPatch request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) GetOperationResultPatchSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetOperationResultPatchResponder handles the response to the GetOperationResultPatch request. The method always +// closes the http.Response Body. +func (client BaseClient) GetOperationResultPatchResponder(resp *http.Response) (result BackupVaultResource, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetOperationStatus sends the get operation status request. 
+func (client BaseClient) GetOperationStatus(ctx context.Context, location string, operationID string) (result OperationResource, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetOperationStatus") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetOperationStatusPreparer(ctx, location, operationID) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BaseClient", "GetOperationStatus", nil, "Failure preparing request") + return + } + + resp, err := client.GetOperationStatusSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "dataprotection.BaseClient", "GetOperationStatus", resp, "Failure sending request") + return + } + + result, err = client.GetOperationStatusResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BaseClient", "GetOperationStatus", resp, "Failure responding to request") + return + } + + return +} + +// GetOperationStatusPreparer prepares the GetOperationStatus request. 
+func (client BaseClient) GetOperationStatusPreparer(ctx context.Context, location string, operationID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": autorest.Encode("path", location), + "operationId": autorest.Encode("path", operationID), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.DataProtection/locations/{location}/operationStatus/{operationId}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetOperationStatusSender sends the GetOperationStatus request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) GetOperationStatusSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetOperationStatusResponder handles the response to the GetOperationStatus request. The method always +// closes the http.Response Body. 
+func (client BaseClient) GetOperationStatusResponder(resp *http.Response) (result OperationResource, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/azurerm/internal/services/dataprotection/legacysdk/dataprotection/enums.go b/azurerm/internal/services/dataprotection/legacysdk/dataprotection/enums.go new file mode 100644 index 000000000000..301bb6838442 --- /dev/null +++ b/azurerm/internal/services/dataprotection/legacysdk/dataprotection/enums.go @@ -0,0 +1,613 @@ +package dataprotection + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +// AbsoluteMarker enumerates the values for absolute marker. +type AbsoluteMarker string + +const ( + // AllBackup ... + AllBackup AbsoluteMarker = "AllBackup" + // FirstOfDay ... + FirstOfDay AbsoluteMarker = "FirstOfDay" + // FirstOfMonth ... + FirstOfMonth AbsoluteMarker = "FirstOfMonth" + // FirstOfWeek ... + FirstOfWeek AbsoluteMarker = "FirstOfWeek" + // FirstOfYear ... + FirstOfYear AbsoluteMarker = "FirstOfYear" +) + +// PossibleAbsoluteMarkerValues returns an array of possible values for the AbsoluteMarker const type. +func PossibleAbsoluteMarkerValues() []AbsoluteMarker { + return []AbsoluteMarker{AllBackup, FirstOfDay, FirstOfMonth, FirstOfWeek, FirstOfYear} +} + +// CreatedByType enumerates the values for created by type. +type CreatedByType string + +const ( + // Application ... + Application CreatedByType = "Application" + // Key ... + Key CreatedByType = "Key" + // ManagedIdentity ... + ManagedIdentity CreatedByType = "ManagedIdentity" + // User ... 
+ User CreatedByType = "User" +) + +// PossibleCreatedByTypeValues returns an array of possible values for the CreatedByType const type. +func PossibleCreatedByTypeValues() []CreatedByType { + return []CreatedByType{Application, Key, ManagedIdentity, User} +} + +// CurrentProtectionState enumerates the values for current protection state. +type CurrentProtectionState string + +const ( + // BackupSchedulesSuspended ... + BackupSchedulesSuspended CurrentProtectionState = "BackupSchedulesSuspended" + // ConfiguringProtection ... + ConfiguringProtection CurrentProtectionState = "ConfiguringProtection" + // ConfiguringProtectionFailed ... + ConfiguringProtectionFailed CurrentProtectionState = "ConfiguringProtectionFailed" + // Invalid ... + Invalid CurrentProtectionState = "Invalid" + // NotProtected ... + NotProtected CurrentProtectionState = "NotProtected" + // ProtectionConfigured ... + ProtectionConfigured CurrentProtectionState = "ProtectionConfigured" + // ProtectionError ... + ProtectionError CurrentProtectionState = "ProtectionError" + // ProtectionStopped ... + ProtectionStopped CurrentProtectionState = "ProtectionStopped" + // RetentionSchedulesSuspended ... + RetentionSchedulesSuspended CurrentProtectionState = "RetentionSchedulesSuspended" + // SoftDeleted ... + SoftDeleted CurrentProtectionState = "SoftDeleted" + // SoftDeleting ... + SoftDeleting CurrentProtectionState = "SoftDeleting" + // UpdatingProtection ... + UpdatingProtection CurrentProtectionState = "UpdatingProtection" +) + +// PossibleCurrentProtectionStateValues returns an array of possible values for the CurrentProtectionState const type. 
+func PossibleCurrentProtectionStateValues() []CurrentProtectionState { + return []CurrentProtectionState{BackupSchedulesSuspended, ConfiguringProtection, ConfiguringProtectionFailed, Invalid, NotProtected, ProtectionConfigured, ProtectionError, ProtectionStopped, RetentionSchedulesSuspended, SoftDeleted, SoftDeleting, UpdatingProtection} +} + +// DataStoreTypes enumerates the values for data store types. +type DataStoreTypes string + +const ( + // ArchiveStore ... + ArchiveStore DataStoreTypes = "ArchiveStore" + // OperationalStore ... + OperationalStore DataStoreTypes = "OperationalStore" + // VaultStore ... + VaultStore DataStoreTypes = "VaultStore" +) + +// PossibleDataStoreTypesValues returns an array of possible values for the DataStoreTypes const type. +func PossibleDataStoreTypesValues() []DataStoreTypes { + return []DataStoreTypes{ArchiveStore, OperationalStore, VaultStore} +} + +// DayOfWeek enumerates the values for day of week. +type DayOfWeek string + +const ( + // Friday ... + Friday DayOfWeek = "Friday" + // Monday ... + Monday DayOfWeek = "Monday" + // Saturday ... + Saturday DayOfWeek = "Saturday" + // Sunday ... + Sunday DayOfWeek = "Sunday" + // Thursday ... + Thursday DayOfWeek = "Thursday" + // Tuesday ... + Tuesday DayOfWeek = "Tuesday" + // Wednesday ... + Wednesday DayOfWeek = "Wednesday" +) + +// PossibleDayOfWeekValues returns an array of possible values for the DayOfWeek const type. +func PossibleDayOfWeekValues() []DayOfWeek { + return []DayOfWeek{Friday, Monday, Saturday, Sunday, Thursday, Tuesday, Wednesday} +} + +// FeatureSupportStatus enumerates the values for feature support status. +type FeatureSupportStatus string + +const ( + // FeatureSupportStatusAlphaPreview ... + FeatureSupportStatusAlphaPreview FeatureSupportStatus = "AlphaPreview" + // FeatureSupportStatusGenerallyAvailable ... + FeatureSupportStatusGenerallyAvailable FeatureSupportStatus = "GenerallyAvailable" + // FeatureSupportStatusInvalid ... 
+ FeatureSupportStatusInvalid FeatureSupportStatus = "Invalid" + // FeatureSupportStatusNotSupported ... + FeatureSupportStatusNotSupported FeatureSupportStatus = "NotSupported" + // FeatureSupportStatusPrivatePreview ... + FeatureSupportStatusPrivatePreview FeatureSupportStatus = "PrivatePreview" + // FeatureSupportStatusPublicPreview ... + FeatureSupportStatusPublicPreview FeatureSupportStatus = "PublicPreview" +) + +// PossibleFeatureSupportStatusValues returns an array of possible values for the FeatureSupportStatus const type. +func PossibleFeatureSupportStatusValues() []FeatureSupportStatus { + return []FeatureSupportStatus{FeatureSupportStatusAlphaPreview, FeatureSupportStatusGenerallyAvailable, FeatureSupportStatusInvalid, FeatureSupportStatusNotSupported, FeatureSupportStatusPrivatePreview, FeatureSupportStatusPublicPreview} +} + +// FeatureType enumerates the values for feature type. +type FeatureType string + +const ( + // FeatureTypeDataSourceType ... + FeatureTypeDataSourceType FeatureType = "DataSourceType" + // FeatureTypeInvalid ... + FeatureTypeInvalid FeatureType = "Invalid" +) + +// PossibleFeatureTypeValues returns an array of possible values for the FeatureType const type. +func PossibleFeatureTypeValues() []FeatureType { + return []FeatureType{FeatureTypeDataSourceType, FeatureTypeInvalid} +} + +// Month enumerates the values for month. +type Month string + +const ( + // April ... + April Month = "April" + // August ... + August Month = "August" + // December ... + December Month = "December" + // February ... + February Month = "February" + // January ... + January Month = "January" + // July ... + July Month = "July" + // June ... + June Month = "June" + // March ... + March Month = "March" + // May ... + May Month = "May" + // November ... + November Month = "November" + // October ... + October Month = "October" + // September ... 
+ September Month = "September" +) + +// PossibleMonthValues returns an array of possible values for the Month const type. +func PossibleMonthValues() []Month { + return []Month{April, August, December, February, January, July, June, March, May, November, October, September} +} + +// ObjectType enumerates the values for object type. +type ObjectType string + +const ( + // ObjectTypeAzureBackupDiscreteRecoveryPoint ... + ObjectTypeAzureBackupDiscreteRecoveryPoint ObjectType = "AzureBackupDiscreteRecoveryPoint" + // ObjectTypeAzureBackupRecoveryPoint ... + ObjectTypeAzureBackupRecoveryPoint ObjectType = "AzureBackupRecoveryPoint" +) + +// PossibleObjectTypeValues returns an array of possible values for the ObjectType const type. +func PossibleObjectTypeValues() []ObjectType { + return []ObjectType{ObjectTypeAzureBackupDiscreteRecoveryPoint, ObjectTypeAzureBackupRecoveryPoint} +} + +// ObjectTypeBasicAzureBackupRestoreRequest enumerates the values for object type basic azure backup restore +// request. +type ObjectTypeBasicAzureBackupRestoreRequest string + +const ( + // ObjectTypeAzureBackupRecoveryPointBasedRestoreRequest ... + ObjectTypeAzureBackupRecoveryPointBasedRestoreRequest ObjectTypeBasicAzureBackupRestoreRequest = "AzureBackupRecoveryPointBasedRestoreRequest" + // ObjectTypeAzureBackupRecoveryTimeBasedRestoreRequest ... + ObjectTypeAzureBackupRecoveryTimeBasedRestoreRequest ObjectTypeBasicAzureBackupRestoreRequest = "AzureBackupRecoveryTimeBasedRestoreRequest" + // ObjectTypeAzureBackupRestoreRequest ... + ObjectTypeAzureBackupRestoreRequest ObjectTypeBasicAzureBackupRestoreRequest = "AzureBackupRestoreRequest" + // ObjectTypeAzureBackupRestoreWithRehydrationRequest ... 
+ ObjectTypeAzureBackupRestoreWithRehydrationRequest ObjectTypeBasicAzureBackupRestoreRequest = "AzureBackupRestoreWithRehydrationRequest" +) + +// PossibleObjectTypeBasicAzureBackupRestoreRequestValues returns an array of possible values for the ObjectTypeBasicAzureBackupRestoreRequest const type. +func PossibleObjectTypeBasicAzureBackupRestoreRequestValues() []ObjectTypeBasicAzureBackupRestoreRequest { + return []ObjectTypeBasicAzureBackupRestoreRequest{ObjectTypeAzureBackupRecoveryPointBasedRestoreRequest, ObjectTypeAzureBackupRecoveryTimeBasedRestoreRequest, ObjectTypeAzureBackupRestoreRequest, ObjectTypeAzureBackupRestoreWithRehydrationRequest} +} + +// ObjectTypeBasicBackupCriteria enumerates the values for object type basic backup criteria. +type ObjectTypeBasicBackupCriteria string + +const ( + // ObjectTypeBackupCriteria ... + ObjectTypeBackupCriteria ObjectTypeBasicBackupCriteria = "BackupCriteria" + // ObjectTypeScheduleBasedBackupCriteria ... + ObjectTypeScheduleBasedBackupCriteria ObjectTypeBasicBackupCriteria = "ScheduleBasedBackupCriteria" +) + +// PossibleObjectTypeBasicBackupCriteriaValues returns an array of possible values for the ObjectTypeBasicBackupCriteria const type. +func PossibleObjectTypeBasicBackupCriteriaValues() []ObjectTypeBasicBackupCriteria { + return []ObjectTypeBasicBackupCriteria{ObjectTypeBackupCriteria, ObjectTypeScheduleBasedBackupCriteria} +} + +// ObjectTypeBasicBackupParameters enumerates the values for object type basic backup parameters. +type ObjectTypeBasicBackupParameters string + +const ( + // ObjectTypeAzureBackupParams ... + ObjectTypeAzureBackupParams ObjectTypeBasicBackupParameters = "AzureBackupParams" + // ObjectTypeBackupParameters ... + ObjectTypeBackupParameters ObjectTypeBasicBackupParameters = "BackupParameters" +) + +// PossibleObjectTypeBasicBackupParametersValues returns an array of possible values for the ObjectTypeBasicBackupParameters const type. 
+func PossibleObjectTypeBasicBackupParametersValues() []ObjectTypeBasicBackupParameters { + return []ObjectTypeBasicBackupParameters{ObjectTypeAzureBackupParams, ObjectTypeBackupParameters} +} + +// ObjectTypeBasicBaseBackupPolicy enumerates the values for object type basic base backup policy. +type ObjectTypeBasicBaseBackupPolicy string + +const ( + // ObjectTypeBackupPolicy ... + ObjectTypeBackupPolicy ObjectTypeBasicBaseBackupPolicy = "BackupPolicy" + // ObjectTypeBaseBackupPolicy ... + ObjectTypeBaseBackupPolicy ObjectTypeBasicBaseBackupPolicy = "BaseBackupPolicy" +) + +// PossibleObjectTypeBasicBaseBackupPolicyValues returns an array of possible values for the ObjectTypeBasicBaseBackupPolicy const type. +func PossibleObjectTypeBasicBaseBackupPolicyValues() []ObjectTypeBasicBaseBackupPolicy { + return []ObjectTypeBasicBaseBackupPolicy{ObjectTypeBackupPolicy, ObjectTypeBaseBackupPolicy} +} + +// ObjectTypeBasicBasePolicyRule enumerates the values for object type basic base policy rule. +type ObjectTypeBasicBasePolicyRule string + +const ( + // ObjectTypeAzureBackupRule ... + ObjectTypeAzureBackupRule ObjectTypeBasicBasePolicyRule = "AzureBackupRule" + // ObjectTypeAzureRetentionRule ... + ObjectTypeAzureRetentionRule ObjectTypeBasicBasePolicyRule = "AzureRetentionRule" + // ObjectTypeBasePolicyRule ... + ObjectTypeBasePolicyRule ObjectTypeBasicBasePolicyRule = "BasePolicyRule" +) + +// PossibleObjectTypeBasicBasePolicyRuleValues returns an array of possible values for the ObjectTypeBasicBasePolicyRule const type. +func PossibleObjectTypeBasicBasePolicyRuleValues() []ObjectTypeBasicBasePolicyRule { + return []ObjectTypeBasicBasePolicyRule{ObjectTypeAzureBackupRule, ObjectTypeAzureRetentionRule, ObjectTypeBasePolicyRule} +} + +// ObjectTypeBasicCopyOption enumerates the values for object type basic copy option. +type ObjectTypeBasicCopyOption string + +const ( + // ObjectTypeCopyOnExpiryOption ... 
+ ObjectTypeCopyOnExpiryOption ObjectTypeBasicCopyOption = "CopyOnExpiryOption" + // ObjectTypeCopyOption ... + ObjectTypeCopyOption ObjectTypeBasicCopyOption = "CopyOption" + // ObjectTypeCustomCopyOption ... + ObjectTypeCustomCopyOption ObjectTypeBasicCopyOption = "CustomCopyOption" + // ObjectTypeImmediateCopyOption ... + ObjectTypeImmediateCopyOption ObjectTypeBasicCopyOption = "ImmediateCopyOption" +) + +// PossibleObjectTypeBasicCopyOptionValues returns an array of possible values for the ObjectTypeBasicCopyOption const type. +func PossibleObjectTypeBasicCopyOptionValues() []ObjectTypeBasicCopyOption { + return []ObjectTypeBasicCopyOption{ObjectTypeCopyOnExpiryOption, ObjectTypeCopyOption, ObjectTypeCustomCopyOption, ObjectTypeImmediateCopyOption} +} + +// ObjectTypeBasicDataStoreParameters enumerates the values for object type basic data store parameters. +type ObjectTypeBasicDataStoreParameters string + +const ( + // ObjectTypeAzureOperationalStoreParameters ... + ObjectTypeAzureOperationalStoreParameters ObjectTypeBasicDataStoreParameters = "AzureOperationalStoreParameters" + // ObjectTypeDataStoreParameters ... + ObjectTypeDataStoreParameters ObjectTypeBasicDataStoreParameters = "DataStoreParameters" +) + +// PossibleObjectTypeBasicDataStoreParametersValues returns an array of possible values for the ObjectTypeBasicDataStoreParameters const type. +func PossibleObjectTypeBasicDataStoreParametersValues() []ObjectTypeBasicDataStoreParameters { + return []ObjectTypeBasicDataStoreParameters{ObjectTypeAzureOperationalStoreParameters, ObjectTypeDataStoreParameters} +} + +// ObjectTypeBasicDeleteOption enumerates the values for object type basic delete option. +type ObjectTypeBasicDeleteOption string + +const ( + // ObjectTypeAbsoluteDeleteOption ... + ObjectTypeAbsoluteDeleteOption ObjectTypeBasicDeleteOption = "AbsoluteDeleteOption" + // ObjectTypeDeleteOption ... 
+ ObjectTypeDeleteOption ObjectTypeBasicDeleteOption = "DeleteOption" +) + +// PossibleObjectTypeBasicDeleteOptionValues returns an array of possible values for the ObjectTypeBasicDeleteOption const type. +func PossibleObjectTypeBasicDeleteOptionValues() []ObjectTypeBasicDeleteOption { + return []ObjectTypeBasicDeleteOption{ObjectTypeAbsoluteDeleteOption, ObjectTypeDeleteOption} +} + +// ObjectTypeBasicFeatureValidationRequestBase enumerates the values for object type basic feature validation +// request base. +type ObjectTypeBasicFeatureValidationRequestBase string + +const ( + // ObjectTypeFeatureValidationRequest ... + ObjectTypeFeatureValidationRequest ObjectTypeBasicFeatureValidationRequestBase = "FeatureValidationRequest" + // ObjectTypeFeatureValidationRequestBase ... + ObjectTypeFeatureValidationRequestBase ObjectTypeBasicFeatureValidationRequestBase = "FeatureValidationRequestBase" +) + +// PossibleObjectTypeBasicFeatureValidationRequestBaseValues returns an array of possible values for the ObjectTypeBasicFeatureValidationRequestBase const type. +func PossibleObjectTypeBasicFeatureValidationRequestBaseValues() []ObjectTypeBasicFeatureValidationRequestBase { + return []ObjectTypeBasicFeatureValidationRequestBase{ObjectTypeFeatureValidationRequest, ObjectTypeFeatureValidationRequestBase} +} + +// ObjectTypeBasicFeatureValidationResponseBase enumerates the values for object type basic feature validation +// response base. +type ObjectTypeBasicFeatureValidationResponseBase string + +const ( + // ObjectTypeFeatureValidationResponse ... + ObjectTypeFeatureValidationResponse ObjectTypeBasicFeatureValidationResponseBase = "FeatureValidationResponse" + // ObjectTypeFeatureValidationResponseBase ... 
+ ObjectTypeFeatureValidationResponseBase ObjectTypeBasicFeatureValidationResponseBase = "FeatureValidationResponseBase" +) + +// PossibleObjectTypeBasicFeatureValidationResponseBaseValues returns an array of possible values for the ObjectTypeBasicFeatureValidationResponseBase const type. +func PossibleObjectTypeBasicFeatureValidationResponseBaseValues() []ObjectTypeBasicFeatureValidationResponseBase { + return []ObjectTypeBasicFeatureValidationResponseBase{ObjectTypeFeatureValidationResponse, ObjectTypeFeatureValidationResponseBase} +} + +// ObjectTypeBasicItemLevelRestoreCriteria enumerates the values for object type basic item level restore +// criteria. +type ObjectTypeBasicItemLevelRestoreCriteria string + +const ( + // ObjectTypeItemLevelRestoreCriteria ... + ObjectTypeItemLevelRestoreCriteria ObjectTypeBasicItemLevelRestoreCriteria = "ItemLevelRestoreCriteria" + // ObjectTypeRangeBasedItemLevelRestoreCriteria ... + ObjectTypeRangeBasedItemLevelRestoreCriteria ObjectTypeBasicItemLevelRestoreCriteria = "RangeBasedItemLevelRestoreCriteria" +) + +// PossibleObjectTypeBasicItemLevelRestoreCriteriaValues returns an array of possible values for the ObjectTypeBasicItemLevelRestoreCriteria const type. +func PossibleObjectTypeBasicItemLevelRestoreCriteriaValues() []ObjectTypeBasicItemLevelRestoreCriteria { + return []ObjectTypeBasicItemLevelRestoreCriteria{ObjectTypeItemLevelRestoreCriteria, ObjectTypeRangeBasedItemLevelRestoreCriteria} +} + +// ObjectTypeBasicRestoreTargetInfoBase enumerates the values for object type basic restore target info base. +type ObjectTypeBasicRestoreTargetInfoBase string + +const ( + // ObjectTypeItemLevelRestoreTargetInfo ... + ObjectTypeItemLevelRestoreTargetInfo ObjectTypeBasicRestoreTargetInfoBase = "ItemLevelRestoreTargetInfo" + // ObjectTypeRestoreFilesTargetInfo ... + ObjectTypeRestoreFilesTargetInfo ObjectTypeBasicRestoreTargetInfoBase = "RestoreFilesTargetInfo" + // ObjectTypeRestoreTargetInfo ... 
+ ObjectTypeRestoreTargetInfo ObjectTypeBasicRestoreTargetInfoBase = "RestoreTargetInfo" + // ObjectTypeRestoreTargetInfoBase ... + ObjectTypeRestoreTargetInfoBase ObjectTypeBasicRestoreTargetInfoBase = "RestoreTargetInfoBase" +) + +// PossibleObjectTypeBasicRestoreTargetInfoBaseValues returns an array of possible values for the ObjectTypeBasicRestoreTargetInfoBase const type. +func PossibleObjectTypeBasicRestoreTargetInfoBaseValues() []ObjectTypeBasicRestoreTargetInfoBase { + return []ObjectTypeBasicRestoreTargetInfoBase{ObjectTypeItemLevelRestoreTargetInfo, ObjectTypeRestoreFilesTargetInfo, ObjectTypeRestoreTargetInfo, ObjectTypeRestoreTargetInfoBase} +} + +// ObjectTypeBasicTriggerContext enumerates the values for object type basic trigger context. +type ObjectTypeBasicTriggerContext string + +const ( + // ObjectTypeAdhocBasedTriggerContext ... + ObjectTypeAdhocBasedTriggerContext ObjectTypeBasicTriggerContext = "AdhocBasedTriggerContext" + // ObjectTypeScheduleBasedTriggerContext ... + ObjectTypeScheduleBasedTriggerContext ObjectTypeBasicTriggerContext = "ScheduleBasedTriggerContext" + // ObjectTypeTriggerContext ... + ObjectTypeTriggerContext ObjectTypeBasicTriggerContext = "TriggerContext" +) + +// PossibleObjectTypeBasicTriggerContextValues returns an array of possible values for the ObjectTypeBasicTriggerContext const type. +func PossibleObjectTypeBasicTriggerContextValues() []ObjectTypeBasicTriggerContext { + return []ObjectTypeBasicTriggerContext{ObjectTypeAdhocBasedTriggerContext, ObjectTypeScheduleBasedTriggerContext, ObjectTypeTriggerContext} +} + +// ProvisioningState enumerates the values for provisioning state. +type ProvisioningState string + +const ( + // Failed ... + Failed ProvisioningState = "Failed" + // Provisioning ... + Provisioning ProvisioningState = "Provisioning" + // Succeeded ... + Succeeded ProvisioningState = "Succeeded" + // Unknown ... + Unknown ProvisioningState = "Unknown" + // Updating ... 
+ Updating ProvisioningState = "Updating" +) + +// PossibleProvisioningStateValues returns an array of possible values for the ProvisioningState const type. +func PossibleProvisioningStateValues() []ProvisioningState { + return []ProvisioningState{Failed, Provisioning, Succeeded, Unknown, Updating} +} + +// RehydrationPriority enumerates the values for rehydration priority. +type RehydrationPriority string + +const ( + // RehydrationPriorityHigh ... + RehydrationPriorityHigh RehydrationPriority = "High" + // RehydrationPriorityInvalid ... + RehydrationPriorityInvalid RehydrationPriority = "Invalid" + // RehydrationPriorityStandard ... + RehydrationPriorityStandard RehydrationPriority = "Standard" +) + +// PossibleRehydrationPriorityValues returns an array of possible values for the RehydrationPriority const type. +func PossibleRehydrationPriorityValues() []RehydrationPriority { + return []RehydrationPriority{RehydrationPriorityHigh, RehydrationPriorityInvalid, RehydrationPriorityStandard} +} + +// RehydrationStatus enumerates the values for rehydration status. +type RehydrationStatus string + +const ( + // COMPLETED ... + COMPLETED RehydrationStatus = "COMPLETED" + // CREATEINPROGRESS ... + CREATEINPROGRESS RehydrationStatus = "CREATE_IN_PROGRESS" + // DELETED ... + DELETED RehydrationStatus = "DELETED" + // DELETEINPROGRESS ... + DELETEINPROGRESS RehydrationStatus = "DELETE_IN_PROGRESS" + // FAILED ... + FAILED RehydrationStatus = "FAILED" +) + +// PossibleRehydrationStatusValues returns an array of possible values for the RehydrationStatus const type. +func PossibleRehydrationStatusValues() []RehydrationStatus { + return []RehydrationStatus{COMPLETED, CREATEINPROGRESS, DELETED, DELETEINPROGRESS, FAILED} +} + +// RestoreSourceDataStoreType enumerates the values for restore source data store type. +type RestoreSourceDataStoreType string + +const ( + // RestoreSourceDataStoreTypeArchiveStore ... 
+ RestoreSourceDataStoreTypeArchiveStore RestoreSourceDataStoreType = "ArchiveStore" + // RestoreSourceDataStoreTypeOperationalStore ... + RestoreSourceDataStoreTypeOperationalStore RestoreSourceDataStoreType = "OperationalStore" + // RestoreSourceDataStoreTypeVaultStore ... + RestoreSourceDataStoreTypeVaultStore RestoreSourceDataStoreType = "VaultStore" +) + +// PossibleRestoreSourceDataStoreTypeValues returns an array of possible values for the RestoreSourceDataStoreType const type. +func PossibleRestoreSourceDataStoreTypeValues() []RestoreSourceDataStoreType { + return []RestoreSourceDataStoreType{RestoreSourceDataStoreTypeArchiveStore, RestoreSourceDataStoreTypeOperationalStore, RestoreSourceDataStoreTypeVaultStore} +} + +// RestoreTargetLocationType enumerates the values for restore target location type. +type RestoreTargetLocationType string + +const ( + // RestoreTargetLocationTypeAzureBlobs ... + RestoreTargetLocationTypeAzureBlobs RestoreTargetLocationType = "AzureBlobs" + // RestoreTargetLocationTypeAzureFiles ... + RestoreTargetLocationTypeAzureFiles RestoreTargetLocationType = "AzureFiles" + // RestoreTargetLocationTypeInvalid ... + RestoreTargetLocationTypeInvalid RestoreTargetLocationType = "Invalid" +) + +// PossibleRestoreTargetLocationTypeValues returns an array of possible values for the RestoreTargetLocationType const type. +func PossibleRestoreTargetLocationTypeValues() []RestoreTargetLocationType { + return []RestoreTargetLocationType{RestoreTargetLocationTypeAzureBlobs, RestoreTargetLocationTypeAzureFiles, RestoreTargetLocationTypeInvalid} +} + +// SourceDataStoreType enumerates the values for source data store type. +type SourceDataStoreType string + +const ( + // SourceDataStoreTypeArchiveStore ... + SourceDataStoreTypeArchiveStore SourceDataStoreType = "ArchiveStore" + // SourceDataStoreTypeSnapshotStore ... + SourceDataStoreTypeSnapshotStore SourceDataStoreType = "SnapshotStore" + // SourceDataStoreTypeVaultStore ... 
+ SourceDataStoreTypeVaultStore SourceDataStoreType = "VaultStore" +) + +// PossibleSourceDataStoreTypeValues returns an array of possible values for the SourceDataStoreType const type. +func PossibleSourceDataStoreTypeValues() []SourceDataStoreType { + return []SourceDataStoreType{SourceDataStoreTypeArchiveStore, SourceDataStoreTypeSnapshotStore, SourceDataStoreTypeVaultStore} +} + +// Status enumerates the values for status. +type Status string + +const ( + // StatusConfiguringProtection ... + StatusConfiguringProtection Status = "ConfiguringProtection" + // StatusConfiguringProtectionFailed ... + StatusConfiguringProtectionFailed Status = "ConfiguringProtectionFailed" + // StatusProtectionConfigured ... + StatusProtectionConfigured Status = "ProtectionConfigured" + // StatusProtectionStopped ... + StatusProtectionStopped Status = "ProtectionStopped" + // StatusSoftDeleted ... + StatusSoftDeleted Status = "SoftDeleted" + // StatusSoftDeleting ... + StatusSoftDeleting Status = "SoftDeleting" +) + +// PossibleStatusValues returns an array of possible values for the Status const type. +func PossibleStatusValues() []Status { + return []Status{StatusConfiguringProtection, StatusConfiguringProtectionFailed, StatusProtectionConfigured, StatusProtectionStopped, StatusSoftDeleted, StatusSoftDeleting} +} + +// StorageSettingStoreTypes enumerates the values for storage setting store types. +type StorageSettingStoreTypes string + +const ( + // StorageSettingStoreTypesArchiveStore ... + StorageSettingStoreTypesArchiveStore StorageSettingStoreTypes = "ArchiveStore" + // StorageSettingStoreTypesSnapshotStore ... + StorageSettingStoreTypesSnapshotStore StorageSettingStoreTypes = "SnapshotStore" + // StorageSettingStoreTypesVaultStore ... + StorageSettingStoreTypesVaultStore StorageSettingStoreTypes = "VaultStore" +) + +// PossibleStorageSettingStoreTypesValues returns an array of possible values for the StorageSettingStoreTypes const type. 
+func PossibleStorageSettingStoreTypesValues() []StorageSettingStoreTypes { + return []StorageSettingStoreTypes{StorageSettingStoreTypesArchiveStore, StorageSettingStoreTypesSnapshotStore, StorageSettingStoreTypesVaultStore} +} + +// StorageSettingTypes enumerates the values for storage setting types. +type StorageSettingTypes string + +const ( + // GeoRedundant ... + GeoRedundant StorageSettingTypes = "GeoRedundant" + // LocallyRedundant ... + LocallyRedundant StorageSettingTypes = "LocallyRedundant" +) + +// PossibleStorageSettingTypesValues returns an array of possible values for the StorageSettingTypes const type. +func PossibleStorageSettingTypesValues() []StorageSettingTypes { + return []StorageSettingTypes{GeoRedundant, LocallyRedundant} +} + +// WeekNumber enumerates the values for week number. +type WeekNumber string + +const ( + // First ... + First WeekNumber = "First" + // Fourth ... + Fourth WeekNumber = "Fourth" + // Last ... + Last WeekNumber = "Last" + // Second ... + Second WeekNumber = "Second" + // Third ... + Third WeekNumber = "Third" +) + +// PossibleWeekNumberValues returns an array of possible values for the WeekNumber const type. +func PossibleWeekNumberValues() []WeekNumber { + return []WeekNumber{First, Fourth, Last, Second, Third} +} diff --git a/azurerm/internal/services/dataprotection/legacysdk/dataprotection/exportjobs.go b/azurerm/internal/services/dataprotection/legacysdk/dataprotection/exportjobs.go new file mode 100644 index 000000000000..2d3365e55206 --- /dev/null +++ b/azurerm/internal/services/dataprotection/legacysdk/dataprotection/exportjobs.go @@ -0,0 +1,109 @@ +package dataprotection + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" +) + +// ExportJobsClient is the open API 2.0 Specs for Azure Data Protection service +type ExportJobsClient struct { + BaseClient +} + +// NewExportJobsClient creates an instance of the ExportJobsClient client. +func NewExportJobsClient(subscriptionID string) ExportJobsClient { + return NewExportJobsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewExportJobsClientWithBaseURI creates an instance of the ExportJobsClient client using a custom endpoint. Use this +// when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewExportJobsClientWithBaseURI(baseURI string, subscriptionID string) ExportJobsClient { + return ExportJobsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Trigger triggers export of jobs and returns an OperationID to track. +// Parameters: +// resourceGroupName - the name of the resource group where the backup vault is present. +// vaultName - the name of the backup vault. 
+func (client ExportJobsClient) Trigger(ctx context.Context, resourceGroupName string, vaultName string) (result ExportJobsTriggerFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ExportJobsClient.Trigger") + defer func() { + sc := -1 + if result.FutureAPI != nil && result.FutureAPI.Response() != nil { + sc = result.FutureAPI.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.TriggerPreparer(ctx, resourceGroupName, vaultName) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.ExportJobsClient", "Trigger", nil, "Failure preparing request") + return + } + + result, err = client.TriggerSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.ExportJobsClient", "Trigger", nil, "Failure sending request") + return + } + + return +} + +// TriggerPreparer prepares the Trigger request. +func (client ExportJobsClient) TriggerPreparer(ctx context.Context, resourceGroupName string, vaultName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vaultName": autorest.Encode("path", vaultName), + } + + const APIVersion = "2021-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataProtection/backupVaults/{vaultName}/exportBackupJobs", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// TriggerSender sends the Trigger request. The method will close the +// http.Response Body if it receives an error. 
+func (client ExportJobsClient) TriggerSender(req *http.Request) (future ExportJobsTriggerFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + var azf azure.Future + azf, err = azure.NewFutureFromResponse(resp) + future.FutureAPI = &azf + future.Result = future.result + return +} + +// TriggerResponder handles the response to the Trigger request. The method always +// closes the http.Response Body. +func (client ExportJobsClient) TriggerResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} diff --git a/azurerm/internal/services/dataprotection/legacysdk/dataprotection/exportjobsoperationresult.go b/azurerm/internal/services/dataprotection/legacysdk/dataprotection/exportjobsoperationresult.go new file mode 100644 index 000000000000..df766de22f93 --- /dev/null +++ b/azurerm/internal/services/dataprotection/legacysdk/dataprotection/exportjobsoperationresult.go @@ -0,0 +1,113 @@ +package dataprotection + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" +) + +// ExportJobsOperationResultClient is the open API 2.0 Specs for Azure Data Protection service +type ExportJobsOperationResultClient struct { + BaseClient +} + +// NewExportJobsOperationResultClient creates an instance of the ExportJobsOperationResultClient client. 
+func NewExportJobsOperationResultClient(subscriptionID string) ExportJobsOperationResultClient { + return NewExportJobsOperationResultClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewExportJobsOperationResultClientWithBaseURI creates an instance of the ExportJobsOperationResultClient client +// using a custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign +// clouds, Azure stack). +func NewExportJobsOperationResultClientWithBaseURI(baseURI string, subscriptionID string) ExportJobsOperationResultClient { + return ExportJobsOperationResultClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Get gets the operation result of operation triggered by Export Jobs API. If the operation is successful, then it +// also contains URL of a Blob and a SAS key to access the same. The blob contains exported jobs in JSON serialized +// format. +// Parameters: +// resourceGroupName - the name of the resource group where the backup vault is present. +// vaultName - the name of the backup vault. +// operationID - operationID which represents the export job. 
+func (client ExportJobsOperationResultClient) Get(ctx context.Context, resourceGroupName string, vaultName string, operationID string) (result ExportJobsResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ExportJobsOperationResultClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceGroupName, vaultName, operationID) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.ExportJobsOperationResultClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "dataprotection.ExportJobsOperationResultClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.ExportJobsOperationResultClient", "Get", resp, "Failure responding to request") + return + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client ExportJobsOperationResultClient) GetPreparer(ctx context.Context, resourceGroupName string, vaultName string, operationID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "operationId": autorest.Encode("path", operationID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vaultName": autorest.Encode("path", vaultName), + } + + const APIVersion = "2021-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataProtection/backupVaults/{vaultName}/backupJobs/operations/{operationId}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client ExportJobsOperationResultClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. 
+func (client ExportJobsOperationResultClient) GetResponder(resp *http.Response) (result ExportJobsResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/azurerm/internal/services/dataprotection/legacysdk/dataprotection/findrestorabletimeranges.go b/azurerm/internal/services/dataprotection/legacysdk/dataprotection/findrestorabletimeranges.go new file mode 100644 index 000000000000..c97460707579 --- /dev/null +++ b/azurerm/internal/services/dataprotection/legacysdk/dataprotection/findrestorabletimeranges.go @@ -0,0 +1,121 @@ +package dataprotection + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" +) + +// FindRestorableTimeRangesClient is the open API 2.0 Specs for Azure Data Protection service +type FindRestorableTimeRangesClient struct { + BaseClient +} + +// NewFindRestorableTimeRangesClient creates an instance of the FindRestorableTimeRangesClient client. +func NewFindRestorableTimeRangesClient(subscriptionID string) FindRestorableTimeRangesClient { + return NewFindRestorableTimeRangesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewFindRestorableTimeRangesClientWithBaseURI creates an instance of the FindRestorableTimeRangesClient client using +// a custom endpoint. 
Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign +// clouds, Azure stack). +func NewFindRestorableTimeRangesClientWithBaseURI(baseURI string, subscriptionID string) FindRestorableTimeRangesClient { + return FindRestorableTimeRangesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Post sends the post request. +// Parameters: +// vaultName - the name of the backup vault. +// resourceGroupName - the name of the resource group where the backup vault is present. +// parameters - request body for operation +func (client FindRestorableTimeRangesClient) Post(ctx context.Context, vaultName string, resourceGroupName string, backupInstances string, parameters AzureBackupFindRestorableTimeRangesRequest) (result AzureBackupFindRestorableTimeRangesResponseResource, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/FindRestorableTimeRangesClient.Post") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.StartTime", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.EndTime", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewError("dataprotection.FindRestorableTimeRangesClient", "Post", err.Error()) + } + + req, err := client.PostPreparer(ctx, vaultName, resourceGroupName, backupInstances, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.FindRestorableTimeRangesClient", "Post", nil, "Failure preparing request") + return + } + + resp, err := client.PostSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "dataprotection.FindRestorableTimeRangesClient", "Post", resp, "Failure sending 
request") + return + } + + result, err = client.PostResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.FindRestorableTimeRangesClient", "Post", resp, "Failure responding to request") + return + } + + return +} + +// PostPreparer prepares the Post request. +func (client FindRestorableTimeRangesClient) PostPreparer(ctx context.Context, vaultName string, resourceGroupName string, backupInstances string, parameters AzureBackupFindRestorableTimeRangesRequest) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "backupInstances": autorest.Encode("path", backupInstances), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vaultName": autorest.Encode("path", vaultName), + } + + const APIVersion = "2021-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataProtection/backupVaults/{vaultName}/backupInstances/{backupInstances}/findRestorableTimeRanges", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// PostSender sends the Post request. The method will close the +// http.Response Body if it receives an error. +func (client FindRestorableTimeRangesClient) PostSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// PostResponder handles the response to the Post request. The method always +// closes the http.Response Body. 
+func (client FindRestorableTimeRangesClient) PostResponder(resp *http.Response) (result AzureBackupFindRestorableTimeRangesResponseResource, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/azurerm/internal/services/dataprotection/legacysdk/dataprotection/job.go b/azurerm/internal/services/dataprotection/legacysdk/dataprotection/job.go new file mode 100644 index 000000000000..1e20b9f3e0d7 --- /dev/null +++ b/azurerm/internal/services/dataprotection/legacysdk/dataprotection/job.go @@ -0,0 +1,110 @@ +package dataprotection + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" +) + +// JobClient is the open API 2.0 Specs for Azure Data Protection service +type JobClient struct { + BaseClient +} + +// NewJobClient creates an instance of the JobClient client. +func NewJobClient(subscriptionID string) JobClient { + return NewJobClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewJobClientWithBaseURI creates an instance of the JobClient client using a custom endpoint. Use this when +// interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). 
+func NewJobClientWithBaseURI(baseURI string, subscriptionID string) JobClient { + return JobClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Get gets a job with id in a backup vault +// Parameters: +// resourceGroupName - the name of the resource group where the backup vault is present. +// vaultName - the name of the backup vault. +// jobID - the Job ID. This is a GUID-formatted string (e.g. 00000000-0000-0000-0000-000000000000). +func (client JobClient) Get(ctx context.Context, resourceGroupName string, vaultName string, jobID string) (result AzureBackupJobResource, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/JobClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceGroupName, vaultName, jobID) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.JobClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "dataprotection.JobClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.JobClient", "Get", resp, "Failure responding to request") + return + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client JobClient) GetPreparer(ctx context.Context, resourceGroupName string, vaultName string, jobID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "jobId": autorest.Encode("path", jobID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vaultName": autorest.Encode("path", vaultName), + } + + const APIVersion = "2021-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataProtection/backupVaults/{vaultName}/backupJobs/{jobId}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client JobClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. 
+func (client JobClient) GetResponder(resp *http.Response) (result AzureBackupJobResource, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/azurerm/internal/services/dataprotection/legacysdk/dataprotection/jobs.go b/azurerm/internal/services/dataprotection/legacysdk/dataprotection/jobs.go new file mode 100644 index 000000000000..655c6617438a --- /dev/null +++ b/azurerm/internal/services/dataprotection/legacysdk/dataprotection/jobs.go @@ -0,0 +1,150 @@ +package dataprotection + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" +) + +// JobsClient is the open API 2.0 Specs for Azure Data Protection service +type JobsClient struct { + BaseClient +} + +// NewJobsClient creates an instance of the JobsClient client. +func NewJobsClient(subscriptionID string) JobsClient { + return NewJobsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewJobsClientWithBaseURI creates an instance of the JobsClient client using a custom endpoint. Use this when +// interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewJobsClientWithBaseURI(baseURI string, subscriptionID string) JobsClient { + return JobsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List returns list of jobs belonging to a backup vault +// Parameters: +// resourceGroupName - the name of the resource group where the backup vault is present. 
+// vaultName - the name of the backup vault. +func (client JobsClient) List(ctx context.Context, resourceGroupName string, vaultName string) (result AzureBackupJobResourceListPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/JobsClient.List") + defer func() { + sc := -1 + if result.abjrl.Response.Response != nil { + sc = result.abjrl.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName, vaultName) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.JobsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.abjrl.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "dataprotection.JobsClient", "List", resp, "Failure sending request") + return + } + + result.abjrl, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.JobsClient", "List", resp, "Failure responding to request") + return + } + if result.abjrl.hasNextLink() && result.abjrl.IsEmpty() { + err = result.NextWithContext(ctx) + return + } + + return +} + +// ListPreparer prepares the List request. 
+func (client JobsClient) ListPreparer(ctx context.Context, resourceGroupName string, vaultName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vaultName": autorest.Encode("path", vaultName), + } + + const APIVersion = "2021-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataProtection/backupVaults/{vaultName}/backupJobs", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client JobsClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client JobsClient) ListResponder(resp *http.Response) (result AzureBackupJobResourceList, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. 
+func (client JobsClient) listNextResults(ctx context.Context, lastResults AzureBackupJobResourceList) (result AzureBackupJobResourceList, err error) { + req, err := lastResults.azureBackupJobResourceListPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "dataprotection.JobsClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "dataprotection.JobsClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.JobsClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client JobsClient) ListComplete(ctx context.Context, resourceGroupName string, vaultName string) (result AzureBackupJobResourceListIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/JobsClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName, vaultName) + return +} diff --git a/azurerm/internal/services/dataprotection/legacysdk/dataprotection/models.go b/azurerm/internal/services/dataprotection/legacysdk/dataprotection/models.go new file mode 100644 index 000000000000..7255eb0214ad --- /dev/null +++ b/azurerm/internal/services/dataprotection/legacysdk/dataprotection/models.go @@ -0,0 +1,5512 @@ +package dataprotection + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "encoding/json" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/date" + "github.com/Azure/go-autorest/autorest/to" + "github.com/Azure/go-autorest/tracing" +) + +// The package's fully qualified name. +const fqdn = "github.com/Azure/azure-sdk-for-go/services/dataprotection/mgmt/2021-01-01/dataprotection" + +// AbsoluteDeleteOption delete option with duration +type AbsoluteDeleteOption struct { + // Duration - Duration of deletion after given timespan + Duration *string `json:"duration,omitempty"` + // ObjectType - Possible values include: 'ObjectTypeDeleteOption', 'ObjectTypeAbsoluteDeleteOption' + ObjectType ObjectTypeBasicDeleteOption `json:"objectType,omitempty"` +} + +// MarshalJSON is the custom marshaler for AbsoluteDeleteOption. +func (ado AbsoluteDeleteOption) MarshalJSON() ([]byte, error) { + ado.ObjectType = ObjectTypeAbsoluteDeleteOption + objectMap := make(map[string]interface{}) + if ado.Duration != nil { + objectMap["duration"] = ado.Duration + } + if ado.ObjectType != "" { + objectMap["objectType"] = ado.ObjectType + } + return json.Marshal(objectMap) +} + +// AsAbsoluteDeleteOption is the BasicDeleteOption implementation for AbsoluteDeleteOption. +func (ado AbsoluteDeleteOption) AsAbsoluteDeleteOption() (*AbsoluteDeleteOption, bool) { + return &ado, true +} + +// AsDeleteOption is the BasicDeleteOption implementation for AbsoluteDeleteOption. +func (ado AbsoluteDeleteOption) AsDeleteOption() (*DeleteOption, bool) { + return nil, false +} + +// AsBasicDeleteOption is the BasicDeleteOption implementation for AbsoluteDeleteOption. 
+func (ado AbsoluteDeleteOption) AsBasicDeleteOption() (BasicDeleteOption, bool) { + return &ado, true +} + +// AdHocBackupRuleOptions adhoc backup rules +type AdHocBackupRuleOptions struct { + RuleName *string `json:"ruleName,omitempty"` + TriggerOption *AdhocBackupTriggerOption `json:"triggerOption,omitempty"` +} + +// AdhocBackupTriggerOption adhoc backup trigger option +type AdhocBackupTriggerOption struct { + RetentionTagOverride *string `json:"retentionTagOverride,omitempty"` +} + +// AdhocBasedTaggingCriteria adhoc backup tagging criteria +type AdhocBasedTaggingCriteria struct { + // TagInfo - Retention tag information + TagInfo *RetentionTag `json:"tagInfo,omitempty"` +} + +// AdhocBasedTriggerContext adhoc trigger context +type AdhocBasedTriggerContext struct { + // TaggingCriteria - Tagging Criteria containing retention tag for adhoc backup. + TaggingCriteria *AdhocBasedTaggingCriteria `json:"taggingCriteria,omitempty"` + // ObjectType - Possible values include: 'ObjectTypeTriggerContext', 'ObjectTypeAdhocBasedTriggerContext', 'ObjectTypeScheduleBasedTriggerContext' + ObjectType ObjectTypeBasicTriggerContext `json:"objectType,omitempty"` +} + +// MarshalJSON is the custom marshaler for AdhocBasedTriggerContext. +func (abtc AdhocBasedTriggerContext) MarshalJSON() ([]byte, error) { + abtc.ObjectType = ObjectTypeAdhocBasedTriggerContext + objectMap := make(map[string]interface{}) + if abtc.TaggingCriteria != nil { + objectMap["taggingCriteria"] = abtc.TaggingCriteria + } + if abtc.ObjectType != "" { + objectMap["objectType"] = abtc.ObjectType + } + return json.Marshal(objectMap) +} + +// AsAdhocBasedTriggerContext is the BasicTriggerContext implementation for AdhocBasedTriggerContext. +func (abtc AdhocBasedTriggerContext) AsAdhocBasedTriggerContext() (*AdhocBasedTriggerContext, bool) { + return &abtc, true +} + +// AsScheduleBasedTriggerContext is the BasicTriggerContext implementation for AdhocBasedTriggerContext. 
+func (abtc AdhocBasedTriggerContext) AsScheduleBasedTriggerContext() (*ScheduleBasedTriggerContext, bool) { + return nil, false +} + +// AsTriggerContext is the BasicTriggerContext implementation for AdhocBasedTriggerContext. +func (abtc AdhocBasedTriggerContext) AsTriggerContext() (*TriggerContext, bool) { + return nil, false +} + +// AsBasicTriggerContext is the BasicTriggerContext implementation for AdhocBasedTriggerContext. +func (abtc AdhocBasedTriggerContext) AsBasicTriggerContext() (BasicTriggerContext, bool) { + return &abtc, true +} + +// AzureBackupDiscreteRecoveryPoint azure backup discrete RecoveryPoint +type AzureBackupDiscreteRecoveryPoint struct { + FriendlyName *string `json:"friendlyName,omitempty"` + RecoveryPointDataStoresDetails *[]RecoveryPointDataStoreDetails `json:"recoveryPointDataStoresDetails,omitempty"` + RecoveryPointTime *date.Time `json:"recoveryPointTime,omitempty"` + PolicyName *string `json:"policyName,omitempty"` + PolicyVersion *string `json:"policyVersion,omitempty"` + RecoveryPointID *string `json:"recoveryPointId,omitempty"` + RecoveryPointType *string `json:"recoveryPointType,omitempty"` + RetentionTagName *string `json:"retentionTagName,omitempty"` + RetentionTagVersion *string `json:"retentionTagVersion,omitempty"` + // ObjectType - Possible values include: 'ObjectTypeAzureBackupRecoveryPoint', 'ObjectTypeAzureBackupDiscreteRecoveryPoint' + ObjectType ObjectType `json:"objectType,omitempty"` +} + +// MarshalJSON is the custom marshaler for AzureBackupDiscreteRecoveryPoint. 
+func (abdrp AzureBackupDiscreteRecoveryPoint) MarshalJSON() ([]byte, error) { + abdrp.ObjectType = ObjectTypeAzureBackupDiscreteRecoveryPoint + objectMap := make(map[string]interface{}) + if abdrp.FriendlyName != nil { + objectMap["friendlyName"] = abdrp.FriendlyName + } + if abdrp.RecoveryPointDataStoresDetails != nil { + objectMap["recoveryPointDataStoresDetails"] = abdrp.RecoveryPointDataStoresDetails + } + if abdrp.RecoveryPointTime != nil { + objectMap["recoveryPointTime"] = abdrp.RecoveryPointTime + } + if abdrp.PolicyName != nil { + objectMap["policyName"] = abdrp.PolicyName + } + if abdrp.PolicyVersion != nil { + objectMap["policyVersion"] = abdrp.PolicyVersion + } + if abdrp.RecoveryPointID != nil { + objectMap["recoveryPointId"] = abdrp.RecoveryPointID + } + if abdrp.RecoveryPointType != nil { + objectMap["recoveryPointType"] = abdrp.RecoveryPointType + } + if abdrp.RetentionTagName != nil { + objectMap["retentionTagName"] = abdrp.RetentionTagName + } + if abdrp.RetentionTagVersion != nil { + objectMap["retentionTagVersion"] = abdrp.RetentionTagVersion + } + if abdrp.ObjectType != "" { + objectMap["objectType"] = abdrp.ObjectType + } + return json.Marshal(objectMap) +} + +// AsAzureBackupDiscreteRecoveryPoint is the BasicAzureBackupRecoveryPoint implementation for AzureBackupDiscreteRecoveryPoint. +func (abdrp AzureBackupDiscreteRecoveryPoint) AsAzureBackupDiscreteRecoveryPoint() (*AzureBackupDiscreteRecoveryPoint, bool) { + return &abdrp, true +} + +// AsAzureBackupRecoveryPoint is the BasicAzureBackupRecoveryPoint implementation for AzureBackupDiscreteRecoveryPoint. +func (abdrp AzureBackupDiscreteRecoveryPoint) AsAzureBackupRecoveryPoint() (*AzureBackupRecoveryPoint, bool) { + return nil, false +} + +// AsBasicAzureBackupRecoveryPoint is the BasicAzureBackupRecoveryPoint implementation for AzureBackupDiscreteRecoveryPoint. 
func (abdrp AzureBackupDiscreteRecoveryPoint) AsBasicAzureBackupRecoveryPoint() (BasicAzureBackupRecoveryPoint, bool) {
	return &abdrp, true
}

// AzureBackupFindRestorableTimeRangesRequest list Restore Ranges Request
type AzureBackupFindRestorableTimeRangesRequest struct {
	// SourceDataStoreType - Gets or sets the type of the source data store. Possible values include: 'RestoreSourceDataStoreTypeOperationalStore', 'RestoreSourceDataStoreTypeVaultStore', 'RestoreSourceDataStoreTypeArchiveStore'
	SourceDataStoreType RestoreSourceDataStoreType `json:"sourceDataStoreType,omitempty"`
	// StartTime - Start time for the List Restore Ranges request
	StartTime *string `json:"startTime,omitempty"`
	// EndTime - End time for the List Restore Ranges request
	EndTime *string `json:"endTime,omitempty"`
}

// AzureBackupFindRestorableTimeRangesRequestResource list Restore Ranges Request
type AzureBackupFindRestorableTimeRangesRequestResource struct {
	// Content - AzureBackupFindRestorableTimeRangesRequestResource content
	Content                *AzureBackupFindRestorableTimeRangesRequest `json:"content,omitempty"`
	SubscriptionID         *string                                     `json:"subscriptionId,omitempty"`
	URI                    *string                                     `json:"uri,omitempty"`
	Headers                map[string][]string                         `json:"headers"`
	SupportedGroupVersions *[]string                                   `json:"supportedGroupVersions,omitempty"`
	CultureInfo            *string                                     `json:"cultureInfo,omitempty"`
	Parameters             map[string]*string                          `json:"parameters"`
	HTTPMethod             *string                                     `json:"httpMethod,omitempty"`
}

// MarshalJSON is the custom marshaler for AzureBackupFindRestorableTimeRangesRequestResource.
func (abfrtrrr AzureBackupFindRestorableTimeRangesRequestResource) MarshalJSON() ([]byte, error) {
	// Nil maps/pointers are skipped entirely, so absent values never appear in the payload.
	objectMap := make(map[string]interface{})
	if abfrtrrr.Content != nil {
		objectMap["content"] = abfrtrrr.Content
	}
	if abfrtrrr.SubscriptionID != nil {
		objectMap["subscriptionId"] = abfrtrrr.SubscriptionID
	}
	if abfrtrrr.URI != nil {
		objectMap["uri"] = abfrtrrr.URI
	}
	if abfrtrrr.Headers != nil {
		objectMap["headers"] = abfrtrrr.Headers
	}
	if abfrtrrr.SupportedGroupVersions != nil {
		objectMap["supportedGroupVersions"] = abfrtrrr.SupportedGroupVersions
	}
	if abfrtrrr.CultureInfo != nil {
		objectMap["cultureInfo"] = abfrtrrr.CultureInfo
	}
	if abfrtrrr.Parameters != nil {
		objectMap["parameters"] = abfrtrrr.Parameters
	}
	if abfrtrrr.HTTPMethod != nil {
		objectMap["httpMethod"] = abfrtrrr.HTTPMethod
	}
	return json.Marshal(objectMap)
}

// AzureBackupFindRestorableTimeRangesResponse list Restore Ranges Response
type AzureBackupFindRestorableTimeRangesResponse struct {
	// RestorableTimeRanges - Returns the Restore Ranges available on the Backup Instance.
	RestorableTimeRanges *[]RestorableTimeRange `json:"restorableTimeRanges,omitempty"`
	ObjectType           *string                `json:"objectType,omitempty"`
}

// AzureBackupFindRestorableTimeRangesResponseResource list Restore Ranges Response
type AzureBackupFindRestorableTimeRangesResponseResource struct {
	autorest.Response `json:"-"`
	// Properties - AzureBackupFindRestorableTimeRangesResponseResource properties
	Properties *AzureBackupFindRestorableTimeRangesResponse `json:"properties,omitempty"`
	// ID - READ-ONLY; Resource Id represents the complete path to the resource.
	ID *string `json:"id,omitempty"`
	// Name - READ-ONLY; Resource name associated with the resource.
	Name *string `json:"name,omitempty"`
	// Type - READ-ONLY; Resource type represents the complete path of the form Namespace/ResourceType/ResourceType/...
	Type       *string     `json:"type,omitempty"`
	SystemData *SystemData `json:"systemData,omitempty"`
}

// MarshalJSON is the custom marshaler for AzureBackupFindRestorableTimeRangesResponseResource.
// The READ-ONLY fields (ID, Name, Type) are deliberately omitted so they are never sent to the service.
func (abfrtrrr AzureBackupFindRestorableTimeRangesResponseResource) MarshalJSON() ([]byte, error) {
	objectMap := make(map[string]interface{})
	if abfrtrrr.Properties != nil {
		objectMap["properties"] = abfrtrrr.Properties
	}
	if abfrtrrr.SystemData != nil {
		objectMap["systemData"] = abfrtrrr.SystemData
	}
	return json.Marshal(objectMap)
}

// AzureBackupJob azureBackup Job Class
type AzureBackupJob struct {
	// ActivityID - Job Activity Id
	ActivityID *string `json:"activityID,omitempty"`
	// BackupInstanceFriendlyName - Name of the Backup Instance
	BackupInstanceFriendlyName *string `json:"backupInstanceFriendlyName,omitempty"`
	// BackupInstanceID - READ-ONLY; ARM ID of the Backup Instance
	BackupInstanceID *string `json:"backupInstanceId,omitempty"`
	// DataSourceID - ARM ID of the DataSource
	DataSourceID *string `json:"dataSourceId,omitempty"`
	// DataSourceLocation - Location of the DataSource
	DataSourceLocation *string `json:"dataSourceLocation,omitempty"`
	// DataSourceName - User Friendly Name of the DataSource
	DataSourceName *string `json:"dataSourceName,omitempty"`
	// DataSourceSetName - Data Source Set Name of the DataSource
	DataSourceSetName *string `json:"dataSourceSetName,omitempty"`
	// DataSourceType - Type of DataSource
	DataSourceType *string `json:"dataSourceType,omitempty"`
	// Duration - Total run time of the job. ISO 8601 format.
	Duration *string `json:"duration,omitempty"`
	// EndTime - READ-ONLY; EndTime of the job(in UTC)
	EndTime *date.Time `json:"endTime,omitempty"`
	// ErrorDetails - READ-ONLY; A List, detailing the errors related to the job
	ErrorDetails *[]UserFacingError `json:"errorDetails,omitempty"`
	// ExtendedInfo - READ-ONLY; Extended Information about the job
	ExtendedInfo *JobExtendedInfo `json:"extendedInfo,omitempty"`
	// IsUserTriggered - Indicated that whether the job is adhoc(true) or scheduled(false)
	IsUserTriggered *bool `json:"isUserTriggered,omitempty"`
	// Operation - It indicates the type of Job i.e. Backup:full/log/diff ;Restore:ALR/OLR; Tiering:Backup/Archive ; Management:ConfigureProtection/UnConfigure
	Operation *string `json:"operation,omitempty"`
	// OperationCategory - It indicates the type of Job i.e. Backup/Restore/Tiering/Management
	OperationCategory *string `json:"operationCategory,omitempty"`
	// PolicyID - READ-ONLY; ARM ID of the policy
	PolicyID *string `json:"policyId,omitempty"`
	// PolicyName - READ-ONLY; Name of the policy
	PolicyName *string `json:"policyName,omitempty"`
	// ProgressEnabled - Indicated whether progress is enabled for the job
	ProgressEnabled *bool `json:"progressEnabled,omitempty"`
	// ProgressURL - READ-ONLY; Url which contains job's progress
	ProgressURL *string `json:"progressUrl,omitempty"`
	// RestoreType - READ-ONLY; It indicates the sub type of operation i.e. in case of Restore it can be ALR/OLR
	RestoreType *string `json:"restoreType,omitempty"`
	// SourceResourceGroup - Resource Group Name of the Datasource
	SourceResourceGroup *string `json:"sourceResourceGroup,omitempty"`
	// SourceSubscriptionID - SubscriptionId corresponding to the DataSource
	SourceSubscriptionID *string `json:"sourceSubscriptionID,omitempty"`
	// StartTime - StartTime of the job(in UTC)
	StartTime *date.Time `json:"startTime,omitempty"`
	// Status - Status of the job like InProgress/Success/Failed/Cancelled/SuccessWithWarning
	Status *string `json:"status,omitempty"`
	// SubscriptionID - Subscription Id of the corresponding backup vault
	SubscriptionID *string `json:"subscriptionId,omitempty"`
	// SupportedActions - List of supported actions
	SupportedActions *[]string `json:"supportedActions,omitempty"`
	// VaultName - Name of the vault
	VaultName                *string `json:"vaultName,omitempty"`
	Etag                     *string `json:"etag,omitempty"`
	SourceDataStoreName      *string `json:"sourceDataStoreName,omitempty"`
	DestinationDataStoreName *string `json:"destinationDataStoreName,omitempty"`
}

// MarshalJSON is the custom marshaler for AzureBackupJob.
// The READ-ONLY fields (BackupInstanceID, EndTime, ErrorDetails, ExtendedInfo, PolicyID,
// PolicyName, ProgressURL, RestoreType) are deliberately omitted from the payload.
func (abj AzureBackupJob) MarshalJSON() ([]byte, error) {
	objectMap := make(map[string]interface{})
	if abj.ActivityID != nil {
		objectMap["activityID"] = abj.ActivityID
	}
	if abj.BackupInstanceFriendlyName != nil {
		objectMap["backupInstanceFriendlyName"] = abj.BackupInstanceFriendlyName
	}
	if abj.DataSourceID != nil {
		objectMap["dataSourceId"] = abj.DataSourceID
	}
	if abj.DataSourceLocation != nil {
		objectMap["dataSourceLocation"] = abj.DataSourceLocation
	}
	if abj.DataSourceName != nil {
		objectMap["dataSourceName"] = abj.DataSourceName
	}
	if abj.DataSourceSetName != nil {
		objectMap["dataSourceSetName"] = abj.DataSourceSetName
	}
	if abj.DataSourceType != nil {
		objectMap["dataSourceType"] = abj.DataSourceType
	}
	if abj.Duration != nil {
		objectMap["duration"] = abj.Duration
	}
	if abj.IsUserTriggered != nil {
		objectMap["isUserTriggered"] = abj.IsUserTriggered
	}
	if abj.Operation != nil {
		objectMap["operation"] = abj.Operation
	}
	if abj.OperationCategory != nil {
		objectMap["operationCategory"] = abj.OperationCategory
	}
	if abj.ProgressEnabled != nil {
		objectMap["progressEnabled"] = abj.ProgressEnabled
	}
	if abj.SourceResourceGroup != nil {
		objectMap["sourceResourceGroup"] = abj.SourceResourceGroup
	}
	if abj.SourceSubscriptionID != nil {
		objectMap["sourceSubscriptionID"] = abj.SourceSubscriptionID
	}
	if abj.StartTime != nil {
		objectMap["startTime"] = abj.StartTime
	}
	if abj.Status != nil {
		objectMap["status"] = abj.Status
	}
	if abj.SubscriptionID != nil {
		objectMap["subscriptionId"] = abj.SubscriptionID
	}
	if abj.SupportedActions != nil {
		objectMap["supportedActions"] = abj.SupportedActions
	}
	if abj.VaultName != nil {
		objectMap["vaultName"] = abj.VaultName
	}
	if abj.Etag != nil {
		objectMap["etag"] = abj.Etag
	}
	if abj.SourceDataStoreName != nil {
		objectMap["sourceDataStoreName"] = abj.SourceDataStoreName
	}
	if abj.DestinationDataStoreName != nil {
		objectMap["destinationDataStoreName"] = abj.DestinationDataStoreName
	}
	return json.Marshal(objectMap)
}

// AzureBackupJobResource azureBackup Job Resource Class
type AzureBackupJobResource struct {
	autorest.Response `json:"-"`
	// Properties - AzureBackupJobResource properties
	Properties *AzureBackupJob `json:"properties,omitempty"`
	// ID - READ-ONLY; Resource Id represents the complete path to the resource.
	ID *string `json:"id,omitempty"`
	// Name - READ-ONLY; Resource name associated with the resource.
	Name *string `json:"name,omitempty"`
	// Type - READ-ONLY; Resource type represents the complete path of the form Namespace/ResourceType/ResourceType/...
	Type       *string     `json:"type,omitempty"`
	SystemData *SystemData `json:"systemData,omitempty"`
}

// MarshalJSON is the custom marshaler for AzureBackupJobResource.
// The READ-ONLY fields (ID, Name, Type) are deliberately omitted from the payload.
func (abjr AzureBackupJobResource) MarshalJSON() ([]byte, error) {
	objectMap := make(map[string]interface{})
	if abjr.Properties != nil {
		objectMap["properties"] = abjr.Properties
	}
	if abjr.SystemData != nil {
		objectMap["systemData"] = abjr.SystemData
	}
	return json.Marshal(objectMap)
}

// AzureBackupJobResourceList list of AzureBackup Job resources
type AzureBackupJobResourceList struct {
	autorest.Response `json:"-"`
	// Value - List of resources.
	Value *[]AzureBackupJobResource `json:"value,omitempty"`
	// NextLink - The uri to fetch the next page of resources. Call ListNext() fetches next page of resources.
	NextLink *string `json:"nextLink,omitempty"`
}

// AzureBackupJobResourceListIterator provides access to a complete listing of AzureBackupJobResource
// values.
type AzureBackupJobResourceListIterator struct {
	i    int
	page AzureBackupJobResourceListPage
}

// NextWithContext advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
func (iter *AzureBackupJobResourceListIterator) NextWithContext(ctx context.Context) (err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/AzureBackupJobResourceListIterator.NextWithContext")
		defer func() {
			// Record the final HTTP status code, or -1 when no response was captured.
			sc := -1
			if iter.Response().Response.Response != nil {
				sc = iter.Response().Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	// Advance within the current page when another value is available.
	iter.i++
	if iter.i < len(iter.page.Values()) {
		return nil
	}
	// Otherwise fetch the next page; roll the cursor back on failure so the
	// iterator does not advance.
	err = iter.page.NextWithContext(ctx)
	if err != nil {
		iter.i--
		return err
	}
	iter.i = 0
	return nil
}

// Next advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (iter *AzureBackupJobResourceListIterator) Next() error {
	return iter.NextWithContext(context.Background())
}

// NotDone returns true if the enumeration should be started or is not yet complete.
func (iter AzureBackupJobResourceListIterator) NotDone() bool {
	return iter.page.NotDone() && iter.i < len(iter.page.Values())
}

// Response returns the raw server response from the last page request.
func (iter AzureBackupJobResourceListIterator) Response() AzureBackupJobResourceList {
	return iter.page.Response()
}

// Value returns the current value or a zero-initialized value if the
// iterator has advanced beyond the end of the collection.
func (iter AzureBackupJobResourceListIterator) Value() AzureBackupJobResource {
	if !iter.page.NotDone() {
		return AzureBackupJobResource{}
	}
	return iter.page.Values()[iter.i]
}

// Creates a new instance of the AzureBackupJobResourceListIterator type.
func NewAzureBackupJobResourceListIterator(page AzureBackupJobResourceListPage) AzureBackupJobResourceListIterator {
	return AzureBackupJobResourceListIterator{page: page}
}

// IsEmpty returns true if the ListResult contains no values.
func (abjrl AzureBackupJobResourceList) IsEmpty() bool {
	return abjrl.Value == nil || len(*abjrl.Value) == 0
}

// hasNextLink returns true if the NextLink is not empty.
func (abjrl AzureBackupJobResourceList) hasNextLink() bool {
	return abjrl.NextLink != nil && len(*abjrl.NextLink) != 0
}

// azureBackupJobResourceListPreparer prepares a request to retrieve the next set of results.
// It returns nil if no more results exist.
func (abjrl AzureBackupJobResourceList) azureBackupJobResourceListPreparer(ctx context.Context) (*http.Request, error) {
	if !abjrl.hasNextLink() {
		return nil, nil
	}
	return autorest.Prepare((&http.Request{}).WithContext(ctx),
		autorest.AsJSON(),
		autorest.AsGet(),
		autorest.WithBaseURL(to.String(abjrl.NextLink)))
}

// AzureBackupJobResourceListPage contains a page of AzureBackupJobResource values.
type AzureBackupJobResourceListPage struct {
	fn    func(context.Context, AzureBackupJobResourceList) (AzureBackupJobResourceList, error)
	abjrl AzureBackupJobResourceList
}

// NextWithContext advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
func (page *AzureBackupJobResourceListPage) NextWithContext(ctx context.Context) (err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/AzureBackupJobResourceListPage.NextWithContext")
		defer func() {
			// Record the final HTTP status code, or -1 when no response was captured.
			sc := -1
			if page.Response().Response.Response != nil {
				sc = page.Response().Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	for {
		next, err := page.fn(ctx, page.abjrl)
		if err != nil {
			return err
		}
		page.abjrl = next
		// Keep fetching while pages come back empty but still advertise a next link.
		if !next.hasNextLink() || !next.IsEmpty() {
			break
		}
	}
	return nil
}

// Next advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (page *AzureBackupJobResourceListPage) Next() error {
	return page.NextWithContext(context.Background())
}

// NotDone returns true if the page enumeration should be started or is not yet complete.
func (page AzureBackupJobResourceListPage) NotDone() bool {
	return !page.abjrl.IsEmpty()
}

// Response returns the raw server response from the last page request.
func (page AzureBackupJobResourceListPage) Response() AzureBackupJobResourceList {
	return page.abjrl
}

// Values returns the slice of values for the current page or nil if there are no values.
func (page AzureBackupJobResourceListPage) Values() []AzureBackupJobResource {
	if page.abjrl.IsEmpty() {
		return nil
	}
	return *page.abjrl.Value
}

// Creates a new instance of the AzureBackupJobResourceListPage type.
func NewAzureBackupJobResourceListPage(cur AzureBackupJobResourceList, getNextPage func(context.Context, AzureBackupJobResourceList) (AzureBackupJobResourceList, error)) AzureBackupJobResourceListPage {
	return AzureBackupJobResourceListPage{
		fn:    getNextPage,
		abjrl: cur,
	}
}

// AzureBackupParams azure backup parameters
type AzureBackupParams struct {
	// BackupType - BackupType ; Full/Incremental etc
	BackupType *string `json:"backupType,omitempty"`
	// ObjectType - Possible values include: 'ObjectTypeBackupParameters', 'ObjectTypeAzureBackupParams'
	ObjectType ObjectTypeBasicBackupParameters `json:"objectType,omitempty"`
}

// MarshalJSON is the custom marshaler for AzureBackupParams.
func (abp AzureBackupParams) MarshalJSON() ([]byte, error) {
	// Force the discriminator to this concrete type's value before serializing.
	abp.ObjectType = ObjectTypeAzureBackupParams
	objectMap := make(map[string]interface{})
	if abp.BackupType != nil {
		objectMap["backupType"] = abp.BackupType
	}
	if abp.ObjectType != "" {
		objectMap["objectType"] = abp.ObjectType
	}
	return json.Marshal(objectMap)
}

// AsAzureBackupParams is the BasicBackupParameters implementation for AzureBackupParams.
func (abp AzureBackupParams) AsAzureBackupParams() (*AzureBackupParams, bool) {
	return &abp, true
}

// AsBackupParameters is the BasicBackupParameters implementation for AzureBackupParams.
func (abp AzureBackupParams) AsBackupParameters() (*BackupParameters, bool) {
	return nil, false
}

// AsBasicBackupParameters is the BasicBackupParameters implementation for AzureBackupParams.
func (abp AzureBackupParams) AsBasicBackupParameters() (BasicBackupParameters, bool) {
	return &abp, true
}

// BasicAzureBackupRecoveryPoint azure backup recoveryPoint
type BasicAzureBackupRecoveryPoint interface {
	AsAzureBackupDiscreteRecoveryPoint() (*AzureBackupDiscreteRecoveryPoint, bool)
	AsAzureBackupRecoveryPoint() (*AzureBackupRecoveryPoint, bool)
}

// AzureBackupRecoveryPoint azure backup recoveryPoint
type AzureBackupRecoveryPoint struct {
	// ObjectType - Possible values include: 'ObjectTypeAzureBackupRecoveryPoint', 'ObjectTypeAzureBackupDiscreteRecoveryPoint'
	ObjectType ObjectType `json:"objectType,omitempty"`
}

// unmarshalBasicAzureBackupRecoveryPoint decodes body into the concrete type selected by
// the objectType discriminator; unrecognized values fall back to the base type.
func unmarshalBasicAzureBackupRecoveryPoint(body []byte) (BasicAzureBackupRecoveryPoint, error) {
	var m map[string]interface{}
	err := json.Unmarshal(body, &m)
	if err != nil {
		return nil, err
	}

	switch m["objectType"] {
	case string(ObjectTypeAzureBackupDiscreteRecoveryPoint):
		var abdrp AzureBackupDiscreteRecoveryPoint
		err := json.Unmarshal(body, &abdrp)
		return abdrp, err
	default:
		var abrp AzureBackupRecoveryPoint
		err := json.Unmarshal(body, &abrp)
		return abrp, err
	}
}

// unmarshalBasicAzureBackupRecoveryPointArray decodes each element of a JSON array via
// unmarshalBasicAzureBackupRecoveryPoint.
func unmarshalBasicAzureBackupRecoveryPointArray(body []byte) ([]BasicAzureBackupRecoveryPoint, error) {
	var rawMessages []*json.RawMessage
	err := json.Unmarshal(body, &rawMessages)
	if err != nil {
		return nil, err
	}

	abrpArray := make([]BasicAzureBackupRecoveryPoint, len(rawMessages))

	for index, rawMessage := range rawMessages {
		abrp, err := unmarshalBasicAzureBackupRecoveryPoint(*rawMessage)
		if err != nil {
			return nil, err
		}
		abrpArray[index] = abrp
	}
	return abrpArray, nil
}

// MarshalJSON is the custom marshaler for AzureBackupRecoveryPoint.
func (abrp AzureBackupRecoveryPoint) MarshalJSON() ([]byte, error) {
	// Force the discriminator to this concrete type's value before serializing.
	abrp.ObjectType = ObjectTypeAzureBackupRecoveryPoint
	objectMap := make(map[string]interface{})
	if abrp.ObjectType != "" {
		objectMap["objectType"] = abrp.ObjectType
	}
	return json.Marshal(objectMap)
}

// AsAzureBackupDiscreteRecoveryPoint is the BasicAzureBackupRecoveryPoint implementation for AzureBackupRecoveryPoint.
func (abrp AzureBackupRecoveryPoint) AsAzureBackupDiscreteRecoveryPoint() (*AzureBackupDiscreteRecoveryPoint, bool) {
	return nil, false
}

// AsAzureBackupRecoveryPoint is the BasicAzureBackupRecoveryPoint implementation for AzureBackupRecoveryPoint.
func (abrp AzureBackupRecoveryPoint) AsAzureBackupRecoveryPoint() (*AzureBackupRecoveryPoint, bool) {
	return &abrp, true
}

// AsBasicAzureBackupRecoveryPoint is the BasicAzureBackupRecoveryPoint implementation for AzureBackupRecoveryPoint.
func (abrp AzureBackupRecoveryPoint) AsBasicAzureBackupRecoveryPoint() (BasicAzureBackupRecoveryPoint, bool) {
	return &abrp, true
}

// BasicAzureBackupRecoveryPointBasedRestoreRequest azure backup recoveryPoint based restore request
type BasicAzureBackupRecoveryPointBasedRestoreRequest interface {
	AsAzureBackupRestoreWithRehydrationRequest() (*AzureBackupRestoreWithRehydrationRequest, bool)
	AsAzureBackupRecoveryPointBasedRestoreRequest() (*AzureBackupRecoveryPointBasedRestoreRequest, bool)
}

// AzureBackupRecoveryPointBasedRestoreRequest azure backup recoveryPoint based restore request
type AzureBackupRecoveryPointBasedRestoreRequest struct {
	RecoveryPointID *string `json:"recoveryPointId,omitempty"`
	// RestoreTargetInfo - Gets or sets the restore target information.
	RestoreTargetInfo BasicRestoreTargetInfoBase `json:"restoreTargetInfo,omitempty"`
	// SourceDataStoreType - Gets or sets the type of the source data store. Possible values include: 'SourceDataStoreTypeArchiveStore', 'SourceDataStoreTypeSnapshotStore', 'SourceDataStoreTypeVaultStore'
	SourceDataStoreType SourceDataStoreType `json:"sourceDataStoreType,omitempty"`
	// ObjectType - Possible values include: 'ObjectTypeAzureBackupRestoreRequest', 'ObjectTypeAzureBackupRecoveryPointBasedRestoreRequest', 'ObjectTypeAzureBackupRestoreWithRehydrationRequest', 'ObjectTypeAzureBackupRecoveryTimeBasedRestoreRequest'
	ObjectType ObjectTypeBasicAzureBackupRestoreRequest `json:"objectType,omitempty"`
}

// unmarshalBasicAzureBackupRecoveryPointBasedRestoreRequest decodes body into the concrete type
// selected by the objectType discriminator; unrecognized values fall back to the base type.
func unmarshalBasicAzureBackupRecoveryPointBasedRestoreRequest(body []byte) (BasicAzureBackupRecoveryPointBasedRestoreRequest, error) {
	var m map[string]interface{}
	err := json.Unmarshal(body, &m)
	if err != nil {
		return nil, err
	}

	switch m["objectType"] {
	case string(ObjectTypeAzureBackupRestoreWithRehydrationRequest):
		var abrwrr AzureBackupRestoreWithRehydrationRequest
		err := json.Unmarshal(body, &abrwrr)
		return abrwrr, err
	default:
		var abrpbrr AzureBackupRecoveryPointBasedRestoreRequest
		err := json.Unmarshal(body, &abrpbrr)
		return abrpbrr, err
	}
}

// unmarshalBasicAzureBackupRecoveryPointBasedRestoreRequestArray decodes each element of a JSON
// array via unmarshalBasicAzureBackupRecoveryPointBasedRestoreRequest.
func unmarshalBasicAzureBackupRecoveryPointBasedRestoreRequestArray(body []byte) ([]BasicAzureBackupRecoveryPointBasedRestoreRequest, error) {
	var rawMessages []*json.RawMessage
	err := json.Unmarshal(body, &rawMessages)
	if err != nil {
		return nil, err
	}

	abrpbrrArray := make([]BasicAzureBackupRecoveryPointBasedRestoreRequest, len(rawMessages))

	for index, rawMessage := range rawMessages {
		abrpbrr, err := unmarshalBasicAzureBackupRecoveryPointBasedRestoreRequest(*rawMessage)
		if err != nil {
			return nil, err
		}
		abrpbrrArray[index] = abrpbrr
	}
	return abrpbrrArray, nil
}

// MarshalJSON is the custom marshaler for AzureBackupRecoveryPointBasedRestoreRequest.
func (abrpbrr AzureBackupRecoveryPointBasedRestoreRequest) MarshalJSON() ([]byte, error) {
	// Force the discriminator to this concrete type's value before serializing.
	abrpbrr.ObjectType = ObjectTypeAzureBackupRecoveryPointBasedRestoreRequest
	objectMap := make(map[string]interface{})
	if abrpbrr.RecoveryPointID != nil {
		objectMap["recoveryPointId"] = abrpbrr.RecoveryPointID
	}
	// RestoreTargetInfo is an interface value and is emitted unconditionally.
	objectMap["restoreTargetInfo"] = abrpbrr.RestoreTargetInfo
	if abrpbrr.SourceDataStoreType != "" {
		objectMap["sourceDataStoreType"] = abrpbrr.SourceDataStoreType
	}
	if abrpbrr.ObjectType != "" {
		objectMap["objectType"] = abrpbrr.ObjectType
	}
	return json.Marshal(objectMap)
}

// AsAzureBackupRecoveryPointBasedRestoreRequest is the BasicAzureBackupRestoreRequest implementation for AzureBackupRecoveryPointBasedRestoreRequest.
func (abrpbrr AzureBackupRecoveryPointBasedRestoreRequest) AsAzureBackupRecoveryPointBasedRestoreRequest() (*AzureBackupRecoveryPointBasedRestoreRequest, bool) {
	return &abrpbrr, true
}

// AsBasicAzureBackupRecoveryPointBasedRestoreRequest is the BasicAzureBackupRestoreRequest implementation for AzureBackupRecoveryPointBasedRestoreRequest.
func (abrpbrr AzureBackupRecoveryPointBasedRestoreRequest) AsBasicAzureBackupRecoveryPointBasedRestoreRequest() (BasicAzureBackupRecoveryPointBasedRestoreRequest, bool) {
	return &abrpbrr, true
}

// AsAzureBackupRestoreWithRehydrationRequest is the BasicAzureBackupRestoreRequest implementation for AzureBackupRecoveryPointBasedRestoreRequest.
func (abrpbrr AzureBackupRecoveryPointBasedRestoreRequest) AsAzureBackupRestoreWithRehydrationRequest() (*AzureBackupRestoreWithRehydrationRequest, bool) {
	return nil, false
}

// AsAzureBackupRecoveryTimeBasedRestoreRequest is the BasicAzureBackupRestoreRequest implementation for AzureBackupRecoveryPointBasedRestoreRequest.
func (abrpbrr AzureBackupRecoveryPointBasedRestoreRequest) AsAzureBackupRecoveryTimeBasedRestoreRequest() (*AzureBackupRecoveryTimeBasedRestoreRequest, bool) {
	return nil, false
}

// AsAzureBackupRestoreRequest is the BasicAzureBackupRestoreRequest implementation for AzureBackupRecoveryPointBasedRestoreRequest.
func (abrpbrr AzureBackupRecoveryPointBasedRestoreRequest) AsAzureBackupRestoreRequest() (*AzureBackupRestoreRequest, bool) {
	return nil, false
}

// AsBasicAzureBackupRestoreRequest is the BasicAzureBackupRestoreRequest implementation for AzureBackupRecoveryPointBasedRestoreRequest.
func (abrpbrr AzureBackupRecoveryPointBasedRestoreRequest) AsBasicAzureBackupRestoreRequest() (BasicAzureBackupRestoreRequest, bool) {
	return &abrpbrr, true
}

// UnmarshalJSON is the custom unmarshaler for AzureBackupRecoveryPointBasedRestoreRequest struct.
func (abrpbrr *AzureBackupRecoveryPointBasedRestoreRequest) UnmarshalJSON(body []byte) error {
	var m map[string]*json.RawMessage
	err := json.Unmarshal(body, &m)
	if err != nil {
		return err
	}
	for k, v := range m {
		switch k {
		case "recoveryPointId":
			if v != nil {
				var recoveryPointID string
				err = json.Unmarshal(*v, &recoveryPointID)
				if err != nil {
					return err
				}
				abrpbrr.RecoveryPointID = &recoveryPointID
			}
		case "restoreTargetInfo":
			if v != nil {
				// Polymorphic field: resolve the concrete RestoreTargetInfoBase implementation.
				restoreTargetInfo, err := unmarshalBasicRestoreTargetInfoBase(*v)
				if err != nil {
					return err
				}
				abrpbrr.RestoreTargetInfo = restoreTargetInfo
			}
		case "sourceDataStoreType":
			if v != nil {
				var sourceDataStoreType SourceDataStoreType
				err = json.Unmarshal(*v, &sourceDataStoreType)
				if err != nil {
					return err
				}
				abrpbrr.SourceDataStoreType = sourceDataStoreType
			}
		case "objectType":
			if v != nil {
				var objectType ObjectTypeBasicAzureBackupRestoreRequest
				err = json.Unmarshal(*v, &objectType)
				if err != nil {
					return err
				}
				abrpbrr.ObjectType = objectType
			}
		}
	}

	return nil
}

// AzureBackupRecoveryPointResource azure backup recoveryPoint resource
type AzureBackupRecoveryPointResource struct {
	autorest.Response `json:"-"`
	// Properties - AzureBackupRecoveryPointResource properties
	Properties BasicAzureBackupRecoveryPoint `json:"properties,omitempty"`
	// ID - READ-ONLY; Resource Id represents the complete path to the resource.
	ID *string `json:"id,omitempty"`
	// Name - READ-ONLY; Resource name associated with the resource.
	Name *string `json:"name,omitempty"`
	// Type - READ-ONLY; Resource type represents the complete path of the form Namespace/ResourceType/ResourceType/...
	Type       *string     `json:"type,omitempty"`
	SystemData *SystemData `json:"systemData,omitempty"`
}

// MarshalJSON is the custom marshaler for AzureBackupRecoveryPointResource.
// The READ-ONLY fields (ID, Name, Type) are deliberately omitted from the payload.
func (abrpr AzureBackupRecoveryPointResource) MarshalJSON() ([]byte, error) {
	objectMap := make(map[string]interface{})
	objectMap["properties"] = abrpr.Properties
	if abrpr.SystemData != nil {
		objectMap["systemData"] = abrpr.SystemData
	}
	return json.Marshal(objectMap)
}

// UnmarshalJSON is the custom unmarshaler for AzureBackupRecoveryPointResource struct.
func (abrpr *AzureBackupRecoveryPointResource) UnmarshalJSON(body []byte) error {
	var m map[string]*json.RawMessage
	err := json.Unmarshal(body, &m)
	if err != nil {
		return err
	}
	for k, v := range m {
		switch k {
		case "properties":
			if v != nil {
				// Polymorphic field: resolve the concrete BasicAzureBackupRecoveryPoint implementation.
				properties, err := unmarshalBasicAzureBackupRecoveryPoint(*v)
				if err != nil {
					return err
				}
				abrpr.Properties = properties
			}
		case "id":
			if v != nil {
				var ID string
				err = json.Unmarshal(*v, &ID)
				if err != nil {
					return err
				}
				abrpr.ID = &ID
			}
		case "name":
			if v != nil {
				var name string
				err = json.Unmarshal(*v, &name)
				if err != nil {
					return err
				}
				abrpr.Name = &name
			}
		case "type":
			if v != nil {
				var typeVar string
				err = json.Unmarshal(*v, &typeVar)
				if err != nil {
					return err
				}
				abrpr.Type = &typeVar
			}
		case "systemData":
			if v != nil {
				var systemData SystemData
				err = json.Unmarshal(*v, &systemData)
				if err != nil {
					return err
				}
				abrpr.SystemData = &systemData
			}
		}
	}

	return nil
}

// AzureBackupRecoveryPointResourceList azure backup recoveryPoint resource list
type AzureBackupRecoveryPointResourceList struct {
	autorest.Response `json:"-"`
	// Value - List of resources.
	Value *[]AzureBackupRecoveryPointResource `json:"value,omitempty"`
	// NextLink - The uri to fetch the next page of resources. Call ListNext() fetches next page of resources.
	NextLink *string `json:"nextLink,omitempty"`
}

// AzureBackupRecoveryPointResourceListIterator provides access to a complete listing of
// AzureBackupRecoveryPointResource values.
type AzureBackupRecoveryPointResourceListIterator struct {
	i    int
	page AzureBackupRecoveryPointResourceListPage
}

// NextWithContext advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
+func (iter *AzureBackupRecoveryPointResourceListIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AzureBackupRecoveryPointResourceListIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *AzureBackupRecoveryPointResourceListIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter AzureBackupRecoveryPointResourceListIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter AzureBackupRecoveryPointResourceListIterator) Response() AzureBackupRecoveryPointResourceList { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter AzureBackupRecoveryPointResourceListIterator) Value() AzureBackupRecoveryPointResource { + if !iter.page.NotDone() { + return AzureBackupRecoveryPointResource{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the AzureBackupRecoveryPointResourceListIterator type. 
func NewAzureBackupRecoveryPointResourceListIterator(page AzureBackupRecoveryPointResourceListPage) AzureBackupRecoveryPointResourceListIterator {
	return AzureBackupRecoveryPointResourceListIterator{page: page}
}

// IsEmpty returns true if the ListResult contains no values.
func (abrprl AzureBackupRecoveryPointResourceList) IsEmpty() bool {
	return abrprl.Value == nil || len(*abrprl.Value) == 0
}

// hasNextLink returns true if the NextLink is not empty.
func (abrprl AzureBackupRecoveryPointResourceList) hasNextLink() bool {
	return abrprl.NextLink != nil && len(*abrprl.NextLink) != 0
}

// azureBackupRecoveryPointResourceListPreparer prepares a request to retrieve the next set of results.
// It returns nil if no more results exist.
func (abrprl AzureBackupRecoveryPointResourceList) azureBackupRecoveryPointResourceListPreparer(ctx context.Context) (*http.Request, error) {
	if !abrprl.hasNextLink() {
		return nil, nil
	}
	return autorest.Prepare((&http.Request{}).WithContext(ctx),
		autorest.AsJSON(),
		autorest.AsGet(),
		autorest.WithBaseURL(to.String(abrprl.NextLink)))
}

// AzureBackupRecoveryPointResourceListPage contains a page of AzureBackupRecoveryPointResource values.
type AzureBackupRecoveryPointResourceListPage struct {
	fn     func(context.Context, AzureBackupRecoveryPointResourceList) (AzureBackupRecoveryPointResourceList, error)
	abrprl AzureBackupRecoveryPointResourceList
}

// NextWithContext advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
func (page *AzureBackupRecoveryPointResourceListPage) NextWithContext(ctx context.Context) (err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/AzureBackupRecoveryPointResourceListPage.NextWithContext")
		defer func() {
			// -1 indicates no HTTP response was received for the span.
			sc := -1
			if page.Response().Response.Response != nil {
				sc = page.Response().Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	for {
		next, err := page.fn(ctx, page.abrprl)
		if err != nil {
			return err
		}
		page.abrprl = next
		// Keep fetching while the service returns empty pages that still
		// advertise a next link; stop on the first non-empty or final page.
		if !next.hasNextLink() || !next.IsEmpty() {
			break
		}
	}
	return nil
}

// Next advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (page *AzureBackupRecoveryPointResourceListPage) Next() error {
	return page.NextWithContext(context.Background())
}

// NotDone returns true if the page enumeration should be started or is not yet complete.
func (page AzureBackupRecoveryPointResourceListPage) NotDone() bool {
	return !page.abrprl.IsEmpty()
}

// Response returns the raw server response from the last page request.
func (page AzureBackupRecoveryPointResourceListPage) Response() AzureBackupRecoveryPointResourceList {
	return page.abrprl
}

// Values returns the slice of values for the current page or nil if there are no values.
func (page AzureBackupRecoveryPointResourceListPage) Values() []AzureBackupRecoveryPointResource {
	if page.abrprl.IsEmpty() {
		return nil
	}
	return *page.abrprl.Value
}

// Creates a new instance of the AzureBackupRecoveryPointResourceListPage type.
func NewAzureBackupRecoveryPointResourceListPage(cur AzureBackupRecoveryPointResourceList, getNextPage func(context.Context, AzureBackupRecoveryPointResourceList) (AzureBackupRecoveryPointResourceList, error)) AzureBackupRecoveryPointResourceListPage {
	return AzureBackupRecoveryPointResourceListPage{
		fn:     getNextPage,
		abrprl: cur,
	}
}

// AzureBackupRecoveryTimeBasedRestoreRequest azureBackup RecoveryPointTime Based Restore Request
type AzureBackupRecoveryTimeBasedRestoreRequest struct {
	// RecoveryPointTime - The recovery time in ISO 8601 format example - 2020-08-14T17:30:00.0000000Z.
	RecoveryPointTime *string `json:"recoveryPointTime,omitempty"`
	// RestoreTargetInfo - Gets or sets the restore target information.
	RestoreTargetInfo BasicRestoreTargetInfoBase `json:"restoreTargetInfo,omitempty"`
	// SourceDataStoreType - Gets or sets the type of the source data store. Possible values include: 'SourceDataStoreTypeArchiveStore', 'SourceDataStoreTypeSnapshotStore', 'SourceDataStoreTypeVaultStore'
	SourceDataStoreType SourceDataStoreType `json:"sourceDataStoreType,omitempty"`
	// ObjectType - Discriminator for the polymorphic restore-request hierarchy. Possible values include: 'ObjectTypeAzureBackupRestoreRequest', 'ObjectTypeAzureBackupRecoveryPointBasedRestoreRequest', 'ObjectTypeAzureBackupRestoreWithRehydrationRequest', 'ObjectTypeAzureBackupRecoveryTimeBasedRestoreRequest'
	ObjectType ObjectTypeBasicAzureBackupRestoreRequest `json:"objectType,omitempty"`
}

// MarshalJSON is the custom marshaler for AzureBackupRecoveryTimeBasedRestoreRequest.
func (abrtbrr AzureBackupRecoveryTimeBasedRestoreRequest) MarshalJSON() ([]byte, error) {
	// Force the discriminator so the service can identify the concrete type.
	abrtbrr.ObjectType = ObjectTypeAzureBackupRecoveryTimeBasedRestoreRequest
	objectMap := make(map[string]interface{})
	if abrtbrr.RecoveryPointTime != nil {
		objectMap["recoveryPointTime"] = abrtbrr.RecoveryPointTime
	}
	objectMap["restoreTargetInfo"] = abrtbrr.RestoreTargetInfo
	if abrtbrr.SourceDataStoreType != "" {
		objectMap["sourceDataStoreType"] = abrtbrr.SourceDataStoreType
	}
	if abrtbrr.ObjectType != "" {
		objectMap["objectType"] = abrtbrr.ObjectType
	}
	return json.Marshal(objectMap)
}

// AsAzureBackupRecoveryPointBasedRestoreRequest is the BasicAzureBackupRestoreRequest implementation for AzureBackupRecoveryTimeBasedRestoreRequest.
func (abrtbrr AzureBackupRecoveryTimeBasedRestoreRequest) AsAzureBackupRecoveryPointBasedRestoreRequest() (*AzureBackupRecoveryPointBasedRestoreRequest, bool) {
	return nil, false
}

// AsBasicAzureBackupRecoveryPointBasedRestoreRequest is the BasicAzureBackupRestoreRequest implementation for AzureBackupRecoveryTimeBasedRestoreRequest.
func (abrtbrr AzureBackupRecoveryTimeBasedRestoreRequest) AsBasicAzureBackupRecoveryPointBasedRestoreRequest() (BasicAzureBackupRecoveryPointBasedRestoreRequest, bool) {
	return nil, false
}

// AsAzureBackupRestoreWithRehydrationRequest is the BasicAzureBackupRestoreRequest implementation for AzureBackupRecoveryTimeBasedRestoreRequest.
func (abrtbrr AzureBackupRecoveryTimeBasedRestoreRequest) AsAzureBackupRestoreWithRehydrationRequest() (*AzureBackupRestoreWithRehydrationRequest, bool) {
	return nil, false
}

// AsAzureBackupRecoveryTimeBasedRestoreRequest is the BasicAzureBackupRestoreRequest implementation for AzureBackupRecoveryTimeBasedRestoreRequest.
func (abrtbrr AzureBackupRecoveryTimeBasedRestoreRequest) AsAzureBackupRecoveryTimeBasedRestoreRequest() (*AzureBackupRecoveryTimeBasedRestoreRequest, bool) {
	return &abrtbrr, true
}

// AsAzureBackupRestoreRequest is the BasicAzureBackupRestoreRequest implementation for AzureBackupRecoveryTimeBasedRestoreRequest.
func (abrtbrr AzureBackupRecoveryTimeBasedRestoreRequest) AsAzureBackupRestoreRequest() (*AzureBackupRestoreRequest, bool) {
	return nil, false
}

// AsBasicAzureBackupRestoreRequest is the BasicAzureBackupRestoreRequest implementation for AzureBackupRecoveryTimeBasedRestoreRequest.
func (abrtbrr AzureBackupRecoveryTimeBasedRestoreRequest) AsBasicAzureBackupRestoreRequest() (BasicAzureBackupRestoreRequest, bool) {
	return &abrtbrr, true
}

// UnmarshalJSON is the custom unmarshaler for AzureBackupRecoveryTimeBasedRestoreRequest struct.
// It decodes field-by-field so the polymorphic RestoreTargetInfo can be routed
// through its own discriminator-aware unmarshal helper.
func (abrtbrr *AzureBackupRecoveryTimeBasedRestoreRequest) UnmarshalJSON(body []byte) error {
	var m map[string]*json.RawMessage
	err := json.Unmarshal(body, &m)
	if err != nil {
		return err
	}
	for k, v := range m {
		switch k {
		case "recoveryPointTime":
			if v != nil {
				var recoveryPointTime string
				err = json.Unmarshal(*v, &recoveryPointTime)
				if err != nil {
					return err
				}
				abrtbrr.RecoveryPointTime = &recoveryPointTime
			}
		case "restoreTargetInfo":
			if v != nil {
				restoreTargetInfo, err := unmarshalBasicRestoreTargetInfoBase(*v)
				if err != nil {
					return err
				}
				abrtbrr.RestoreTargetInfo = restoreTargetInfo
			}
		case "sourceDataStoreType":
			if v != nil {
				var sourceDataStoreType SourceDataStoreType
				err = json.Unmarshal(*v, &sourceDataStoreType)
				if err != nil {
					return err
				}
				abrtbrr.SourceDataStoreType = sourceDataStoreType
			}
		case "objectType":
			if v != nil {
				var objectType ObjectTypeBasicAzureBackupRestoreRequest
				err = json.Unmarshal(*v, &objectType)
				if err != nil {
					return err
				}
				abrtbrr.ObjectType = objectType
			}
		}
	}

	return nil
}

// AzureBackupRehydrationRequest azure Backup Rehydrate Request
type AzureBackupRehydrationRequest struct {
	// RecoveryPointID - Id of the recovery point to be recovered
	RecoveryPointID *string `json:"recoveryPointId,omitempty"`
	// RehydrationPriority - Priority to be used for rehydration. Values High or Standard. Possible values include: 'RehydrationPriorityInvalid', 'RehydrationPriorityHigh', 'RehydrationPriorityStandard'
	RehydrationPriority RehydrationPriority `json:"rehydrationPriority,omitempty"`
	// RehydrationRetentionDuration - Retention duration in ISO 8601 format i.e P10D .
	RehydrationRetentionDuration *string `json:"rehydrationRetentionDuration,omitempty"`
}

// BasicAzureBackupRestoreRequest azure backup restore request
type BasicAzureBackupRestoreRequest interface {
	AsAzureBackupRecoveryPointBasedRestoreRequest() (*AzureBackupRecoveryPointBasedRestoreRequest, bool)
	AsBasicAzureBackupRecoveryPointBasedRestoreRequest() (BasicAzureBackupRecoveryPointBasedRestoreRequest, bool)
	AsAzureBackupRestoreWithRehydrationRequest() (*AzureBackupRestoreWithRehydrationRequest, bool)
	AsAzureBackupRecoveryTimeBasedRestoreRequest() (*AzureBackupRecoveryTimeBasedRestoreRequest, bool)
	AsAzureBackupRestoreRequest() (*AzureBackupRestoreRequest, bool)
}

// AzureBackupRestoreRequest azure backup restore request
type AzureBackupRestoreRequest struct {
	// RestoreTargetInfo - Gets or sets the restore target information.
	RestoreTargetInfo BasicRestoreTargetInfoBase `json:"restoreTargetInfo,omitempty"`
	// SourceDataStoreType - Gets or sets the type of the source data store. Possible values include: 'SourceDataStoreTypeArchiveStore', 'SourceDataStoreTypeSnapshotStore', 'SourceDataStoreTypeVaultStore'
	SourceDataStoreType SourceDataStoreType `json:"sourceDataStoreType,omitempty"`
	// ObjectType - Possible values include: 'ObjectTypeAzureBackupRestoreRequest', 'ObjectTypeAzureBackupRecoveryPointBasedRestoreRequest', 'ObjectTypeAzureBackupRestoreWithRehydrationRequest', 'ObjectTypeAzureBackupRecoveryTimeBasedRestoreRequest'
	ObjectType ObjectTypeBasicAzureBackupRestoreRequest `json:"objectType,omitempty"`
}

// unmarshalBasicAzureBackupRestoreRequest decodes body into the concrete type
// selected by the "objectType" discriminator, falling back to the base type.
func unmarshalBasicAzureBackupRestoreRequest(body []byte) (BasicAzureBackupRestoreRequest, error) {
	var m map[string]interface{}
	err := json.Unmarshal(body, &m)
	if err != nil {
		return nil, err
	}

	switch m["objectType"] {
	case string(ObjectTypeAzureBackupRecoveryPointBasedRestoreRequest):
		var abrpbrr AzureBackupRecoveryPointBasedRestoreRequest
		err := json.Unmarshal(body, &abrpbrr)
		return abrpbrr, err
	case string(ObjectTypeAzureBackupRestoreWithRehydrationRequest):
		var abrwrr AzureBackupRestoreWithRehydrationRequest
		err := json.Unmarshal(body, &abrwrr)
		return abrwrr, err
	case string(ObjectTypeAzureBackupRecoveryTimeBasedRestoreRequest):
		var abrtbrr AzureBackupRecoveryTimeBasedRestoreRequest
		err := json.Unmarshal(body, &abrtbrr)
		return abrtbrr, err
	default:
		var abrr AzureBackupRestoreRequest
		err := json.Unmarshal(body, &abrr)
		return abrr, err
	}
}

// unmarshalBasicAzureBackupRestoreRequestArray decodes a JSON array element-wise
// through unmarshalBasicAzureBackupRestoreRequest.
func unmarshalBasicAzureBackupRestoreRequestArray(body []byte) ([]BasicAzureBackupRestoreRequest, error) {
	var rawMessages []*json.RawMessage
	err := json.Unmarshal(body, &rawMessages)
	if err != nil {
		return nil, err
	}

	abrrArray := make([]BasicAzureBackupRestoreRequest, len(rawMessages))

	for index, rawMessage := range rawMessages {
		abrr, err := unmarshalBasicAzureBackupRestoreRequest(*rawMessage)
		if err != nil {
			return nil, err
		}
		abrrArray[index] = abrr
	}
	return abrrArray, nil
}

// MarshalJSON is the custom marshaler for AzureBackupRestoreRequest.
func (abrr AzureBackupRestoreRequest) MarshalJSON() ([]byte, error) {
	// Force the discriminator so the service can identify the concrete type.
	abrr.ObjectType = ObjectTypeAzureBackupRestoreRequest
	objectMap := make(map[string]interface{})
	objectMap["restoreTargetInfo"] = abrr.RestoreTargetInfo
	if abrr.SourceDataStoreType != "" {
		objectMap["sourceDataStoreType"] = abrr.SourceDataStoreType
	}
	if abrr.ObjectType != "" {
		objectMap["objectType"] = abrr.ObjectType
	}
	return json.Marshal(objectMap)
}

// AsAzureBackupRecoveryPointBasedRestoreRequest is the BasicAzureBackupRestoreRequest implementation for AzureBackupRestoreRequest.
func (abrr AzureBackupRestoreRequest) AsAzureBackupRecoveryPointBasedRestoreRequest() (*AzureBackupRecoveryPointBasedRestoreRequest, bool) {
	return nil, false
}

// AsBasicAzureBackupRecoveryPointBasedRestoreRequest is the BasicAzureBackupRestoreRequest implementation for AzureBackupRestoreRequest.
func (abrr AzureBackupRestoreRequest) AsBasicAzureBackupRecoveryPointBasedRestoreRequest() (BasicAzureBackupRecoveryPointBasedRestoreRequest, bool) {
	return nil, false
}

// AsAzureBackupRestoreWithRehydrationRequest is the BasicAzureBackupRestoreRequest implementation for AzureBackupRestoreRequest.
func (abrr AzureBackupRestoreRequest) AsAzureBackupRestoreWithRehydrationRequest() (*AzureBackupRestoreWithRehydrationRequest, bool) {
	return nil, false
}

// AsAzureBackupRecoveryTimeBasedRestoreRequest is the BasicAzureBackupRestoreRequest implementation for AzureBackupRestoreRequest.
func (abrr AzureBackupRestoreRequest) AsAzureBackupRecoveryTimeBasedRestoreRequest() (*AzureBackupRecoveryTimeBasedRestoreRequest, bool) {
	return nil, false
}

// AsAzureBackupRestoreRequest is the BasicAzureBackupRestoreRequest implementation for AzureBackupRestoreRequest.
func (abrr AzureBackupRestoreRequest) AsAzureBackupRestoreRequest() (*AzureBackupRestoreRequest, bool) {
	return &abrr, true
}

// AsBasicAzureBackupRestoreRequest is the BasicAzureBackupRestoreRequest implementation for AzureBackupRestoreRequest.
func (abrr AzureBackupRestoreRequest) AsBasicAzureBackupRestoreRequest() (BasicAzureBackupRestoreRequest, bool) {
	return &abrr, true
}

// UnmarshalJSON is the custom unmarshaler for AzureBackupRestoreRequest struct.
// Decodes field-by-field so the polymorphic RestoreTargetInfo can be routed
// through its discriminator-aware unmarshal helper.
func (abrr *AzureBackupRestoreRequest) UnmarshalJSON(body []byte) error {
	var m map[string]*json.RawMessage
	err := json.Unmarshal(body, &m)
	if err != nil {
		return err
	}
	for k, v := range m {
		switch k {
		case "restoreTargetInfo":
			if v != nil {
				restoreTargetInfo, err := unmarshalBasicRestoreTargetInfoBase(*v)
				if err != nil {
					return err
				}
				abrr.RestoreTargetInfo = restoreTargetInfo
			}
		case "sourceDataStoreType":
			if v != nil {
				var sourceDataStoreType SourceDataStoreType
				err = json.Unmarshal(*v, &sourceDataStoreType)
				if err != nil {
					return err
				}
				abrr.SourceDataStoreType = sourceDataStoreType
			}
		case "objectType":
			if v != nil {
				var objectType ObjectTypeBasicAzureBackupRestoreRequest
				err = json.Unmarshal(*v, &objectType)
				if err != nil {
					return err
				}
				abrr.ObjectType = objectType
			}
		}
	}

	return nil
}

// AzureBackupRestoreWithRehydrationRequest azureBackup Restore with Rehydration Request
type AzureBackupRestoreWithRehydrationRequest struct {
	// RehydrationPriority - Priority to be used for rehydration. Values High or Standard. Possible values include: 'RehydrationPriorityInvalid', 'RehydrationPriorityHigh', 'RehydrationPriorityStandard'
	RehydrationPriority RehydrationPriority `json:"rehydrationPriority,omitempty"`
	// RehydrationRetentionDuration - Retention duration in ISO 8601 format i.e P10D .
	RehydrationRetentionDuration *string `json:"rehydrationRetentionDuration,omitempty"`
	// RecoveryPointID - Id of the recovery point to restore from.
	RecoveryPointID *string `json:"recoveryPointId,omitempty"`
	// RestoreTargetInfo - Gets or sets the restore target information.
	RestoreTargetInfo BasicRestoreTargetInfoBase `json:"restoreTargetInfo,omitempty"`
	// SourceDataStoreType - Gets or sets the type of the source data store. Possible values include: 'SourceDataStoreTypeArchiveStore', 'SourceDataStoreTypeSnapshotStore', 'SourceDataStoreTypeVaultStore'
	SourceDataStoreType SourceDataStoreType `json:"sourceDataStoreType,omitempty"`
	// ObjectType - Possible values include: 'ObjectTypeAzureBackupRestoreRequest', 'ObjectTypeAzureBackupRecoveryPointBasedRestoreRequest', 'ObjectTypeAzureBackupRestoreWithRehydrationRequest', 'ObjectTypeAzureBackupRecoveryTimeBasedRestoreRequest'
	ObjectType ObjectTypeBasicAzureBackupRestoreRequest `json:"objectType,omitempty"`
}

// MarshalJSON is the custom marshaler for AzureBackupRestoreWithRehydrationRequest.
func (abrwrr AzureBackupRestoreWithRehydrationRequest) MarshalJSON() ([]byte, error) {
	// Force the discriminator so the service can identify the concrete type.
	abrwrr.ObjectType = ObjectTypeAzureBackupRestoreWithRehydrationRequest
	objectMap := make(map[string]interface{})
	if abrwrr.RehydrationPriority != "" {
		objectMap["rehydrationPriority"] = abrwrr.RehydrationPriority
	}
	if abrwrr.RehydrationRetentionDuration != nil {
		objectMap["rehydrationRetentionDuration"] = abrwrr.RehydrationRetentionDuration
	}
	if abrwrr.RecoveryPointID != nil {
		objectMap["recoveryPointId"] = abrwrr.RecoveryPointID
	}
	objectMap["restoreTargetInfo"] = abrwrr.RestoreTargetInfo
	if abrwrr.SourceDataStoreType != "" {
		objectMap["sourceDataStoreType"] = abrwrr.SourceDataStoreType
	}
	if abrwrr.ObjectType != "" {
		objectMap["objectType"] = abrwrr.ObjectType
	}
	return json.Marshal(objectMap)
}

// AsAzureBackupRecoveryPointBasedRestoreRequest is the BasicAzureBackupRestoreRequest implementation for AzureBackupRestoreWithRehydrationRequest.
func (abrwrr AzureBackupRestoreWithRehydrationRequest) AsAzureBackupRecoveryPointBasedRestoreRequest() (*AzureBackupRecoveryPointBasedRestoreRequest, bool) {
	return nil, false
}

// AsBasicAzureBackupRecoveryPointBasedRestoreRequest is the BasicAzureBackupRestoreRequest implementation for AzureBackupRestoreWithRehydrationRequest.
// The rehydration request is a specialization of the recovery-point-based
// request, so this conversion succeeds.
func (abrwrr AzureBackupRestoreWithRehydrationRequest) AsBasicAzureBackupRecoveryPointBasedRestoreRequest() (BasicAzureBackupRecoveryPointBasedRestoreRequest, bool) {
	return &abrwrr, true
}

// AsAzureBackupRestoreWithRehydrationRequest is the BasicAzureBackupRestoreRequest implementation for AzureBackupRestoreWithRehydrationRequest.
func (abrwrr AzureBackupRestoreWithRehydrationRequest) AsAzureBackupRestoreWithRehydrationRequest() (*AzureBackupRestoreWithRehydrationRequest, bool) {
	return &abrwrr, true
}

// AsAzureBackupRecoveryTimeBasedRestoreRequest is the BasicAzureBackupRestoreRequest implementation for AzureBackupRestoreWithRehydrationRequest.
func (abrwrr AzureBackupRestoreWithRehydrationRequest) AsAzureBackupRecoveryTimeBasedRestoreRequest() (*AzureBackupRecoveryTimeBasedRestoreRequest, bool) {
	return nil, false
}

// AsAzureBackupRestoreRequest is the BasicAzureBackupRestoreRequest implementation for AzureBackupRestoreWithRehydrationRequest.
func (abrwrr AzureBackupRestoreWithRehydrationRequest) AsAzureBackupRestoreRequest() (*AzureBackupRestoreRequest, bool) {
	return nil, false
}

// AsBasicAzureBackupRestoreRequest is the BasicAzureBackupRestoreRequest implementation for AzureBackupRestoreWithRehydrationRequest.
func (abrwrr AzureBackupRestoreWithRehydrationRequest) AsBasicAzureBackupRestoreRequest() (BasicAzureBackupRestoreRequest, bool) {
	return &abrwrr, true
}

// UnmarshalJSON is the custom unmarshaler for AzureBackupRestoreWithRehydrationRequest struct.
func (abrwrr *AzureBackupRestoreWithRehydrationRequest) UnmarshalJSON(body []byte) error {
	var m map[string]*json.RawMessage
	err := json.Unmarshal(body, &m)
	if err != nil {
		return err
	}
	for k, v := range m {
		switch k {
		case "rehydrationPriority":
			if v != nil {
				var rehydrationPriority RehydrationPriority
				err = json.Unmarshal(*v, &rehydrationPriority)
				if err != nil {
					return err
				}
				abrwrr.RehydrationPriority = rehydrationPriority
			}
		case "rehydrationRetentionDuration":
			if v != nil {
				var rehydrationRetentionDuration string
				err = json.Unmarshal(*v, &rehydrationRetentionDuration)
				if err != nil {
					return err
				}
				abrwrr.RehydrationRetentionDuration = &rehydrationRetentionDuration
			}
		case "recoveryPointId":
			if v != nil {
				var recoveryPointID string
				err = json.Unmarshal(*v, &recoveryPointID)
				if err != nil {
					return err
				}
				abrwrr.RecoveryPointID = &recoveryPointID
			}
		case "restoreTargetInfo":
			if v != nil {
				restoreTargetInfo, err := unmarshalBasicRestoreTargetInfoBase(*v)
				if err != nil {
					return err
				}
				abrwrr.RestoreTargetInfo = restoreTargetInfo
			}
		case "sourceDataStoreType":
			if v != nil {
				var sourceDataStoreType SourceDataStoreType
				err = json.Unmarshal(*v, &sourceDataStoreType)
				if err != nil {
					return err
				}
				abrwrr.SourceDataStoreType = sourceDataStoreType
			}
		case "objectType":
			if v != nil {
				var objectType ObjectTypeBasicAzureBackupRestoreRequest
				err = json.Unmarshal(*v, &objectType)
				if err != nil {
					return err
				}
				abrwrr.ObjectType = objectType
			}
		}
	}

	return nil
}

// AzureBackupRule azure backup rule
type AzureBackupRule struct {
	// BackupParameters - Polymorphic backup parameters for this rule.
	BackupParameters BasicBackupParameters `json:"backupParameters,omitempty"`
	// DataStore - Target data store information.
	DataStore *DataStoreInfoBase `json:"dataStore,omitempty"`
	// Trigger - Polymorphic trigger context for this rule.
	Trigger BasicTriggerContext `json:"trigger,omitempty"`
	// Name - Rule name.
	Name *string `json:"name,omitempty"`
	// ObjectType - Possible values include: 'ObjectTypeBasePolicyRule', 'ObjectTypeAzureBackupRule', 'ObjectTypeAzureRetentionRule'
	ObjectType ObjectTypeBasicBasePolicyRule `json:"objectType,omitempty"`
}

// MarshalJSON is the custom marshaler for AzureBackupRule.
func (abr AzureBackupRule) MarshalJSON() ([]byte, error) {
	// Force the discriminator so the service can identify the concrete type.
	abr.ObjectType = ObjectTypeAzureBackupRule
	objectMap := make(map[string]interface{})
	objectMap["backupParameters"] = abr.BackupParameters
	if abr.DataStore != nil {
		objectMap["dataStore"] = abr.DataStore
	}
	objectMap["trigger"] = abr.Trigger
	if abr.Name != nil {
		objectMap["name"] = abr.Name
	}
	if abr.ObjectType != "" {
		objectMap["objectType"] = abr.ObjectType
	}
	return json.Marshal(objectMap)
}

// AsAzureBackupRule is the BasicBasePolicyRule implementation for AzureBackupRule.
func (abr AzureBackupRule) AsAzureBackupRule() (*AzureBackupRule, bool) {
	return &abr, true
}

// AsAzureRetentionRule is the BasicBasePolicyRule implementation for AzureBackupRule.
func (abr AzureBackupRule) AsAzureRetentionRule() (*AzureRetentionRule, bool) {
	return nil, false
}

// AsBasePolicyRule is the BasicBasePolicyRule implementation for AzureBackupRule.
func (abr AzureBackupRule) AsBasePolicyRule() (*BasePolicyRule, bool) {
	return nil, false
}

// AsBasicBasePolicyRule is the BasicBasePolicyRule implementation for AzureBackupRule.
func (abr AzureBackupRule) AsBasicBasePolicyRule() (BasicBasePolicyRule, bool) {
	return &abr, true
}

// UnmarshalJSON is the custom unmarshaler for AzureBackupRule struct.
// Decodes field-by-field so the polymorphic BackupParameters and Trigger can
// be routed through their discriminator-aware unmarshal helpers.
func (abr *AzureBackupRule) UnmarshalJSON(body []byte) error {
	var m map[string]*json.RawMessage
	err := json.Unmarshal(body, &m)
	if err != nil {
		return err
	}
	for k, v := range m {
		switch k {
		case "backupParameters":
			if v != nil {
				backupParameters, err := unmarshalBasicBackupParameters(*v)
				if err != nil {
					return err
				}
				abr.BackupParameters = backupParameters
			}
		case "dataStore":
			if v != nil {
				var dataStore DataStoreInfoBase
				err = json.Unmarshal(*v, &dataStore)
				if err != nil {
					return err
				}
				abr.DataStore = &dataStore
			}
		case "trigger":
			if v != nil {
				trigger, err := unmarshalBasicTriggerContext(*v)
				if err != nil {
					return err
				}
				abr.Trigger = trigger
			}
		case "name":
			if v != nil {
				var name string
				err = json.Unmarshal(*v, &name)
				if err != nil {
					return err
				}
				abr.Name = &name
			}
		case "objectType":
			if v != nil {
				var objectType ObjectTypeBasicBasePolicyRule
				err = json.Unmarshal(*v, &objectType)
				if err != nil {
					return err
				}
				abr.ObjectType = objectType
			}
		}
	}

	return nil
}

// AzureOperationalStoreParameters parameters for Operational-Tier DataStore
type AzureOperationalStoreParameters struct {
	// ResourceGroupID - Gets or sets the Snapshot Resource Group Uri.
	ResourceGroupID *string `json:"resourceGroupId,omitempty"`
	// DataStoreType - type of datastore; Operational/Vault/Archive. Possible values include: 'OperationalStore', 'VaultStore', 'ArchiveStore'
	DataStoreType DataStoreTypes `json:"dataStoreType,omitempty"`
	// ObjectType - Possible values include: 'ObjectTypeDataStoreParameters', 'ObjectTypeAzureOperationalStoreParameters'
	ObjectType ObjectTypeBasicDataStoreParameters `json:"objectType,omitempty"`
}

// MarshalJSON is the custom marshaler for AzureOperationalStoreParameters.
func (aosp AzureOperationalStoreParameters) MarshalJSON() ([]byte, error) {
	// Force the discriminator so the service can identify the concrete type.
	aosp.ObjectType = ObjectTypeAzureOperationalStoreParameters
	objectMap := make(map[string]interface{})
	if aosp.ResourceGroupID != nil {
		objectMap["resourceGroupId"] = aosp.ResourceGroupID
	}
	if aosp.DataStoreType != "" {
		objectMap["dataStoreType"] = aosp.DataStoreType
	}
	if aosp.ObjectType != "" {
		objectMap["objectType"] = aosp.ObjectType
	}
	return json.Marshal(objectMap)
}

// AsAzureOperationalStoreParameters is the BasicDataStoreParameters implementation for AzureOperationalStoreParameters.
func (aosp AzureOperationalStoreParameters) AsAzureOperationalStoreParameters() (*AzureOperationalStoreParameters, bool) {
	return &aosp, true
}

// AsDataStoreParameters is the BasicDataStoreParameters implementation for AzureOperationalStoreParameters.
func (aosp AzureOperationalStoreParameters) AsDataStoreParameters() (*DataStoreParameters, bool) {
	return nil, false
}

// AsBasicDataStoreParameters is the BasicDataStoreParameters implementation for AzureOperationalStoreParameters.
func (aosp AzureOperationalStoreParameters) AsBasicDataStoreParameters() (BasicDataStoreParameters, bool) {
	return &aosp, true
}

// AzureRetentionRule azure retention rule
type AzureRetentionRule struct {
	// IsDefault - Whether this is the default retention rule of the policy.
	IsDefault *bool `json:"isDefault,omitempty"`
	// Lifecycles - Source life-cycle definitions for the rule.
	Lifecycles *[]SourceLifeCycle `json:"lifecycles,omitempty"`
	// Name - Rule name.
	Name *string `json:"name,omitempty"`
	// ObjectType - Possible values include: 'ObjectTypeBasePolicyRule', 'ObjectTypeAzureBackupRule', 'ObjectTypeAzureRetentionRule'
	ObjectType ObjectTypeBasicBasePolicyRule `json:"objectType,omitempty"`
}

// MarshalJSON is the custom marshaler for AzureRetentionRule.
func (arr AzureRetentionRule) MarshalJSON() ([]byte, error) {
	// Force the discriminator so the service can identify the concrete type.
	arr.ObjectType = ObjectTypeAzureRetentionRule
	objectMap := make(map[string]interface{})
	if arr.IsDefault != nil {
		objectMap["isDefault"] = arr.IsDefault
	}
	if arr.Lifecycles != nil {
		objectMap["lifecycles"] = arr.Lifecycles
	}
	if arr.Name != nil {
		objectMap["name"] = arr.Name
	}
	if arr.ObjectType != "" {
		objectMap["objectType"] = arr.ObjectType
	}
	return json.Marshal(objectMap)
}

// AsAzureBackupRule is the BasicBasePolicyRule implementation for AzureRetentionRule.
func (arr AzureRetentionRule) AsAzureBackupRule() (*AzureBackupRule, bool) {
	return nil, false
}

// AsAzureRetentionRule is the BasicBasePolicyRule implementation for AzureRetentionRule.
func (arr AzureRetentionRule) AsAzureRetentionRule() (*AzureRetentionRule, bool) {
	return &arr, true
}

// AsBasePolicyRule is the BasicBasePolicyRule implementation for AzureRetentionRule.
func (arr AzureRetentionRule) AsBasePolicyRule() (*BasePolicyRule, bool) {
	return nil, false
}

// AsBasicBasePolicyRule is the BasicBasePolicyRule implementation for AzureRetentionRule.
func (arr AzureRetentionRule) AsBasicBasePolicyRule() (BasicBasePolicyRule, bool) {
	return &arr, true
}

// BasicBackupCriteria backupCriteria base class
type BasicBackupCriteria interface {
	AsScheduleBasedBackupCriteria() (*ScheduleBasedBackupCriteria, bool)
	AsBackupCriteria() (*BackupCriteria, bool)
}

// BackupCriteria backupCriteria base class
type BackupCriteria struct {
	// ObjectType - Possible values include: 'ObjectTypeBackupCriteria', 'ObjectTypeScheduleBasedBackupCriteria'
	ObjectType ObjectTypeBasicBackupCriteria `json:"objectType,omitempty"`
}

// unmarshalBasicBackupCriteria decodes body into the concrete type selected by
// the "objectType" discriminator, falling back to the base type.
func unmarshalBasicBackupCriteria(body []byte) (BasicBackupCriteria, error) {
	var m map[string]interface{}
	err := json.Unmarshal(body, &m)
	if err != nil {
		return nil, err
	}

	switch m["objectType"] {
	case string(ObjectTypeScheduleBasedBackupCriteria):
		var sbbc ScheduleBasedBackupCriteria
		err := json.Unmarshal(body, &sbbc)
		return sbbc, err
	default:
		var bc BackupCriteria
		err := json.Unmarshal(body, &bc)
		return bc, err
	}
}

// unmarshalBasicBackupCriteriaArray decodes a JSON array element-wise through
// unmarshalBasicBackupCriteria.
func unmarshalBasicBackupCriteriaArray(body []byte) ([]BasicBackupCriteria, error) {
	var rawMessages []*json.RawMessage
	err := json.Unmarshal(body, &rawMessages)
	if err != nil {
		return nil, err
	}

	bcArray := make([]BasicBackupCriteria, len(rawMessages))

	for index, rawMessage := range rawMessages {
		bc, err := unmarshalBasicBackupCriteria(*rawMessage)
		if err != nil {
			return nil, err
		}
		bcArray[index] = bc
	}
	return bcArray, nil
}

// MarshalJSON is the custom marshaler for BackupCriteria.
func (bc BackupCriteria) MarshalJSON() ([]byte, error) {
	// Force the discriminator so the service can identify the concrete type.
	bc.ObjectType = ObjectTypeBackupCriteria
	objectMap := make(map[string]interface{})
	if bc.ObjectType != "" {
		objectMap["objectType"] = bc.ObjectType
	}
	return json.Marshal(objectMap)
}

// AsScheduleBasedBackupCriteria is the BasicBackupCriteria implementation for BackupCriteria.
func (bc BackupCriteria) AsScheduleBasedBackupCriteria() (*ScheduleBasedBackupCriteria, bool) {
	return nil, false
}

// AsBackupCriteria is the BasicBackupCriteria implementation for BackupCriteria.
func (bc BackupCriteria) AsBackupCriteria() (*BackupCriteria, bool) {
	return &bc, true
}

// AsBasicBackupCriteria is the BasicBackupCriteria implementation for BackupCriteria.
func (bc BackupCriteria) AsBasicBackupCriteria() (BasicBackupCriteria, bool) {
	return &bc, true
}

// BackupInstance backup Instance
type BackupInstance struct {
	// FriendlyName - Gets or sets the Backup Instance friendly name.
	FriendlyName *string `json:"friendlyName,omitempty"`
	// DataSourceInfo - Gets or sets the data source information.
	DataSourceInfo *Datasource `json:"dataSourceInfo,omitempty"`
	// DataSourceSetInfo - Gets or sets the data source set information.
	DataSourceSetInfo *DatasourceSet `json:"dataSourceSetInfo,omitempty"`
	// PolicyInfo - Gets or sets the policy information.
	PolicyInfo *PolicyInfo `json:"policyInfo,omitempty"`
	// ProtectionStatus - READ-ONLY; Specifies the protection status of the resource
	ProtectionStatus *ProtectionStatusDetails `json:"protectionStatus,omitempty"`
	// CurrentProtectionState - READ-ONLY; Specifies the current protection state of the resource. Possible values include: 'Invalid', 'NotProtected', 'ConfiguringProtection', 'ProtectionConfigured', 'BackupSchedulesSuspended', 'RetentionSchedulesSuspended', 'ProtectionStopped', 'ProtectionError', 'ConfiguringProtectionFailed', 'SoftDeleting', 'SoftDeleted', 'UpdatingProtection'
	CurrentProtectionState CurrentProtectionState `json:"currentProtectionState,omitempty"`
	// ProtectionErrorDetails - READ-ONLY; Specifies the protection error of the resource
	ProtectionErrorDetails *UserFacingError `json:"protectionErrorDetails,omitempty"`
	// ProvisioningState - READ-ONLY; Specifies the provisioning state of the resource i.e. provisioning/updating/Succeeded/Failed
	ProvisioningState *string `json:"provisioningState,omitempty"`
	// ObjectType - Type discriminator string for the instance.
	ObjectType *string `json:"objectType,omitempty"`
}

// MarshalJSON is the custom marshaler for BackupInstance.
// READ-ONLY fields (ProtectionStatus, CurrentProtectionState,
// ProtectionErrorDetails, ProvisioningState) are omitted from the payload.
func (bi BackupInstance) MarshalJSON() ([]byte, error) {
	objectMap := make(map[string]interface{})
	if bi.FriendlyName != nil {
		objectMap["friendlyName"] = bi.FriendlyName
	}
	if bi.DataSourceInfo != nil {
		objectMap["dataSourceInfo"] = bi.DataSourceInfo
	}
	if bi.DataSourceSetInfo != nil {
		objectMap["dataSourceSetInfo"] = bi.DataSourceSetInfo
	}
	if bi.PolicyInfo != nil {
		objectMap["policyInfo"] = bi.PolicyInfo
	}
	if bi.ObjectType != nil {
		objectMap["objectType"] = bi.ObjectType
	}
	return json.Marshal(objectMap)
}

// BackupInstanceResource backupInstance Resource
type BackupInstanceResource struct {
	autorest.Response `json:"-"`
	// Properties - BackupInstanceResource properties
	Properties *BackupInstance `json:"properties,omitempty"`
	// ID - READ-ONLY; Resource Id represents the complete path to the resource.
	ID *string `json:"id,omitempty"`
	// Name - READ-ONLY; Resource name associated with the resource.
	Name *string `json:"name,omitempty"`
	// Type - READ-ONLY; Resource type represents the complete path of the form Namespace/ResourceType/ResourceType/...
	Type *string `json:"type,omitempty"`
	// SystemData - ARM system metadata for the resource.
	SystemData *SystemData `json:"systemData,omitempty"`
}

// MarshalJSON is the custom marshaler for BackupInstanceResource.
// READ-ONLY fields (ID, Name, Type) are omitted from the payload.
func (bir BackupInstanceResource) MarshalJSON() ([]byte, error) {
	objectMap := make(map[string]interface{})
	if bir.Properties != nil {
		objectMap["properties"] = bir.Properties
	}
	if bir.SystemData != nil {
		objectMap["systemData"] = bir.SystemData
	}
	return json.Marshal(objectMap)
}

// BackupInstanceResourceList backupInstance Resource list response
type BackupInstanceResourceList struct {
	autorest.Response `json:"-"`
	// Value - List of resources.
	Value *[]BackupInstanceResource `json:"value,omitempty"`
	// NextLink - The uri to fetch the next page of resources. Call ListNext() fetches next page of resources.
	NextLink *string `json:"nextLink,omitempty"`
}

// BackupInstanceResourceListIterator provides access to a complete listing of BackupInstanceResource
// values.
type BackupInstanceResourceListIterator struct {
	i    int
	page BackupInstanceResourceListPage
}

// NextWithContext advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
func (iter *BackupInstanceResourceListIterator) NextWithContext(ctx context.Context) (err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/BackupInstanceResourceListIterator.NextWithContext")
		defer func() {
			// -1 indicates no HTTP response was received for the span.
			sc := -1
			if iter.Response().Response.Response != nil {
				sc = iter.Response().Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	iter.i++
	if iter.i < len(iter.page.Values()) {
		return nil
	}
	// Exhausted the current page; fetch the next one from the service.
	err = iter.page.NextWithContext(ctx)
	if err != nil {
		// Roll the index back so the iterator is unchanged on failure.
		iter.i--
		return err
	}
	iter.i = 0
	return nil
}

// Next advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (iter *BackupInstanceResourceListIterator) Next() error {
	return iter.NextWithContext(context.Background())
}

// NotDone returns true if the enumeration should be started or is not yet complete.
func (iter BackupInstanceResourceListIterator) NotDone() bool {
	return iter.page.NotDone() && iter.i < len(iter.page.Values())
}

// Response returns the raw server response from the last page request.
+func (iter BackupInstanceResourceListIterator) Response() BackupInstanceResourceList { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter BackupInstanceResourceListIterator) Value() BackupInstanceResource { + if !iter.page.NotDone() { + return BackupInstanceResource{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the BackupInstanceResourceListIterator type. +func NewBackupInstanceResourceListIterator(page BackupInstanceResourceListPage) BackupInstanceResourceListIterator { + return BackupInstanceResourceListIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (birl BackupInstanceResourceList) IsEmpty() bool { + return birl.Value == nil || len(*birl.Value) == 0 +} + +// hasNextLink returns true if the NextLink is not empty. +func (birl BackupInstanceResourceList) hasNextLink() bool { + return birl.NextLink != nil && len(*birl.NextLink) != 0 +} + +// backupInstanceResourceListPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (birl BackupInstanceResourceList) backupInstanceResourceListPreparer(ctx context.Context) (*http.Request, error) { + if !birl.hasNextLink() { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(birl.NextLink))) +} + +// BackupInstanceResourceListPage contains a page of BackupInstanceResource values. +type BackupInstanceResourceListPage struct { + fn func(context.Context, BackupInstanceResourceList) (BackupInstanceResourceList, error) + birl BackupInstanceResourceList +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. 
+func (page *BackupInstanceResourceListPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BackupInstanceResourceListPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + for { + next, err := page.fn(ctx, page.birl) + if err != nil { + return err + } + page.birl = next + if !next.hasNextLink() || !next.IsEmpty() { + break + } + } + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *BackupInstanceResourceListPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page BackupInstanceResourceListPage) NotDone() bool { + return !page.birl.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page BackupInstanceResourceListPage) Response() BackupInstanceResourceList { + return page.birl +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page BackupInstanceResourceListPage) Values() []BackupInstanceResource { + if page.birl.IsEmpty() { + return nil + } + return *page.birl.Value +} + +// Creates a new instance of the BackupInstanceResourceListPage type. +func NewBackupInstanceResourceListPage(cur BackupInstanceResourceList, getNextPage func(context.Context, BackupInstanceResourceList) (BackupInstanceResourceList, error)) BackupInstanceResourceListPage { + return BackupInstanceResourceListPage{ + fn: getNextPage, + birl: cur, + } +} + +// BackupInstancesAdhocBackupFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. 
+type BackupInstancesAdhocBackupFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(BackupInstancesClient) (OperationJobExtendedInfo, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *BackupInstancesAdhocBackupFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for BackupInstancesAdhocBackupFuture.Result. +func (future *BackupInstancesAdhocBackupFuture) result(client BackupInstancesClient) (ojei OperationJobExtendedInfo, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BackupInstancesAdhocBackupFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + ojei.Response.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("dataprotection.BackupInstancesAdhocBackupFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if ojei.Response.Response, err = future.GetResult(sender); err == nil && ojei.Response.Response.StatusCode != http.StatusNoContent { + ojei, err = client.AdhocBackupResponder(ojei.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BackupInstancesAdhocBackupFuture", "Result", ojei.Response.Response, "Failure responding to request") + } + } + return +} + +// BackupInstancesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. 
+type BackupInstancesCreateOrUpdateFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(BackupInstancesClient) (BackupInstanceResource, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *BackupInstancesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for BackupInstancesCreateOrUpdateFuture.Result. +func (future *BackupInstancesCreateOrUpdateFuture) result(client BackupInstancesClient) (bir BackupInstanceResource, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BackupInstancesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + bir.Response.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("dataprotection.BackupInstancesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if bir.Response.Response, err = future.GetResult(sender); err == nil && bir.Response.Response.StatusCode != http.StatusNoContent { + bir, err = client.CreateOrUpdateResponder(bir.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BackupInstancesCreateOrUpdateFuture", "Result", bir.Response.Response, "Failure responding to request") + } + } + return +} + +// BackupInstancesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. 
+type BackupInstancesDeleteFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(BackupInstancesClient) (autorest.Response, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *BackupInstancesDeleteFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for BackupInstancesDeleteFuture.Result. +func (future *BackupInstancesDeleteFuture) result(client BackupInstancesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BackupInstancesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + ar.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("dataprotection.BackupInstancesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// BackupInstancesTriggerRehydrateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type BackupInstancesTriggerRehydrateFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(BackupInstancesClient) (autorest.Response, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
+func (future *BackupInstancesTriggerRehydrateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for BackupInstancesTriggerRehydrateFuture.Result. +func (future *BackupInstancesTriggerRehydrateFuture) result(client BackupInstancesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BackupInstancesTriggerRehydrateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + ar.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("dataprotection.BackupInstancesTriggerRehydrateFuture") + return + } + ar.Response = future.Response() + return +} + +// BackupInstancesTriggerRestoreFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type BackupInstancesTriggerRestoreFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(BackupInstancesClient) (OperationJobExtendedInfo, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *BackupInstancesTriggerRestoreFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for BackupInstancesTriggerRestoreFuture.Result. 
+func (future *BackupInstancesTriggerRestoreFuture) result(client BackupInstancesClient) (ojei OperationJobExtendedInfo, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BackupInstancesTriggerRestoreFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + ojei.Response.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("dataprotection.BackupInstancesTriggerRestoreFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if ojei.Response.Response, err = future.GetResult(sender); err == nil && ojei.Response.Response.StatusCode != http.StatusNoContent { + ojei, err = client.TriggerRestoreResponder(ojei.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BackupInstancesTriggerRestoreFuture", "Result", ojei.Response.Response, "Failure responding to request") + } + } + return +} + +// BackupInstancesValidateForBackupFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type BackupInstancesValidateForBackupFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(BackupInstancesClient) (OperationJobExtendedInfo, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *BackupInstancesValidateForBackupFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for BackupInstancesValidateForBackupFuture.Result. 
+func (future *BackupInstancesValidateForBackupFuture) result(client BackupInstancesClient) (ojei OperationJobExtendedInfo, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BackupInstancesValidateForBackupFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + ojei.Response.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("dataprotection.BackupInstancesValidateForBackupFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if ojei.Response.Response, err = future.GetResult(sender); err == nil && ojei.Response.Response.StatusCode != http.StatusNoContent { + ojei, err = client.ValidateForBackupResponder(ojei.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BackupInstancesValidateForBackupFuture", "Result", ojei.Response.Response, "Failure responding to request") + } + } + return +} + +// BackupInstancesValidateRestoreFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type BackupInstancesValidateRestoreFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(BackupInstancesClient) (OperationJobExtendedInfo, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *BackupInstancesValidateRestoreFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for BackupInstancesValidateRestoreFuture.Result. 
func (future *BackupInstancesValidateRestoreFuture) result(client BackupInstancesClient) (ojei OperationJobExtendedInfo, err error) {
	var done bool
	done, err = future.DoneWithContext(context.Background(), client)
	if err != nil {
		err = autorest.NewErrorWithError(err, "dataprotection.BackupInstancesValidateRestoreFuture", "Result", future.Response(), "Polling failure")
		return
	}
	if !done {
		ojei.Response.Response = future.Response()
		err = azure.NewAsyncOpIncompleteError("dataprotection.BackupInstancesValidateRestoreFuture")
		return
	}
	sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
	if ojei.Response.Response, err = future.GetResult(sender); err == nil && ojei.Response.Response.StatusCode != http.StatusNoContent {
		ojei, err = client.ValidateRestoreResponder(ojei.Response.Response)
		if err != nil {
			err = autorest.NewErrorWithError(err, "dataprotection.BackupInstancesValidateRestoreFuture", "Result", ojei.Response.Response, "Failure responding to request")
		}
	}
	return
}

// BasicBackupParameters backupParameters base
type BasicBackupParameters interface {
	AsAzureBackupParams() (*AzureBackupParams, bool)
	AsBackupParameters() (*BackupParameters, bool)
}

// BackupParameters backupParameters base
type BackupParameters struct {
	// ObjectType - Possible values include: 'ObjectTypeBackupParameters', 'ObjectTypeAzureBackupParams'
	ObjectType ObjectTypeBasicBackupParameters `json:"objectType,omitempty"`
}

// unmarshalBasicBackupParameters decodes body into the concrete type selected
// by the "objectType" discriminator, falling back to the base BackupParameters
// when the discriminator is absent or unrecognized.
func unmarshalBasicBackupParameters(body []byte) (BasicBackupParameters, error) {
	var m map[string]interface{}
	err := json.Unmarshal(body, &m)
	if err != nil {
		return nil, err
	}

	switch m["objectType"] {
	case string(ObjectTypeAzureBackupParams):
		var abp AzureBackupParams
		err := json.Unmarshal(body, &abp)
		return abp, err
	default:
		var bp BackupParameters
		err := json.Unmarshal(body, &bp)
		return bp, err
	}
}

// unmarshalBasicBackupParametersArray decodes a JSON array, dispatching each
// element through unmarshalBasicBackupParameters.
func unmarshalBasicBackupParametersArray(body []byte) ([]BasicBackupParameters, error) {
	var rawMessages []*json.RawMessage
	err := json.Unmarshal(body, &rawMessages)
	if err != nil {
		return nil, err
	}

	bpArray := make([]BasicBackupParameters, len(rawMessages))

	for index, rawMessage := range rawMessages {
		bp, err := unmarshalBasicBackupParameters(*rawMessage)
		if err != nil {
			return nil, err
		}
		bpArray[index] = bp
	}
	return bpArray, nil
}

// MarshalJSON is the custom marshaler for BackupParameters.
// The discriminator is forced to ObjectTypeBackupParameters before encoding.
func (bp BackupParameters) MarshalJSON() ([]byte, error) {
	bp.ObjectType = ObjectTypeBackupParameters
	objectMap := make(map[string]interface{})
	if bp.ObjectType != "" {
		objectMap["objectType"] = bp.ObjectType
	}
	return json.Marshal(objectMap)
}

// AsAzureBackupParams is the BasicBackupParameters implementation for BackupParameters.
func (bp BackupParameters) AsAzureBackupParams() (*AzureBackupParams, bool) {
	return nil, false
}

// AsBackupParameters is the BasicBackupParameters implementation for BackupParameters.
func (bp BackupParameters) AsBackupParameters() (*BackupParameters, bool) {
	return &bp, true
}

// AsBasicBackupParameters is the BasicBackupParameters implementation for BackupParameters.
+func (bp BackupParameters) AsBasicBackupParameters() (BasicBackupParameters, bool) { + return &bp, true +} + +// BackupPolicy rule based backup policy +type BackupPolicy struct { + // PolicyRules - Policy rule dictionary that contains rules for each backuptype i.e Full/Incremental/Logs etc + PolicyRules *[]BasicBasePolicyRule `json:"policyRules,omitempty"` + // DatasourceTypes - Type of datasource for the backup management + DatasourceTypes *[]string `json:"datasourceTypes,omitempty"` + // ObjectType - Possible values include: 'ObjectTypeBaseBackupPolicy', 'ObjectTypeBackupPolicy' + ObjectType ObjectTypeBasicBaseBackupPolicy `json:"objectType,omitempty"` +} + +// MarshalJSON is the custom marshaler for BackupPolicy. +func (bp BackupPolicy) MarshalJSON() ([]byte, error) { + bp.ObjectType = ObjectTypeBackupPolicy + objectMap := make(map[string]interface{}) + if bp.PolicyRules != nil { + objectMap["policyRules"] = bp.PolicyRules + } + if bp.DatasourceTypes != nil { + objectMap["datasourceTypes"] = bp.DatasourceTypes + } + if bp.ObjectType != "" { + objectMap["objectType"] = bp.ObjectType + } + return json.Marshal(objectMap) +} + +// AsBackupPolicy is the BasicBaseBackupPolicy implementation for BackupPolicy. +func (bp BackupPolicy) AsBackupPolicy() (*BackupPolicy, bool) { + return &bp, true +} + +// AsBaseBackupPolicy is the BasicBaseBackupPolicy implementation for BackupPolicy. +func (bp BackupPolicy) AsBaseBackupPolicy() (*BaseBackupPolicy, bool) { + return nil, false +} + +// AsBasicBaseBackupPolicy is the BasicBaseBackupPolicy implementation for BackupPolicy. +func (bp BackupPolicy) AsBasicBaseBackupPolicy() (BasicBaseBackupPolicy, bool) { + return &bp, true +} + +// UnmarshalJSON is the custom unmarshaler for BackupPolicy struct. 
func (bp *BackupPolicy) UnmarshalJSON(body []byte) error {
	var m map[string]*json.RawMessage
	err := json.Unmarshal(body, &m)
	if err != nil {
		return err
	}
	for k, v := range m {
		switch k {
		case "policyRules":
			if v != nil {
				// PolicyRules is a polymorphic array; dispatch through the
				// discriminator-aware helper.
				policyRules, err := unmarshalBasicBasePolicyRuleArray(*v)
				if err != nil {
					return err
				}
				bp.PolicyRules = &policyRules
			}
		case "datasourceTypes":
			if v != nil {
				var datasourceTypes []string
				err = json.Unmarshal(*v, &datasourceTypes)
				if err != nil {
					return err
				}
				bp.DatasourceTypes = &datasourceTypes
			}
		case "objectType":
			if v != nil {
				var objectType ObjectTypeBasicBaseBackupPolicy
				err = json.Unmarshal(*v, &objectType)
				if err != nil {
					return err
				}
				bp.ObjectType = objectType
			}
		}
	}

	return nil
}

// BackupSchedule schedule for backup
type BackupSchedule struct {
	// RepeatingTimeIntervals - ISO 8601 repeating time interval format
	RepeatingTimeIntervals *[]string `json:"repeatingTimeIntervals,omitempty"`
}

// BackupVault backup Vault
type BackupVault struct {
	// ProvisioningState - READ-ONLY; Provisioning state of the BackupVault resource. Possible values include: 'Failed', 'Provisioning', 'Succeeded', 'Unknown', 'Updating'
	ProvisioningState ProvisioningState `json:"provisioningState,omitempty"`
	// StorageSettings - Storage Settings
	StorageSettings *[]StorageSetting `json:"storageSettings,omitempty"`
}

// MarshalJSON is the custom marshaler for BackupVault.
// The READ-ONLY ProvisioningState is deliberately omitted from request payloads.
func (bv BackupVault) MarshalJSON() ([]byte, error) {
	objectMap := make(map[string]interface{})
	if bv.StorageSettings != nil {
		objectMap["storageSettings"] = bv.StorageSettings
	}
	return json.Marshal(objectMap)
}

// BackupVaultResource backup Vault Resource
type BackupVaultResource struct {
	autorest.Response `json:"-"`
	// Properties - BackupVaultResource properties
	Properties *BackupVault `json:"properties,omitempty"`
	// ETag - Optional ETag.
	ETag *string `json:"eTag,omitempty"`
	// ID - READ-ONLY; Resource Id represents the complete path to the resource.
	ID *string `json:"id,omitempty"`
	// Identity - Input Managed Identity Details
	Identity *DppIdentityDetails `json:"identity,omitempty"`
	// Location - Resource location.
	Location *string `json:"location,omitempty"`
	// Name - READ-ONLY; Resource name associated with the resource.
	Name *string `json:"name,omitempty"`
	// Tags - Resource tags.
	Tags map[string]*string `json:"tags"`
	// Type - READ-ONLY; Resource type represents the complete path of the form Namespace/ResourceType/ResourceType/...
	Type       *string     `json:"type,omitempty"`
	SystemData *SystemData `json:"systemData,omitempty"`
}

// MarshalJSON is the custom marshaler for BackupVaultResource.
// Writable fields only; the READ-ONLY ID/Name/Type fields are omitted.
func (bvr BackupVaultResource) MarshalJSON() ([]byte, error) {
	objectMap := make(map[string]interface{})
	if bvr.Properties != nil {
		objectMap["properties"] = bvr.Properties
	}
	if bvr.ETag != nil {
		objectMap["eTag"] = bvr.ETag
	}
	if bvr.Identity != nil {
		objectMap["identity"] = bvr.Identity
	}
	if bvr.Location != nil {
		objectMap["location"] = bvr.Location
	}
	if bvr.Tags != nil {
		objectMap["tags"] = bvr.Tags
	}
	if bvr.SystemData != nil {
		objectMap["systemData"] = bvr.SystemData
	}
	return json.Marshal(objectMap)
}

// BackupVaultResourceList list of BackupVault resources
type BackupVaultResourceList struct {
	autorest.Response `json:"-"`
	// Value - List of resources.
	Value *[]BackupVaultResource `json:"value,omitempty"`
	// NextLink - The uri to fetch the next page of resources. Call ListNext() fetches next page of resources.
	NextLink *string `json:"nextLink,omitempty"`
}

// BackupVaultResourceListIterator provides access to a complete listing of BackupVaultResource values.
type BackupVaultResourceListIterator struct {
	i    int
	page BackupVaultResourceListPage
}

// NextWithContext advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
func (iter *BackupVaultResourceListIterator) NextWithContext(ctx context.Context) (err error) {
	if tracing.IsEnabled() {
		ctx = tracing.StartSpan(ctx, fqdn+"/BackupVaultResourceListIterator.NextWithContext")
		defer func() {
			sc := -1
			if iter.Response().Response.Response != nil {
				sc = iter.Response().Response.Response.StatusCode
			}
			tracing.EndSpan(ctx, sc, err)
		}()
	}
	iter.i++
	if iter.i < len(iter.page.Values()) {
		return nil
	}
	// Current page exhausted: fetch the next page, rolling the index back on
	// failure so the iterator position is unchanged.
	err = iter.page.NextWithContext(ctx)
	if err != nil {
		iter.i--
		return err
	}
	iter.i = 0
	return nil
}

// Next advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (iter *BackupVaultResourceListIterator) Next() error {
	return iter.NextWithContext(context.Background())
}

// NotDone returns true if the enumeration should be started or is not yet complete.
func (iter BackupVaultResourceListIterator) NotDone() bool {
	return iter.page.NotDone() && iter.i < len(iter.page.Values())
}

// Response returns the raw server response from the last page request.
func (iter BackupVaultResourceListIterator) Response() BackupVaultResourceList {
	return iter.page.Response()
}

// Value returns the current value or a zero-initialized value if the
// iterator has advanced beyond the end of the collection.
func (iter BackupVaultResourceListIterator) Value() BackupVaultResource {
	if !iter.page.NotDone() {
		return BackupVaultResource{}
	}
	return iter.page.Values()[iter.i]
}

// NewBackupVaultResourceListIterator creates a new instance of the BackupVaultResourceListIterator type.
func NewBackupVaultResourceListIterator(page BackupVaultResourceListPage) BackupVaultResourceListIterator {
	return BackupVaultResourceListIterator{page: page}
}

// IsEmpty returns true if the ListResult contains no values.
+func (bvrl BackupVaultResourceList) IsEmpty() bool { + return bvrl.Value == nil || len(*bvrl.Value) == 0 +} + +// hasNextLink returns true if the NextLink is not empty. +func (bvrl BackupVaultResourceList) hasNextLink() bool { + return bvrl.NextLink != nil && len(*bvrl.NextLink) != 0 +} + +// backupVaultResourceListPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (bvrl BackupVaultResourceList) backupVaultResourceListPreparer(ctx context.Context) (*http.Request, error) { + if !bvrl.hasNextLink() { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(bvrl.NextLink))) +} + +// BackupVaultResourceListPage contains a page of BackupVaultResource values. +type BackupVaultResourceListPage struct { + fn func(context.Context, BackupVaultResourceList) (BackupVaultResourceList, error) + bvrl BackupVaultResourceList +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *BackupVaultResourceListPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BackupVaultResourceListPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + for { + next, err := page.fn(ctx, page.bvrl) + if err != nil { + return err + } + page.bvrl = next + if !next.hasNextLink() || !next.IsEmpty() { + break + } + } + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. 
+func (page *BackupVaultResourceListPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page BackupVaultResourceListPage) NotDone() bool { + return !page.bvrl.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page BackupVaultResourceListPage) Response() BackupVaultResourceList { + return page.bvrl +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page BackupVaultResourceListPage) Values() []BackupVaultResource { + if page.bvrl.IsEmpty() { + return nil + } + return *page.bvrl.Value +} + +// Creates a new instance of the BackupVaultResourceListPage type. +func NewBackupVaultResourceListPage(cur BackupVaultResourceList, getNextPage func(context.Context, BackupVaultResourceList) (BackupVaultResourceList, error)) BackupVaultResourceListPage { + return BackupVaultResourceListPage{ + fn: getNextPage, + bvrl: cur, + } +} + +// BackupVaultsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type BackupVaultsCreateOrUpdateFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(BackupVaultsClient) (BackupVaultResource, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *BackupVaultsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for BackupVaultsCreateOrUpdateFuture.Result. 
+func (future *BackupVaultsCreateOrUpdateFuture) result(client BackupVaultsClient) (bvr BackupVaultResource, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BackupVaultsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + bvr.Response.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("dataprotection.BackupVaultsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if bvr.Response.Response, err = future.GetResult(sender); err == nil && bvr.Response.Response.StatusCode != http.StatusNoContent { + bvr, err = client.CreateOrUpdateResponder(bvr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BackupVaultsCreateOrUpdateFuture", "Result", bvr.Response.Response, "Failure responding to request") + } + } + return +} + +// BackupVaultsPatchFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type BackupVaultsPatchFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(BackupVaultsClient) (BackupVaultResource, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *BackupVaultsPatchFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for BackupVaultsPatchFuture.Result. 
+func (future *BackupVaultsPatchFuture) result(client BackupVaultsClient) (bvr BackupVaultResource, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BackupVaultsPatchFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + bvr.Response.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("dataprotection.BackupVaultsPatchFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if bvr.Response.Response, err = future.GetResult(sender); err == nil && bvr.Response.Response.StatusCode != http.StatusNoContent { + bvr, err = client.PatchResponder(bvr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.BackupVaultsPatchFuture", "Result", bvr.Response.Response, "Failure responding to request") + } + } + return +} + +// BasicBaseBackupPolicy backupPolicy base +type BasicBaseBackupPolicy interface { + AsBackupPolicy() (*BackupPolicy, bool) + AsBaseBackupPolicy() (*BaseBackupPolicy, bool) +} + +// BaseBackupPolicy backupPolicy base +type BaseBackupPolicy struct { + // DatasourceTypes - Type of datasource for the backup management + DatasourceTypes *[]string `json:"datasourceTypes,omitempty"` + // ObjectType - Possible values include: 'ObjectTypeBaseBackupPolicy', 'ObjectTypeBackupPolicy' + ObjectType ObjectTypeBasicBaseBackupPolicy `json:"objectType,omitempty"` +} + +func unmarshalBasicBaseBackupPolicy(body []byte) (BasicBaseBackupPolicy, error) { + var m map[string]interface{} + err := json.Unmarshal(body, &m) + if err != nil { + return nil, err + } + + switch m["objectType"] { + case string(ObjectTypeBackupPolicy): + var bp BackupPolicy + err := json.Unmarshal(body, &bp) + return bp, err + default: + var bbp BaseBackupPolicy + err := json.Unmarshal(body, 
&bbp) + return bbp, err + } +} +func unmarshalBasicBaseBackupPolicyArray(body []byte) ([]BasicBaseBackupPolicy, error) { + var rawMessages []*json.RawMessage + err := json.Unmarshal(body, &rawMessages) + if err != nil { + return nil, err + } + + bbpArray := make([]BasicBaseBackupPolicy, len(rawMessages)) + + for index, rawMessage := range rawMessages { + bbp, err := unmarshalBasicBaseBackupPolicy(*rawMessage) + if err != nil { + return nil, err + } + bbpArray[index] = bbp + } + return bbpArray, nil +} + +// MarshalJSON is the custom marshaler for BaseBackupPolicy. +func (bbp BaseBackupPolicy) MarshalJSON() ([]byte, error) { + bbp.ObjectType = ObjectTypeBaseBackupPolicy + objectMap := make(map[string]interface{}) + if bbp.DatasourceTypes != nil { + objectMap["datasourceTypes"] = bbp.DatasourceTypes + } + if bbp.ObjectType != "" { + objectMap["objectType"] = bbp.ObjectType + } + return json.Marshal(objectMap) +} + +// AsBackupPolicy is the BasicBaseBackupPolicy implementation for BaseBackupPolicy. +func (bbp BaseBackupPolicy) AsBackupPolicy() (*BackupPolicy, bool) { + return nil, false +} + +// AsBaseBackupPolicy is the BasicBaseBackupPolicy implementation for BaseBackupPolicy. +func (bbp BaseBackupPolicy) AsBaseBackupPolicy() (*BaseBackupPolicy, bool) { + return &bbp, true +} + +// AsBasicBaseBackupPolicy is the BasicBaseBackupPolicy implementation for BaseBackupPolicy. +func (bbp BaseBackupPolicy) AsBasicBaseBackupPolicy() (BasicBaseBackupPolicy, bool) { + return &bbp, true +} + +// BaseBackupPolicyResource baseBackupPolicy resource +type BaseBackupPolicyResource struct { + autorest.Response `json:"-"` + // Properties - BaseBackupPolicyResource properties + Properties BasicBaseBackupPolicy `json:"properties,omitempty"` + // ID - READ-ONLY; Resource Id represents the complete path to the resource. + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name associated with the resource. 
+ Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type represents the complete path of the form Namespace/ResourceType/ResourceType/... + Type *string `json:"type,omitempty"` + SystemData *SystemData `json:"systemData,omitempty"` +} + +// MarshalJSON is the custom marshaler for BaseBackupPolicyResource. +func (bbpr BaseBackupPolicyResource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + objectMap["properties"] = bbpr.Properties + if bbpr.SystemData != nil { + objectMap["systemData"] = bbpr.SystemData + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for BaseBackupPolicyResource struct. +func (bbpr *BaseBackupPolicyResource) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + properties, err := unmarshalBasicBaseBackupPolicy(*v) + if err != nil { + return err + } + bbpr.Properties = properties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + bbpr.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + bbpr.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + bbpr.Type = &typeVar + } + case "systemData": + if v != nil { + var systemData SystemData + err = json.Unmarshal(*v, &systemData) + if err != nil { + return err + } + bbpr.SystemData = &systemData + } + } + } + + return nil +} + +// BaseBackupPolicyResourceList list of BaseBackupPolicy resources +type BaseBackupPolicyResourceList struct { + autorest.Response `json:"-"` + // Value - List of resources. + Value *[]BaseBackupPolicyResource `json:"value,omitempty"` + // NextLink - The uri to fetch the next page of resources. 
Call ListNext() fetches next page of resources. + NextLink *string `json:"nextLink,omitempty"` +} + +// BaseBackupPolicyResourceListIterator provides access to a complete listing of BaseBackupPolicyResource +// values. +type BaseBackupPolicyResourceListIterator struct { + i int + page BaseBackupPolicyResourceListPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *BaseBackupPolicyResourceListIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseBackupPolicyResourceListIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *BaseBackupPolicyResourceListIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter BaseBackupPolicyResourceListIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter BaseBackupPolicyResourceListIterator) Response() BaseBackupPolicyResourceList { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. 
+func (iter BaseBackupPolicyResourceListIterator) Value() BaseBackupPolicyResource { + if !iter.page.NotDone() { + return BaseBackupPolicyResource{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the BaseBackupPolicyResourceListIterator type. +func NewBaseBackupPolicyResourceListIterator(page BaseBackupPolicyResourceListPage) BaseBackupPolicyResourceListIterator { + return BaseBackupPolicyResourceListIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (bbprl BaseBackupPolicyResourceList) IsEmpty() bool { + return bbprl.Value == nil || len(*bbprl.Value) == 0 +} + +// hasNextLink returns true if the NextLink is not empty. +func (bbprl BaseBackupPolicyResourceList) hasNextLink() bool { + return bbprl.NextLink != nil && len(*bbprl.NextLink) != 0 +} + +// baseBackupPolicyResourceListPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (bbprl BaseBackupPolicyResourceList) baseBackupPolicyResourceListPreparer(ctx context.Context) (*http.Request, error) { + if !bbprl.hasNextLink() { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(bbprl.NextLink))) +} + +// BaseBackupPolicyResourceListPage contains a page of BaseBackupPolicyResource values. +type BaseBackupPolicyResourceListPage struct { + fn func(context.Context, BaseBackupPolicyResourceList) (BaseBackupPolicyResourceList, error) + bbprl BaseBackupPolicyResourceList +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. 
+func (page *BaseBackupPolicyResourceListPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseBackupPolicyResourceListPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + for { + next, err := page.fn(ctx, page.bbprl) + if err != nil { + return err + } + page.bbprl = next + if !next.hasNextLink() || !next.IsEmpty() { + break + } + } + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *BaseBackupPolicyResourceListPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page BaseBackupPolicyResourceListPage) NotDone() bool { + return !page.bbprl.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page BaseBackupPolicyResourceListPage) Response() BaseBackupPolicyResourceList { + return page.bbprl +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page BaseBackupPolicyResourceListPage) Values() []BaseBackupPolicyResource { + if page.bbprl.IsEmpty() { + return nil + } + return *page.bbprl.Value +} + +// Creates a new instance of the BaseBackupPolicyResourceListPage type. 
+func NewBaseBackupPolicyResourceListPage(cur BaseBackupPolicyResourceList, getNextPage func(context.Context, BaseBackupPolicyResourceList) (BaseBackupPolicyResourceList, error)) BaseBackupPolicyResourceListPage { + return BaseBackupPolicyResourceListPage{ + fn: getNextPage, + bbprl: cur, + } +} + +// BasicBasePolicyRule basePolicy Rule +type BasicBasePolicyRule interface { + AsAzureBackupRule() (*AzureBackupRule, bool) + AsAzureRetentionRule() (*AzureRetentionRule, bool) + AsBasePolicyRule() (*BasePolicyRule, bool) +} + +// BasePolicyRule basePolicy Rule +type BasePolicyRule struct { + Name *string `json:"name,omitempty"` + // ObjectType - Possible values include: 'ObjectTypeBasePolicyRule', 'ObjectTypeAzureBackupRule', 'ObjectTypeAzureRetentionRule' + ObjectType ObjectTypeBasicBasePolicyRule `json:"objectType,omitempty"` +} + +func unmarshalBasicBasePolicyRule(body []byte) (BasicBasePolicyRule, error) { + var m map[string]interface{} + err := json.Unmarshal(body, &m) + if err != nil { + return nil, err + } + + switch m["objectType"] { + case string(ObjectTypeAzureBackupRule): + var abr AzureBackupRule + err := json.Unmarshal(body, &abr) + return abr, err + case string(ObjectTypeAzureRetentionRule): + var arr AzureRetentionRule + err := json.Unmarshal(body, &arr) + return arr, err + default: + var bpr BasePolicyRule + err := json.Unmarshal(body, &bpr) + return bpr, err + } +} +func unmarshalBasicBasePolicyRuleArray(body []byte) ([]BasicBasePolicyRule, error) { + var rawMessages []*json.RawMessage + err := json.Unmarshal(body, &rawMessages) + if err != nil { + return nil, err + } + + bprArray := make([]BasicBasePolicyRule, len(rawMessages)) + + for index, rawMessage := range rawMessages { + bpr, err := unmarshalBasicBasePolicyRule(*rawMessage) + if err != nil { + return nil, err + } + bprArray[index] = bpr + } + return bprArray, nil +} + +// MarshalJSON is the custom marshaler for BasePolicyRule. 
+func (bpr BasePolicyRule) MarshalJSON() ([]byte, error) { + bpr.ObjectType = ObjectTypeBasePolicyRule + objectMap := make(map[string]interface{}) + if bpr.Name != nil { + objectMap["name"] = bpr.Name + } + if bpr.ObjectType != "" { + objectMap["objectType"] = bpr.ObjectType + } + return json.Marshal(objectMap) +} + +// AsAzureBackupRule is the BasicBasePolicyRule implementation for BasePolicyRule. +func (bpr BasePolicyRule) AsAzureBackupRule() (*AzureBackupRule, bool) { + return nil, false +} + +// AsAzureRetentionRule is the BasicBasePolicyRule implementation for BasePolicyRule. +func (bpr BasePolicyRule) AsAzureRetentionRule() (*AzureRetentionRule, bool) { + return nil, false +} + +// AsBasePolicyRule is the BasicBasePolicyRule implementation for BasePolicyRule. +func (bpr BasePolicyRule) AsBasePolicyRule() (*BasePolicyRule, bool) { + return &bpr, true +} + +// AsBasicBasePolicyRule is the BasicBasePolicyRule implementation for BasePolicyRule. +func (bpr BasePolicyRule) AsBasicBasePolicyRule() (BasicBasePolicyRule, bool) { + return &bpr, true +} + +// CheckNameAvailabilityRequest checkNameAvailability Request +type CheckNameAvailabilityRequest struct { + // Name - Resource name for which availability needs to be checked + Name *string `json:"name,omitempty"` + // Type - Describes the Resource type: Microsoft.DataProtection/BackupVaults + Type *string `json:"type,omitempty"` +} + +// CheckNameAvailabilityResult checkNameAvailability Result +type CheckNameAvailabilityResult struct { + autorest.Response `json:"-"` + // Message - Gets or sets the message. + Message *string `json:"message,omitempty"` + // NameAvailable - Gets or sets a value indicating whether [name available]. + NameAvailable *bool `json:"nameAvailable,omitempty"` + // Reason - Gets or sets the reason. + Reason *string `json:"reason,omitempty"` +} + +// ClientDiscoveryDisplay localized display information of an operation. 
+type ClientDiscoveryDisplay struct { + // Description - Description of the operation having details of what operation is about. + Description *string `json:"description,omitempty"` + // Operation - Operations Name itself. + Operation *string `json:"operation,omitempty"` + // Provider - Name of the provider for display purposes + Provider *string `json:"provider,omitempty"` + // Resource - ResourceType for which this Operation can be performed. + Resource *string `json:"resource,omitempty"` +} + +// ClientDiscoveryForLogSpecification class to represent shoebox log specification in json client +// discovery. +type ClientDiscoveryForLogSpecification struct { + // BlobDuration - blob duration of shoebox log specification + BlobDuration *string `json:"blobDuration,omitempty"` + // DisplayName - Localized display name + DisplayName *string `json:"displayName,omitempty"` + // Name - Name for shoebox log specification. + Name *string `json:"name,omitempty"` +} + +// ClientDiscoveryForProperties class to represent shoebox properties in json client discovery. +type ClientDiscoveryForProperties struct { + // ServiceSpecification - Operation properties. + ServiceSpecification *ClientDiscoveryForServiceSpecification `json:"serviceSpecification,omitempty"` +} + +// ClientDiscoveryForServiceSpecification class to represent shoebox service specification in json client +// discovery. +type ClientDiscoveryForServiceSpecification struct { + // LogSpecifications - List of log specifications of this operation. + LogSpecifications *[]ClientDiscoveryForLogSpecification `json:"logSpecifications,omitempty"` +} + +// ClientDiscoveryResponse operations List response which contains list of available APIs. +type ClientDiscoveryResponse struct { + autorest.Response `json:"-"` + // NextLink - Link to the next chunk of Response. + NextLink *string `json:"nextLink,omitempty"` + // Value - List of available operations. 
+ Value *[]ClientDiscoveryValueForSingleAPI `json:"value,omitempty"` +} + +// ClientDiscoveryResponseIterator provides access to a complete listing of +// ClientDiscoveryValueForSingleAPI values. +type ClientDiscoveryResponseIterator struct { + i int + page ClientDiscoveryResponsePage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *ClientDiscoveryResponseIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ClientDiscoveryResponseIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *ClientDiscoveryResponseIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter ClientDiscoveryResponseIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter ClientDiscoveryResponseIterator) Response() ClientDiscoveryResponse { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. 
+func (iter ClientDiscoveryResponseIterator) Value() ClientDiscoveryValueForSingleAPI { + if !iter.page.NotDone() { + return ClientDiscoveryValueForSingleAPI{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the ClientDiscoveryResponseIterator type. +func NewClientDiscoveryResponseIterator(page ClientDiscoveryResponsePage) ClientDiscoveryResponseIterator { + return ClientDiscoveryResponseIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (cdr ClientDiscoveryResponse) IsEmpty() bool { + return cdr.Value == nil || len(*cdr.Value) == 0 +} + +// hasNextLink returns true if the NextLink is not empty. +func (cdr ClientDiscoveryResponse) hasNextLink() bool { + return cdr.NextLink != nil && len(*cdr.NextLink) != 0 +} + +// clientDiscoveryResponsePreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (cdr ClientDiscoveryResponse) clientDiscoveryResponsePreparer(ctx context.Context) (*http.Request, error) { + if !cdr.hasNextLink() { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(cdr.NextLink))) +} + +// ClientDiscoveryResponsePage contains a page of ClientDiscoveryValueForSingleAPI values. +type ClientDiscoveryResponsePage struct { + fn func(context.Context, ClientDiscoveryResponse) (ClientDiscoveryResponse, error) + cdr ClientDiscoveryResponse +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. 
+func (page *ClientDiscoveryResponsePage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ClientDiscoveryResponsePage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + for { + next, err := page.fn(ctx, page.cdr) + if err != nil { + return err + } + page.cdr = next + if !next.hasNextLink() || !next.IsEmpty() { + break + } + } + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *ClientDiscoveryResponsePage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page ClientDiscoveryResponsePage) NotDone() bool { + return !page.cdr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page ClientDiscoveryResponsePage) Response() ClientDiscoveryResponse { + return page.cdr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page ClientDiscoveryResponsePage) Values() []ClientDiscoveryValueForSingleAPI { + if page.cdr.IsEmpty() { + return nil + } + return *page.cdr.Value +} + +// Creates a new instance of the ClientDiscoveryResponsePage type. +func NewClientDiscoveryResponsePage(cur ClientDiscoveryResponse, getNextPage func(context.Context, ClientDiscoveryResponse) (ClientDiscoveryResponse, error)) ClientDiscoveryResponsePage { + return ClientDiscoveryResponsePage{ + fn: getNextPage, + cdr: cur, + } +} + +// ClientDiscoveryValueForSingleAPI available operation details. 
+type ClientDiscoveryValueForSingleAPI struct { + // Display - Contains the localized display information for this particular operation + Display *ClientDiscoveryDisplay `json:"display,omitempty"` + // Name - Name of the Operation. + Name *string `json:"name,omitempty"` + // IsDataAction - Indicates whether the operation is a data action + IsDataAction *bool `json:"isDataAction,omitempty"` + // Origin - The intended executor of the operation;governs the display of the operation in the RBAC UX and the audit logs UX + Origin *string `json:"origin,omitempty"` + // Properties - Properties for the given operation. + Properties *ClientDiscoveryForProperties `json:"properties,omitempty"` +} + +// CloudError an error response from Azure Backup. +type CloudError struct { + Error *Error `json:"error,omitempty"` +} + +// CopyOnExpiryOption copy on Expiry Option +type CopyOnExpiryOption struct { + // ObjectType - Possible values include: 'ObjectTypeCopyOption', 'ObjectTypeCopyOnExpiryOption', 'ObjectTypeCustomCopyOption', 'ObjectTypeImmediateCopyOption' + ObjectType ObjectTypeBasicCopyOption `json:"objectType,omitempty"` +} + +// MarshalJSON is the custom marshaler for CopyOnExpiryOption. +func (coeo CopyOnExpiryOption) MarshalJSON() ([]byte, error) { + coeo.ObjectType = ObjectTypeCopyOnExpiryOption + objectMap := make(map[string]interface{}) + if coeo.ObjectType != "" { + objectMap["objectType"] = coeo.ObjectType + } + return json.Marshal(objectMap) +} + +// AsCopyOnExpiryOption is the BasicCopyOption implementation for CopyOnExpiryOption. +func (coeo CopyOnExpiryOption) AsCopyOnExpiryOption() (*CopyOnExpiryOption, bool) { + return &coeo, true +} + +// AsCustomCopyOption is the BasicCopyOption implementation for CopyOnExpiryOption. +func (coeo CopyOnExpiryOption) AsCustomCopyOption() (*CustomCopyOption, bool) { + return nil, false +} + +// AsImmediateCopyOption is the BasicCopyOption implementation for CopyOnExpiryOption. 
+func (coeo CopyOnExpiryOption) AsImmediateCopyOption() (*ImmediateCopyOption, bool) { + return nil, false +} + +// AsCopyOption is the BasicCopyOption implementation for CopyOnExpiryOption. +func (coeo CopyOnExpiryOption) AsCopyOption() (*CopyOption, bool) { + return nil, false +} + +// AsBasicCopyOption is the BasicCopyOption implementation for CopyOnExpiryOption. +func (coeo CopyOnExpiryOption) AsBasicCopyOption() (BasicCopyOption, bool) { + return &coeo, true +} + +// BasicCopyOption options to copy +type BasicCopyOption interface { + AsCopyOnExpiryOption() (*CopyOnExpiryOption, bool) + AsCustomCopyOption() (*CustomCopyOption, bool) + AsImmediateCopyOption() (*ImmediateCopyOption, bool) + AsCopyOption() (*CopyOption, bool) +} + +// CopyOption options to copy +type CopyOption struct { + // ObjectType - Possible values include: 'ObjectTypeCopyOption', 'ObjectTypeCopyOnExpiryOption', 'ObjectTypeCustomCopyOption', 'ObjectTypeImmediateCopyOption' + ObjectType ObjectTypeBasicCopyOption `json:"objectType,omitempty"` +} + +func unmarshalBasicCopyOption(body []byte) (BasicCopyOption, error) { + var m map[string]interface{} + err := json.Unmarshal(body, &m) + if err != nil { + return nil, err + } + + switch m["objectType"] { + case string(ObjectTypeCopyOnExpiryOption): + var coeo CopyOnExpiryOption + err := json.Unmarshal(body, &coeo) + return coeo, err + case string(ObjectTypeCustomCopyOption): + var cco CustomCopyOption + err := json.Unmarshal(body, &cco) + return cco, err + case string(ObjectTypeImmediateCopyOption): + var ico ImmediateCopyOption + err := json.Unmarshal(body, &ico) + return ico, err + default: + var co CopyOption + err := json.Unmarshal(body, &co) + return co, err + } +} +func unmarshalBasicCopyOptionArray(body []byte) ([]BasicCopyOption, error) { + var rawMessages []*json.RawMessage + err := json.Unmarshal(body, &rawMessages) + if err != nil { + return nil, err + } + + coArray := make([]BasicCopyOption, len(rawMessages)) + + for index, rawMessage := 
range rawMessages { + co, err := unmarshalBasicCopyOption(*rawMessage) + if err != nil { + return nil, err + } + coArray[index] = co + } + return coArray, nil +} + +// MarshalJSON is the custom marshaler for CopyOption. +func (co CopyOption) MarshalJSON() ([]byte, error) { + co.ObjectType = ObjectTypeCopyOption + objectMap := make(map[string]interface{}) + if co.ObjectType != "" { + objectMap["objectType"] = co.ObjectType + } + return json.Marshal(objectMap) +} + +// AsCopyOnExpiryOption is the BasicCopyOption implementation for CopyOption. +func (co CopyOption) AsCopyOnExpiryOption() (*CopyOnExpiryOption, bool) { + return nil, false +} + +// AsCustomCopyOption is the BasicCopyOption implementation for CopyOption. +func (co CopyOption) AsCustomCopyOption() (*CustomCopyOption, bool) { + return nil, false +} + +// AsImmediateCopyOption is the BasicCopyOption implementation for CopyOption. +func (co CopyOption) AsImmediateCopyOption() (*ImmediateCopyOption, bool) { + return nil, false +} + +// AsCopyOption is the BasicCopyOption implementation for CopyOption. +func (co CopyOption) AsCopyOption() (*CopyOption, bool) { + return &co, true +} + +// AsBasicCopyOption is the BasicCopyOption implementation for CopyOption. +func (co CopyOption) AsBasicCopyOption() (BasicCopyOption, bool) { + return &co, true +} + +// CustomCopyOption duration based custom options to copy +type CustomCopyOption struct { + // Duration - Data copied after given timespan + Duration *string `json:"duration,omitempty"` + // ObjectType - Possible values include: 'ObjectTypeCopyOption', 'ObjectTypeCopyOnExpiryOption', 'ObjectTypeCustomCopyOption', 'ObjectTypeImmediateCopyOption' + ObjectType ObjectTypeBasicCopyOption `json:"objectType,omitempty"` +} + +// MarshalJSON is the custom marshaler for CustomCopyOption. 
+func (cco CustomCopyOption) MarshalJSON() ([]byte, error) { + cco.ObjectType = ObjectTypeCustomCopyOption + objectMap := make(map[string]interface{}) + if cco.Duration != nil { + objectMap["duration"] = cco.Duration + } + if cco.ObjectType != "" { + objectMap["objectType"] = cco.ObjectType + } + return json.Marshal(objectMap) +} + +// AsCopyOnExpiryOption is the BasicCopyOption implementation for CustomCopyOption. +func (cco CustomCopyOption) AsCopyOnExpiryOption() (*CopyOnExpiryOption, bool) { + return nil, false +} + +// AsCustomCopyOption is the BasicCopyOption implementation for CustomCopyOption. +func (cco CustomCopyOption) AsCustomCopyOption() (*CustomCopyOption, bool) { + return &cco, true +} + +// AsImmediateCopyOption is the BasicCopyOption implementation for CustomCopyOption. +func (cco CustomCopyOption) AsImmediateCopyOption() (*ImmediateCopyOption, bool) { + return nil, false +} + +// AsCopyOption is the BasicCopyOption implementation for CustomCopyOption. +func (cco CustomCopyOption) AsCopyOption() (*CopyOption, bool) { + return nil, false +} + +// AsBasicCopyOption is the BasicCopyOption implementation for CustomCopyOption. +func (cco CustomCopyOption) AsBasicCopyOption() (BasicCopyOption, bool) { + return &cco, true +} + +// Datasource datasource to be backed up +type Datasource struct { + // DatasourceType - DatasourceType of the resource. + DatasourceType *string `json:"datasourceType,omitempty"` + // ObjectType - Type of Datasource object, used to initialize the right inherited type + ObjectType *string `json:"objectType,omitempty"` + // ResourceID - Full ARM ID of the resource. For azure resources, this is ARM ID. For non azure resources, this will be the ID created by backup service via Fabric/Vault. + ResourceID *string `json:"resourceID,omitempty"` + // ResourceLocation - Location of datasource. + ResourceLocation *string `json:"resourceLocation,omitempty"` + // ResourceName - Unique identifier of the resource in the context of parent. 
+ ResourceName *string `json:"resourceName,omitempty"` + // ResourceType - Resource Type of Datasource. + ResourceType *string `json:"resourceType,omitempty"` + // ResourceURI - Uri of the resource. + ResourceURI *string `json:"resourceUri,omitempty"` +} + +// DatasourceSet datasourceSet details of datasource to be backed up +type DatasourceSet struct { + // DatasourceType - DatasourceType of the resource. + DatasourceType *string `json:"datasourceType,omitempty"` + // ObjectType - Type of Datasource object, used to initialize the right inherited type + ObjectType *string `json:"objectType,omitempty"` + // ResourceID - Full ARM ID of the resource. For azure resources, this is ARM ID. For non azure resources, this will be the ID created by backup service via Fabric/Vault. + ResourceID *string `json:"resourceID,omitempty"` + // ResourceLocation - Location of datasource. + ResourceLocation *string `json:"resourceLocation,omitempty"` + // ResourceName - Unique identifier of the resource in the context of parent. + ResourceName *string `json:"resourceName,omitempty"` + // ResourceType - Resource Type of Datasource. + ResourceType *string `json:"resourceType,omitempty"` + // ResourceURI - Uri of the resource. + ResourceURI *string `json:"resourceUri,omitempty"` +} + +// DataStoreInfoBase dataStoreInfo base +type DataStoreInfoBase struct { + // DataStoreType - type of datastore; Operational/Vault/Archive. 
Possible values include: 'OperationalStore', 'VaultStore', 'ArchiveStore' + DataStoreType DataStoreTypes `json:"dataStoreType,omitempty"` + // ObjectType - Type of Datasource object, used to initialize the right inherited type + ObjectType *string `json:"objectType,omitempty"` +} + +// BasicDataStoreParameters parameters for DataStore +type BasicDataStoreParameters interface { + AsAzureOperationalStoreParameters() (*AzureOperationalStoreParameters, bool) + AsDataStoreParameters() (*DataStoreParameters, bool) +} + +// DataStoreParameters parameters for DataStore +type DataStoreParameters struct { + // DataStoreType - type of datastore; Operational/Vault/Archive. Possible values include: 'OperationalStore', 'VaultStore', 'ArchiveStore' + DataStoreType DataStoreTypes `json:"dataStoreType,omitempty"` + // ObjectType - Possible values include: 'ObjectTypeDataStoreParameters', 'ObjectTypeAzureOperationalStoreParameters' + ObjectType ObjectTypeBasicDataStoreParameters `json:"objectType,omitempty"` +} + +func unmarshalBasicDataStoreParameters(body []byte) (BasicDataStoreParameters, error) { + var m map[string]interface{} + err := json.Unmarshal(body, &m) + if err != nil { + return nil, err + } + + switch m["objectType"] { + case string(ObjectTypeAzureOperationalStoreParameters): + var aosp AzureOperationalStoreParameters + err := json.Unmarshal(body, &aosp) + return aosp, err + default: + var dsp DataStoreParameters + err := json.Unmarshal(body, &dsp) + return dsp, err + } +} +func unmarshalBasicDataStoreParametersArray(body []byte) ([]BasicDataStoreParameters, error) { + var rawMessages []*json.RawMessage + err := json.Unmarshal(body, &rawMessages) + if err != nil { + return nil, err + } + + dspArray := make([]BasicDataStoreParameters, len(rawMessages)) + + for index, rawMessage := range rawMessages { + dsp, err := unmarshalBasicDataStoreParameters(*rawMessage) + if err != nil { + return nil, err + } + dspArray[index] = dsp + } + return dspArray, nil +} + +// MarshalJSON 
is the custom marshaler for DataStoreParameters. +func (dsp DataStoreParameters) MarshalJSON() ([]byte, error) { + dsp.ObjectType = ObjectTypeDataStoreParameters + objectMap := make(map[string]interface{}) + if dsp.DataStoreType != "" { + objectMap["dataStoreType"] = dsp.DataStoreType + } + if dsp.ObjectType != "" { + objectMap["objectType"] = dsp.ObjectType + } + return json.Marshal(objectMap) +} + +// AsAzureOperationalStoreParameters is the BasicDataStoreParameters implementation for DataStoreParameters. +func (dsp DataStoreParameters) AsAzureOperationalStoreParameters() (*AzureOperationalStoreParameters, bool) { + return nil, false +} + +// AsDataStoreParameters is the BasicDataStoreParameters implementation for DataStoreParameters. +func (dsp DataStoreParameters) AsDataStoreParameters() (*DataStoreParameters, bool) { + return &dsp, true +} + +// AsBasicDataStoreParameters is the BasicDataStoreParameters implementation for DataStoreParameters. +func (dsp DataStoreParameters) AsBasicDataStoreParameters() (BasicDataStoreParameters, bool) { + return &dsp, true +} + +// Day day of the week +type Day struct { + // Date - Date of the month + Date *int32 `json:"date,omitempty"` + // IsLast - Whether Date is last date of month + IsLast *bool `json:"isLast,omitempty"` +} + +// BasicDeleteOption delete Option +type BasicDeleteOption interface { + AsAbsoluteDeleteOption() (*AbsoluteDeleteOption, bool) + AsDeleteOption() (*DeleteOption, bool) +} + +// DeleteOption delete Option +type DeleteOption struct { + // Duration - Duration of deletion after given timespan + Duration *string `json:"duration,omitempty"` + // ObjectType - Possible values include: 'ObjectTypeDeleteOption', 'ObjectTypeAbsoluteDeleteOption' + ObjectType ObjectTypeBasicDeleteOption `json:"objectType,omitempty"` +} + +func unmarshalBasicDeleteOption(body []byte) (BasicDeleteOption, error) { + var m map[string]interface{} + err := json.Unmarshal(body, &m) + if err != nil { + return nil, err + } + + switch 
m["objectType"] { + case string(ObjectTypeAbsoluteDeleteOption): + var ado AbsoluteDeleteOption + err := json.Unmarshal(body, &ado) + return ado, err + default: + var do DeleteOption + err := json.Unmarshal(body, &do) + return do, err + } +} +func unmarshalBasicDeleteOptionArray(body []byte) ([]BasicDeleteOption, error) { + var rawMessages []*json.RawMessage + err := json.Unmarshal(body, &rawMessages) + if err != nil { + return nil, err + } + + doArray := make([]BasicDeleteOption, len(rawMessages)) + + for index, rawMessage := range rawMessages { + do, err := unmarshalBasicDeleteOption(*rawMessage) + if err != nil { + return nil, err + } + doArray[index] = do + } + return doArray, nil +} + +// MarshalJSON is the custom marshaler for DeleteOption. +func (do DeleteOption) MarshalJSON() ([]byte, error) { + do.ObjectType = ObjectTypeDeleteOption + objectMap := make(map[string]interface{}) + if do.Duration != nil { + objectMap["duration"] = do.Duration + } + if do.ObjectType != "" { + objectMap["objectType"] = do.ObjectType + } + return json.Marshal(objectMap) +} + +// AsAbsoluteDeleteOption is the BasicDeleteOption implementation for DeleteOption. +func (do DeleteOption) AsAbsoluteDeleteOption() (*AbsoluteDeleteOption, bool) { + return nil, false +} + +// AsDeleteOption is the BasicDeleteOption implementation for DeleteOption. +func (do DeleteOption) AsDeleteOption() (*DeleteOption, bool) { + return &do, true +} + +// AsBasicDeleteOption is the BasicDeleteOption implementation for DeleteOption. +func (do DeleteOption) AsBasicDeleteOption() (BasicDeleteOption, bool) { + return &do, true +} + +// DppIdentityDetails identity details +type DppIdentityDetails struct { + // PrincipalID - READ-ONLY; The object ID of the service principal object for the managed identity that is used to grant role-based access to an Azure resource. 
+ PrincipalID *string `json:"principalId,omitempty"` + // TenantID - READ-ONLY; A Globally Unique Identifier (GUID) that represents the Azure AD tenant where the resource is now a member. + TenantID *string `json:"tenantId,omitempty"` + // Type - The identityType which can be either SystemAssigned or None + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for DppIdentityDetails. +func (did DppIdentityDetails) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if did.Type != nil { + objectMap["type"] = did.Type + } + return json.Marshal(objectMap) +} + +// DppResource resource class +type DppResource struct { + // ID - READ-ONLY; Resource Id represents the complete path to the resource. + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name associated with the resource. + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type represents the complete path of the form Namespace/ResourceType/ResourceType/... + Type *string `json:"type,omitempty"` + SystemData *SystemData `json:"systemData,omitempty"` +} + +// MarshalJSON is the custom marshaler for DppResource. +func (dr DppResource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if dr.SystemData != nil { + objectMap["systemData"] = dr.SystemData + } + return json.Marshal(objectMap) +} + +// DppResourceList listResource +type DppResourceList struct { + // NextLink - The uri to fetch the next page of resources. Call ListNext() fetches next page of resources. + NextLink *string `json:"nextLink,omitempty"` +} + +// DppTrackedResource ... +type DppTrackedResource struct { + // ETag - Optional ETag. + ETag *string `json:"eTag,omitempty"` + // ID - READ-ONLY; Resource Id represents the complete path to the resource. + ID *string `json:"id,omitempty"` + // Identity - Input Managed Identity Details + Identity *DppIdentityDetails `json:"identity,omitempty"` + // Location - Resource location. 
+ Location *string `json:"location,omitempty"` + // Name - READ-ONLY; Resource name associated with the resource. + Name *string `json:"name,omitempty"` + // Tags - Resource tags. + Tags map[string]*string `json:"tags"` + // Type - READ-ONLY; Resource type represents the complete path of the form Namespace/ResourceType/ResourceType/... + Type *string `json:"type,omitempty"` + SystemData *SystemData `json:"systemData,omitempty"` +} + +// MarshalJSON is the custom marshaler for DppTrackedResource. +func (dtr DppTrackedResource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if dtr.ETag != nil { + objectMap["eTag"] = dtr.ETag + } + if dtr.Identity != nil { + objectMap["identity"] = dtr.Identity + } + if dtr.Location != nil { + objectMap["location"] = dtr.Location + } + if dtr.Tags != nil { + objectMap["tags"] = dtr.Tags + } + if dtr.SystemData != nil { + objectMap["systemData"] = dtr.SystemData + } + return json.Marshal(objectMap) +} + +// DppTrackedResourceList ... +type DppTrackedResourceList struct { + // NextLink - The uri to fetch the next page of resources. Call ListNext() fetches next page of resources. + NextLink *string `json:"nextLink,omitempty"` +} + +// DppWorkerRequest ... +type DppWorkerRequest struct { + SubscriptionID *string `json:"subscriptionId,omitempty"` + URI *string `json:"uri,omitempty"` + Headers map[string][]string `json:"headers"` + SupportedGroupVersions *[]string `json:"supportedGroupVersions,omitempty"` + CultureInfo *string `json:"cultureInfo,omitempty"` + Parameters map[string]*string `json:"parameters"` + HTTPMethod *string `json:"httpMethod,omitempty"` +} + +// MarshalJSON is the custom marshaler for DppWorkerRequest. 
+func (dwr DppWorkerRequest) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if dwr.SubscriptionID != nil { + objectMap["subscriptionId"] = dwr.SubscriptionID + } + if dwr.URI != nil { + objectMap["uri"] = dwr.URI + } + if dwr.Headers != nil { + objectMap["headers"] = dwr.Headers + } + if dwr.SupportedGroupVersions != nil { + objectMap["supportedGroupVersions"] = dwr.SupportedGroupVersions + } + if dwr.CultureInfo != nil { + objectMap["cultureInfo"] = dwr.CultureInfo + } + if dwr.Parameters != nil { + objectMap["parameters"] = dwr.Parameters + } + if dwr.HTTPMethod != nil { + objectMap["httpMethod"] = dwr.HTTPMethod + } + return json.Marshal(objectMap) +} + +// Error the resource management error response. +type Error struct { + // AdditionalInfo - READ-ONLY; The error additional info. + AdditionalInfo *[]ErrorAdditionalInfo `json:"additionalInfo,omitempty"` + // Code - READ-ONLY; The error code. + Code *string `json:"code,omitempty"` + // Details - READ-ONLY; The error details. + Details *[]Error `json:"details,omitempty"` + // Message - READ-ONLY; The error message. + Message *string `json:"message,omitempty"` + // Target - READ-ONLY; The error target. + Target *string `json:"target,omitempty"` +} + +// ErrorAdditionalInfo the resource management error additional info. +type ErrorAdditionalInfo struct { + // Info - READ-ONLY; The additional info. + Info interface{} `json:"info,omitempty"` + // Type - READ-ONLY; The additional info type. + Type *string `json:"type,omitempty"` +} + +// ExportJobsResult the result for export jobs containing blob details. +type ExportJobsResult struct { + autorest.Response `json:"-"` + // BlobURL - READ-ONLY; URL of the blob into which the serialized string of list of jobs is exported. + BlobURL *string `json:"blobUrl,omitempty"` + // BlobSasKey - READ-ONLY; SAS key to access the blob. 
+ BlobSasKey *string `json:"blobSasKey,omitempty"` + // ExcelFileBlobURL - READ-ONLY; URL of the blob into which the ExcelFile is uploaded. + ExcelFileBlobURL *string `json:"excelFileBlobUrl,omitempty"` + // ExcelFileBlobSasKey - READ-ONLY; SAS key to access the ExcelFile blob. + ExcelFileBlobSasKey *string `json:"excelFileBlobSasKey,omitempty"` +} + +// ExportJobsTriggerFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type ExportJobsTriggerFuture struct { + azure.FutureAPI + // Result returns the result of the asynchronous operation. + // If the operation has not completed it will return an error. + Result func(ExportJobsClient) (autorest.Response, error) +} + +// UnmarshalJSON is the custom unmarshaller for CreateFuture. +func (future *ExportJobsTriggerFuture) UnmarshalJSON(body []byte) error { + var azFuture azure.Future + if err := json.Unmarshal(body, &azFuture); err != nil { + return err + } + future.FutureAPI = &azFuture + future.Result = future.result + return nil +} + +// result is the default implementation for ExportJobsTriggerFuture.Result. +func (future *ExportJobsTriggerFuture) result(client ExportJobsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.ExportJobsTriggerFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + ar.Response = future.Response() + err = azure.NewAsyncOpIncompleteError("dataprotection.ExportJobsTriggerFuture") + return + } + ar.Response = future.Response() + return +} + +// FeatureValidationRequest base class for feature object +type FeatureValidationRequest struct { + // FeatureType - backup support feature type. Possible values include: 'FeatureTypeInvalid', 'FeatureTypeDataSourceType' + FeatureType FeatureType `json:"featureType,omitempty"` + // FeatureName - backup support feature name. 
+ FeatureName *string `json:"featureName,omitempty"` + // ObjectType - Possible values include: 'ObjectTypeFeatureValidationRequestBase', 'ObjectTypeFeatureValidationRequest' + ObjectType ObjectTypeBasicFeatureValidationRequestBase `json:"objectType,omitempty"` +} + +// MarshalJSON is the custom marshaler for FeatureValidationRequest. +func (fvr FeatureValidationRequest) MarshalJSON() ([]byte, error) { + fvr.ObjectType = ObjectTypeFeatureValidationRequest + objectMap := make(map[string]interface{}) + if fvr.FeatureType != "" { + objectMap["featureType"] = fvr.FeatureType + } + if fvr.FeatureName != nil { + objectMap["featureName"] = fvr.FeatureName + } + if fvr.ObjectType != "" { + objectMap["objectType"] = fvr.ObjectType + } + return json.Marshal(objectMap) +} + +// AsFeatureValidationRequest is the BasicFeatureValidationRequestBase implementation for FeatureValidationRequest. +func (fvr FeatureValidationRequest) AsFeatureValidationRequest() (*FeatureValidationRequest, bool) { + return &fvr, true +} + +// AsFeatureValidationRequestBase is the BasicFeatureValidationRequestBase implementation for FeatureValidationRequest. +func (fvr FeatureValidationRequest) AsFeatureValidationRequestBase() (*FeatureValidationRequestBase, bool) { + return nil, false +} + +// AsBasicFeatureValidationRequestBase is the BasicFeatureValidationRequestBase implementation for FeatureValidationRequest. 
+func (fvr FeatureValidationRequest) AsBasicFeatureValidationRequestBase() (BasicFeatureValidationRequestBase, bool) { + return &fvr, true +} + +// BasicFeatureValidationRequestBase base class for Backup Feature support +type BasicFeatureValidationRequestBase interface { + AsFeatureValidationRequest() (*FeatureValidationRequest, bool) + AsFeatureValidationRequestBase() (*FeatureValidationRequestBase, bool) +} + +// FeatureValidationRequestBase base class for Backup Feature support +type FeatureValidationRequestBase struct { + // ObjectType - Possible values include: 'ObjectTypeFeatureValidationRequestBase', 'ObjectTypeFeatureValidationRequest' + ObjectType ObjectTypeBasicFeatureValidationRequestBase `json:"objectType,omitempty"` +} + +func unmarshalBasicFeatureValidationRequestBase(body []byte) (BasicFeatureValidationRequestBase, error) { + var m map[string]interface{} + err := json.Unmarshal(body, &m) + if err != nil { + return nil, err + } + + switch m["objectType"] { + case string(ObjectTypeFeatureValidationRequest): + var fvr FeatureValidationRequest + err := json.Unmarshal(body, &fvr) + return fvr, err + default: + var fvrb FeatureValidationRequestBase + err := json.Unmarshal(body, &fvrb) + return fvrb, err + } +} +func unmarshalBasicFeatureValidationRequestBaseArray(body []byte) ([]BasicFeatureValidationRequestBase, error) { + var rawMessages []*json.RawMessage + err := json.Unmarshal(body, &rawMessages) + if err != nil { + return nil, err + } + + fvrbArray := make([]BasicFeatureValidationRequestBase, len(rawMessages)) + + for index, rawMessage := range rawMessages { + fvrb, err := unmarshalBasicFeatureValidationRequestBase(*rawMessage) + if err != nil { + return nil, err + } + fvrbArray[index] = fvrb + } + return fvrbArray, nil +} + +// MarshalJSON is the custom marshaler for FeatureValidationRequestBase. 
+func (fvrb FeatureValidationRequestBase) MarshalJSON() ([]byte, error) { + fvrb.ObjectType = ObjectTypeFeatureValidationRequestBase + objectMap := make(map[string]interface{}) + if fvrb.ObjectType != "" { + objectMap["objectType"] = fvrb.ObjectType + } + return json.Marshal(objectMap) +} + +// AsFeatureValidationRequest is the BasicFeatureValidationRequestBase implementation for FeatureValidationRequestBase. +func (fvrb FeatureValidationRequestBase) AsFeatureValidationRequest() (*FeatureValidationRequest, bool) { + return nil, false +} + +// AsFeatureValidationRequestBase is the BasicFeatureValidationRequestBase implementation for FeatureValidationRequestBase. +func (fvrb FeatureValidationRequestBase) AsFeatureValidationRequestBase() (*FeatureValidationRequestBase, bool) { + return &fvrb, true +} + +// AsBasicFeatureValidationRequestBase is the BasicFeatureValidationRequestBase implementation for FeatureValidationRequestBase. +func (fvrb FeatureValidationRequestBase) AsBasicFeatureValidationRequestBase() (BasicFeatureValidationRequestBase, bool) { + return &fvrb, true +} + +// FeatureValidationResponse feature Validation Response +type FeatureValidationResponse struct { + // FeatureType - backup support feature type. Possible values include: 'FeatureTypeInvalid', 'FeatureTypeDataSourceType' + FeatureType FeatureType `json:"featureType,omitempty"` + // Features - Response features + Features *[]SupportedFeature `json:"features,omitempty"` + // ObjectType - Possible values include: 'ObjectTypeFeatureValidationResponseBase', 'ObjectTypeFeatureValidationResponse' + ObjectType ObjectTypeBasicFeatureValidationResponseBase `json:"objectType,omitempty"` +} + +// MarshalJSON is the custom marshaler for FeatureValidationResponse. 
+func (fvr FeatureValidationResponse) MarshalJSON() ([]byte, error) { + fvr.ObjectType = ObjectTypeFeatureValidationResponse + objectMap := make(map[string]interface{}) + if fvr.FeatureType != "" { + objectMap["featureType"] = fvr.FeatureType + } + if fvr.Features != nil { + objectMap["features"] = fvr.Features + } + if fvr.ObjectType != "" { + objectMap["objectType"] = fvr.ObjectType + } + return json.Marshal(objectMap) +} + +// AsFeatureValidationResponse is the BasicFeatureValidationResponseBase implementation for FeatureValidationResponse. +func (fvr FeatureValidationResponse) AsFeatureValidationResponse() (*FeatureValidationResponse, bool) { + return &fvr, true +} + +// AsFeatureValidationResponseBase is the BasicFeatureValidationResponseBase implementation for FeatureValidationResponse. +func (fvr FeatureValidationResponse) AsFeatureValidationResponseBase() (*FeatureValidationResponseBase, bool) { + return nil, false +} + +// AsBasicFeatureValidationResponseBase is the BasicFeatureValidationResponseBase implementation for FeatureValidationResponse. 
+func (fvr FeatureValidationResponse) AsBasicFeatureValidationResponseBase() (BasicFeatureValidationResponseBase, bool) { + return &fvr, true +} + +// BasicFeatureValidationResponseBase base class for Backup Feature support +type BasicFeatureValidationResponseBase interface { + AsFeatureValidationResponse() (*FeatureValidationResponse, bool) + AsFeatureValidationResponseBase() (*FeatureValidationResponseBase, bool) +} + +// FeatureValidationResponseBase base class for Backup Feature support +type FeatureValidationResponseBase struct { + autorest.Response `json:"-"` + // ObjectType - Possible values include: 'ObjectTypeFeatureValidationResponseBase', 'ObjectTypeFeatureValidationResponse' + ObjectType ObjectTypeBasicFeatureValidationResponseBase `json:"objectType,omitempty"` +} + +func unmarshalBasicFeatureValidationResponseBase(body []byte) (BasicFeatureValidationResponseBase, error) { + var m map[string]interface{} + err := json.Unmarshal(body, &m) + if err != nil { + return nil, err + } + + switch m["objectType"] { + case string(ObjectTypeFeatureValidationResponse): + var fvr FeatureValidationResponse + err := json.Unmarshal(body, &fvr) + return fvr, err + default: + var fvrb FeatureValidationResponseBase + err := json.Unmarshal(body, &fvrb) + return fvrb, err + } +} +func unmarshalBasicFeatureValidationResponseBaseArray(body []byte) ([]BasicFeatureValidationResponseBase, error) { + var rawMessages []*json.RawMessage + err := json.Unmarshal(body, &rawMessages) + if err != nil { + return nil, err + } + + fvrbArray := make([]BasicFeatureValidationResponseBase, len(rawMessages)) + + for index, rawMessage := range rawMessages { + fvrb, err := unmarshalBasicFeatureValidationResponseBase(*rawMessage) + if err != nil { + return nil, err + } + fvrbArray[index] = fvrb + } + return fvrbArray, nil +} + +// MarshalJSON is the custom marshaler for FeatureValidationResponseBase. 
+func (fvrb FeatureValidationResponseBase) MarshalJSON() ([]byte, error) { + fvrb.ObjectType = ObjectTypeFeatureValidationResponseBase + objectMap := make(map[string]interface{}) + if fvrb.ObjectType != "" { + objectMap["objectType"] = fvrb.ObjectType + } + return json.Marshal(objectMap) +} + +// AsFeatureValidationResponse is the BasicFeatureValidationResponseBase implementation for FeatureValidationResponseBase. +func (fvrb FeatureValidationResponseBase) AsFeatureValidationResponse() (*FeatureValidationResponse, bool) { + return nil, false +} + +// AsFeatureValidationResponseBase is the BasicFeatureValidationResponseBase implementation for FeatureValidationResponseBase. +func (fvrb FeatureValidationResponseBase) AsFeatureValidationResponseBase() (*FeatureValidationResponseBase, bool) { + return &fvrb, true +} + +// AsBasicFeatureValidationResponseBase is the BasicFeatureValidationResponseBase implementation for FeatureValidationResponseBase. +func (fvrb FeatureValidationResponseBase) AsBasicFeatureValidationResponseBase() (BasicFeatureValidationResponseBase, bool) { + return &fvrb, true +} + +// FeatureValidationResponseBaseModel ... +type FeatureValidationResponseBaseModel struct { + autorest.Response `json:"-"` + Value BasicFeatureValidationResponseBase `json:"value,omitempty"` +} + +// UnmarshalJSON is the custom unmarshaler for FeatureValidationResponseBaseModel struct. 
+func (fvrbm *FeatureValidationResponseBaseModel) UnmarshalJSON(body []byte) error { + fvrb, err := unmarshalBasicFeatureValidationResponseBase(body) + if err != nil { + return err + } + fvrbm.Value = fvrb + + return nil +} + +// ImmediateCopyOption immediate copy Option +type ImmediateCopyOption struct { + // ObjectType - Possible values include: 'ObjectTypeCopyOption', 'ObjectTypeCopyOnExpiryOption', 'ObjectTypeCustomCopyOption', 'ObjectTypeImmediateCopyOption' + ObjectType ObjectTypeBasicCopyOption `json:"objectType,omitempty"` +} + +// MarshalJSON is the custom marshaler for ImmediateCopyOption. +func (ico ImmediateCopyOption) MarshalJSON() ([]byte, error) { + ico.ObjectType = ObjectTypeImmediateCopyOption + objectMap := make(map[string]interface{}) + if ico.ObjectType != "" { + objectMap["objectType"] = ico.ObjectType + } + return json.Marshal(objectMap) +} + +// AsCopyOnExpiryOption is the BasicCopyOption implementation for ImmediateCopyOption. +func (ico ImmediateCopyOption) AsCopyOnExpiryOption() (*CopyOnExpiryOption, bool) { + return nil, false +} + +// AsCustomCopyOption is the BasicCopyOption implementation for ImmediateCopyOption. +func (ico ImmediateCopyOption) AsCustomCopyOption() (*CustomCopyOption, bool) { + return nil, false +} + +// AsImmediateCopyOption is the BasicCopyOption implementation for ImmediateCopyOption. +func (ico ImmediateCopyOption) AsImmediateCopyOption() (*ImmediateCopyOption, bool) { + return &ico, true +} + +// AsCopyOption is the BasicCopyOption implementation for ImmediateCopyOption. +func (ico ImmediateCopyOption) AsCopyOption() (*CopyOption, bool) { + return nil, false +} + +// AsBasicCopyOption is the BasicCopyOption implementation for ImmediateCopyOption. +func (ico ImmediateCopyOption) AsBasicCopyOption() (BasicCopyOption, bool) { + return &ico, true +} + +// InnerError inner Error +type InnerError struct { + // AdditionalInfo - Any Key value pairs that can be provided to the client for additional verbose information. 
+ AdditionalInfo map[string]*string `json:"additionalInfo"` + // Code - Unique code for this error + Code *string `json:"code,omitempty"` + // EmbeddedInnerError - Child Inner Error, to allow Nesting. + EmbeddedInnerError *InnerError `json:"embeddedInnerError,omitempty"` +} + +// MarshalJSON is the custom marshaler for InnerError. +func (ie InnerError) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if ie.AdditionalInfo != nil { + objectMap["additionalInfo"] = ie.AdditionalInfo + } + if ie.Code != nil { + objectMap["code"] = ie.Code + } + if ie.EmbeddedInnerError != nil { + objectMap["embeddedInnerError"] = ie.EmbeddedInnerError + } + return json.Marshal(objectMap) +} + +// BasicItemLevelRestoreCriteria class to contain criteria for item level restore +type BasicItemLevelRestoreCriteria interface { + AsRangeBasedItemLevelRestoreCriteria() (*RangeBasedItemLevelRestoreCriteria, bool) + AsItemLevelRestoreCriteria() (*ItemLevelRestoreCriteria, bool) +} + +// ItemLevelRestoreCriteria class to contain criteria for item level restore +type ItemLevelRestoreCriteria struct { + // ObjectType - Possible values include: 'ObjectTypeItemLevelRestoreCriteria', 'ObjectTypeRangeBasedItemLevelRestoreCriteria' + ObjectType ObjectTypeBasicItemLevelRestoreCriteria `json:"objectType,omitempty"` +} + +func unmarshalBasicItemLevelRestoreCriteria(body []byte) (BasicItemLevelRestoreCriteria, error) { + var m map[string]interface{} + err := json.Unmarshal(body, &m) + if err != nil { + return nil, err + } + + switch m["objectType"] { + case string(ObjectTypeRangeBasedItemLevelRestoreCriteria): + var rbilrc RangeBasedItemLevelRestoreCriteria + err := json.Unmarshal(body, &rbilrc) + return rbilrc, err + default: + var ilrc ItemLevelRestoreCriteria + err := json.Unmarshal(body, &ilrc) + return ilrc, err + } +} +func unmarshalBasicItemLevelRestoreCriteriaArray(body []byte) ([]BasicItemLevelRestoreCriteria, error) { + var rawMessages []*json.RawMessage + err := 
json.Unmarshal(body, &rawMessages) + if err != nil { + return nil, err + } + + ilrcArray := make([]BasicItemLevelRestoreCriteria, len(rawMessages)) + + for index, rawMessage := range rawMessages { + ilrc, err := unmarshalBasicItemLevelRestoreCriteria(*rawMessage) + if err != nil { + return nil, err + } + ilrcArray[index] = ilrc + } + return ilrcArray, nil +} + +// MarshalJSON is the custom marshaler for ItemLevelRestoreCriteria. +func (ilrc ItemLevelRestoreCriteria) MarshalJSON() ([]byte, error) { + ilrc.ObjectType = ObjectTypeItemLevelRestoreCriteria + objectMap := make(map[string]interface{}) + if ilrc.ObjectType != "" { + objectMap["objectType"] = ilrc.ObjectType + } + return json.Marshal(objectMap) +} + +// AsRangeBasedItemLevelRestoreCriteria is the BasicItemLevelRestoreCriteria implementation for ItemLevelRestoreCriteria. +func (ilrc ItemLevelRestoreCriteria) AsRangeBasedItemLevelRestoreCriteria() (*RangeBasedItemLevelRestoreCriteria, bool) { + return nil, false +} + +// AsItemLevelRestoreCriteria is the BasicItemLevelRestoreCriteria implementation for ItemLevelRestoreCriteria. +func (ilrc ItemLevelRestoreCriteria) AsItemLevelRestoreCriteria() (*ItemLevelRestoreCriteria, bool) { + return &ilrc, true +} + +// AsBasicItemLevelRestoreCriteria is the BasicItemLevelRestoreCriteria implementation for ItemLevelRestoreCriteria. 
+func (ilrc ItemLevelRestoreCriteria) AsBasicItemLevelRestoreCriteria() (BasicItemLevelRestoreCriteria, bool) { + return &ilrc, true +} + +// ItemLevelRestoreTargetInfo restore target info for Item level restore operation +type ItemLevelRestoreTargetInfo struct { + // RestoreCriteria - Restore Criteria + RestoreCriteria *[]BasicItemLevelRestoreCriteria `json:"restoreCriteria,omitempty"` + // DatasourceInfo - Information of target DS + DatasourceInfo *Datasource `json:"datasourceInfo,omitempty"` + // DatasourceSetInfo - Information of target DS Set + DatasourceSetInfo *DatasourceSet `json:"datasourceSetInfo,omitempty"` + // RecoveryOption - Recovery Option + RecoveryOption *string `json:"recoveryOption,omitempty"` + // RestoreLocation - Target Restore region + RestoreLocation *string `json:"restoreLocation,omitempty"` + // ObjectType - Possible values include: 'ObjectTypeRestoreTargetInfoBase', 'ObjectTypeItemLevelRestoreTargetInfo', 'ObjectTypeRestoreFilesTargetInfo', 'ObjectTypeRestoreTargetInfo' + ObjectType ObjectTypeBasicRestoreTargetInfoBase `json:"objectType,omitempty"` +} + +// MarshalJSON is the custom marshaler for ItemLevelRestoreTargetInfo. 
+func (ilrti ItemLevelRestoreTargetInfo) MarshalJSON() ([]byte, error) { + ilrti.ObjectType = ObjectTypeItemLevelRestoreTargetInfo + objectMap := make(map[string]interface{}) + if ilrti.RestoreCriteria != nil { + objectMap["restoreCriteria"] = ilrti.RestoreCriteria + } + if ilrti.DatasourceInfo != nil { + objectMap["datasourceInfo"] = ilrti.DatasourceInfo + } + if ilrti.DatasourceSetInfo != nil { + objectMap["datasourceSetInfo"] = ilrti.DatasourceSetInfo + } + if ilrti.RecoveryOption != nil { + objectMap["recoveryOption"] = ilrti.RecoveryOption + } + if ilrti.RestoreLocation != nil { + objectMap["restoreLocation"] = ilrti.RestoreLocation + } + if ilrti.ObjectType != "" { + objectMap["objectType"] = ilrti.ObjectType + } + return json.Marshal(objectMap) +} + +// AsItemLevelRestoreTargetInfo is the BasicRestoreTargetInfoBase implementation for ItemLevelRestoreTargetInfo. +func (ilrti ItemLevelRestoreTargetInfo) AsItemLevelRestoreTargetInfo() (*ItemLevelRestoreTargetInfo, bool) { + return &ilrti, true +} + +// AsRestoreFilesTargetInfo is the BasicRestoreTargetInfoBase implementation for ItemLevelRestoreTargetInfo. +func (ilrti ItemLevelRestoreTargetInfo) AsRestoreFilesTargetInfo() (*RestoreFilesTargetInfo, bool) { + return nil, false +} + +// AsRestoreTargetInfo is the BasicRestoreTargetInfoBase implementation for ItemLevelRestoreTargetInfo. +func (ilrti ItemLevelRestoreTargetInfo) AsRestoreTargetInfo() (*RestoreTargetInfo, bool) { + return nil, false +} + +// AsRestoreTargetInfoBase is the BasicRestoreTargetInfoBase implementation for ItemLevelRestoreTargetInfo. +func (ilrti ItemLevelRestoreTargetInfo) AsRestoreTargetInfoBase() (*RestoreTargetInfoBase, bool) { + return nil, false +} + +// AsBasicRestoreTargetInfoBase is the BasicRestoreTargetInfoBase implementation for ItemLevelRestoreTargetInfo. 
+func (ilrti ItemLevelRestoreTargetInfo) AsBasicRestoreTargetInfoBase() (BasicRestoreTargetInfoBase, bool) { + return &ilrti, true +} + +// UnmarshalJSON is the custom unmarshaler for ItemLevelRestoreTargetInfo struct. +func (ilrti *ItemLevelRestoreTargetInfo) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "restoreCriteria": + if v != nil { + restoreCriteria, err := unmarshalBasicItemLevelRestoreCriteriaArray(*v) + if err != nil { + return err + } + ilrti.RestoreCriteria = &restoreCriteria + } + case "datasourceInfo": + if v != nil { + var datasourceInfo Datasource + err = json.Unmarshal(*v, &datasourceInfo) + if err != nil { + return err + } + ilrti.DatasourceInfo = &datasourceInfo + } + case "datasourceSetInfo": + if v != nil { + var datasourceSetInfo DatasourceSet + err = json.Unmarshal(*v, &datasourceSetInfo) + if err != nil { + return err + } + ilrti.DatasourceSetInfo = &datasourceSetInfo + } + case "recoveryOption": + if v != nil { + var recoveryOption string + err = json.Unmarshal(*v, &recoveryOption) + if err != nil { + return err + } + ilrti.RecoveryOption = &recoveryOption + } + case "restoreLocation": + if v != nil { + var restoreLocation string + err = json.Unmarshal(*v, &restoreLocation) + if err != nil { + return err + } + ilrti.RestoreLocation = &restoreLocation + } + case "objectType": + if v != nil { + var objectType ObjectTypeBasicRestoreTargetInfoBase + err = json.Unmarshal(*v, &objectType) + if err != nil { + return err + } + ilrti.ObjectType = objectType + } + } + } + + return nil +} + +// JobExtendedInfo extended Information about the job +type JobExtendedInfo struct { + // AdditionalDetails - Job's Additional Details + AdditionalDetails map[string]*string `json:"additionalDetails"` + // BackupInstanceState - READ-ONLY; State of the Backup Instance + BackupInstanceState *string 
`json:"backupInstanceState,omitempty"` + // DataTransferredInBytes - READ-ONLY; Number of bytes transferred + DataTransferredInBytes *float64 `json:"dataTransferredInBytes,omitempty"` + // RecoveryDestination - READ-ONLY; Destination where restore is done + RecoveryDestination *string `json:"recoveryDestination,omitempty"` + // SourceRecoverPoint - READ-ONLY; Details of the Source Recovery Point + SourceRecoverPoint *RestoreJobRecoveryPointDetails `json:"sourceRecoverPoint,omitempty"` + // SubTasks - READ-ONLY; List of Sub Tasks of the job + SubTasks *[]JobSubTask `json:"subTasks,omitempty"` + // TargetRecoverPoint - READ-ONLY; Details of the Target Recovery Point + TargetRecoverPoint *RestoreJobRecoveryPointDetails `json:"targetRecoverPoint,omitempty"` +} + +// MarshalJSON is the custom marshaler for JobExtendedInfo. +func (jei JobExtendedInfo) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if jei.AdditionalDetails != nil { + objectMap["additionalDetails"] = jei.AdditionalDetails + } + return json.Marshal(objectMap) +} + +// JobSubTask details of Job's Sub Task +type JobSubTask struct { + // AdditionalDetails - Additional details of Sub Tasks + AdditionalDetails map[string]*string `json:"additionalDetails"` + // TaskID - Task Id of the Sub Task + TaskID *int32 `json:"taskId,omitempty"` + // TaskName - Name of the Sub Task + TaskName *string `json:"taskName,omitempty"` + // TaskProgress - READ-ONLY; Progress of the Sub Task + TaskProgress *string `json:"taskProgress,omitempty"` + // TaskStatus - Status of the Sub Task + TaskStatus *string `json:"taskStatus,omitempty"` +} + +// MarshalJSON is the custom marshaler for JobSubTask. 
func (jst JobSubTask) MarshalJSON() ([]byte, error) {
	// TaskProgress is READ-ONLY and intentionally omitted from the request payload.
	objectMap := make(map[string]interface{})
	if jst.AdditionalDetails != nil {
		objectMap["additionalDetails"] = jst.AdditionalDetails
	}
	if jst.TaskID != nil {
		objectMap["taskId"] = jst.TaskID
	}
	if jst.TaskName != nil {
		objectMap["taskName"] = jst.TaskName
	}
	if jst.TaskStatus != nil {
		objectMap["taskStatus"] = jst.TaskStatus
	}
	return json.Marshal(objectMap)
}

// OperationExtendedInfo operation Extended Info
type OperationExtendedInfo struct {
	// ObjectType - This property will be used as the discriminator for deciding the specific types in the polymorphic chain of types.
	ObjectType *string `json:"objectType,omitempty"`
}

// OperationJobExtendedInfo operation Job Extended Info
type OperationJobExtendedInfo struct {
	autorest.Response `json:"-"`
	// JobID - Arm Id of the job created for this operation.
	JobID *string `json:"jobId,omitempty"`
	// ObjectType - This property will be used as the discriminator for deciding the specific types in the polymorphic chain of types.
	ObjectType *string `json:"objectType,omitempty"`
}

// OperationResource operation Resource
type OperationResource struct {
	autorest.Response `json:"-"`
	// EndTime - End time of the operation
	EndTime *date.Time `json:"endTime,omitempty"`
	// Error - Required if status == failed or status == canceled. This is the OData v4 error format, used by the RPC and will go into the v2.2 Azure REST API guidelines.
	// The full set of optional properties (e.g. inner errors / details) can be found in the "Error Response" section.
	Error *Error `json:"error,omitempty"`
	// ID - It should match what is used to GET the operation result
	ID *string `json:"id,omitempty"`
	// Name - It must match the last segment of the "id" field, and will typically be a GUID / system generated value
	Name *string `json:"name,omitempty"`
	// Properties - Extended info of the operation
	Properties *OperationExtendedInfo `json:"properties,omitempty"`
	// StartTime - Start time of the operation
	StartTime *date.Time `json:"startTime,omitempty"`
	Status *string `json:"status,omitempty"`
}

// PatchResourceRequestInput patch Request content for Microsoft.DataProtection resources
type PatchResourceRequestInput struct {
	// Identity - Input Managed Identity Details
	Identity *DppIdentityDetails `json:"identity,omitempty"`
	// Tags - Resource tags.
	Tags map[string]*string `json:"tags"`
}

// MarshalJSON is the custom marshaler for PatchResourceRequestInput.
func (prri PatchResourceRequestInput) MarshalJSON() ([]byte, error) {
	objectMap := make(map[string]interface{})
	if prri.Identity != nil {
		objectMap["identity"] = prri.Identity
	}
	if prri.Tags != nil {
		objectMap["tags"] = prri.Tags
	}
	return json.Marshal(objectMap)
}

// PolicyInfo policy Info in backupInstance
type PolicyInfo struct {
	PolicyID *string `json:"policyId,omitempty"`
	// PolicyVersion - READ-ONLY
	PolicyVersion *string `json:"policyVersion,omitempty"`
	// PolicyParameters - Policy parameters for the backup instance
	PolicyParameters *PolicyParameters `json:"policyParameters,omitempty"`
}

// MarshalJSON is the custom marshaler for PolicyInfo.
func (pi PolicyInfo) MarshalJSON() ([]byte, error) {
	// PolicyVersion is READ-ONLY and intentionally omitted from the request payload.
	objectMap := make(map[string]interface{})
	if pi.PolicyID != nil {
		objectMap["policyId"] = pi.PolicyID
	}
	if pi.PolicyParameters != nil {
		objectMap["policyParameters"] = pi.PolicyParameters
	}
	return json.Marshal(objectMap)
}

// PolicyParameters parameters in Policy
type PolicyParameters struct {
	// DataStoreParametersList - Gets or sets the DataStore Parameters
	DataStoreParametersList *[]BasicDataStoreParameters `json:"dataStoreParametersList,omitempty"`
}

// UnmarshalJSON is the custom unmarshaler for PolicyParameters struct.
// The list is polymorphic, so it is decoded via the discriminator-aware helper.
func (pp *PolicyParameters) UnmarshalJSON(body []byte) error {
	var m map[string]*json.RawMessage
	err := json.Unmarshal(body, &m)
	if err != nil {
		return err
	}
	for k, v := range m {
		switch k {
		case "dataStoreParametersList":
			if v != nil {
				dataStoreParametersList, err := unmarshalBasicDataStoreParametersArray(*v)
				if err != nil {
					return err
				}
				pp.DataStoreParametersList = &dataStoreParametersList
			}
		}
	}

	return nil
}

// ProtectionStatusDetails protection status details
type ProtectionStatusDetails struct {
	// ErrorDetails - Specifies the protection status error of the resource
	ErrorDetails *UserFacingError `json:"errorDetails,omitempty"`
	// Status - Specifies the protection status of the resource. Possible values include: 'StatusConfiguringProtection', 'StatusConfiguringProtectionFailed', 'StatusProtectionConfigured', 'StatusProtectionStopped', 'StatusSoftDeleted', 'StatusSoftDeleting'
	Status Status `json:"status,omitempty"`
}

// RangeBasedItemLevelRestoreCriteria item Level target info for restore operation
type RangeBasedItemLevelRestoreCriteria struct {
	// MinMatchingValue - minimum value for range prefix match
	MinMatchingValue *string `json:"minMatchingValue,omitempty"`
	// MaxMatchingValue - maximum value for range prefix match
	MaxMatchingValue *string `json:"maxMatchingValue,omitempty"`
	// ObjectType - Possible values include: 'ObjectTypeItemLevelRestoreCriteria', 'ObjectTypeRangeBasedItemLevelRestoreCriteria'
	ObjectType ObjectTypeBasicItemLevelRestoreCriteria `json:"objectType,omitempty"`
}

// MarshalJSON is the custom marshaler for RangeBasedItemLevelRestoreCriteria.
// The objectType discriminator is forced to this type's value before encoding.
func (rbilrc RangeBasedItemLevelRestoreCriteria) MarshalJSON() ([]byte, error) {
	rbilrc.ObjectType = ObjectTypeRangeBasedItemLevelRestoreCriteria
	objectMap := make(map[string]interface{})
	if rbilrc.MinMatchingValue != nil {
		objectMap["minMatchingValue"] = rbilrc.MinMatchingValue
	}
	if rbilrc.MaxMatchingValue != nil {
		objectMap["maxMatchingValue"] = rbilrc.MaxMatchingValue
	}
	if rbilrc.ObjectType != "" {
		objectMap["objectType"] = rbilrc.ObjectType
	}
	return json.Marshal(objectMap)
}

// AsRangeBasedItemLevelRestoreCriteria is the BasicItemLevelRestoreCriteria implementation for RangeBasedItemLevelRestoreCriteria.
func (rbilrc RangeBasedItemLevelRestoreCriteria) AsRangeBasedItemLevelRestoreCriteria() (*RangeBasedItemLevelRestoreCriteria, bool) {
	return &rbilrc, true
}

// AsItemLevelRestoreCriteria is the BasicItemLevelRestoreCriteria implementation for RangeBasedItemLevelRestoreCriteria.
+func (rbilrc RangeBasedItemLevelRestoreCriteria) AsItemLevelRestoreCriteria() (*ItemLevelRestoreCriteria, bool) { + return nil, false +} + +// AsBasicItemLevelRestoreCriteria is the BasicItemLevelRestoreCriteria implementation for RangeBasedItemLevelRestoreCriteria. +func (rbilrc RangeBasedItemLevelRestoreCriteria) AsBasicItemLevelRestoreCriteria() (BasicItemLevelRestoreCriteria, bool) { + return &rbilrc, true +} + +// RecoveryPointDataStoreDetails recoveryPoint datastore details +type RecoveryPointDataStoreDetails struct { + CreationTime *date.Time `json:"creationTime,omitempty"` + ExpiryTime *date.Time `json:"expiryTime,omitempty"` + ID *string `json:"id,omitempty"` + MetaData *string `json:"metaData,omitempty"` + State *string `json:"state,omitempty"` + Type *string `json:"type,omitempty"` + Visible *bool `json:"visible,omitempty"` + // RehydrationExpiryTime - READ-ONLY + RehydrationExpiryTime *date.Time `json:"rehydrationExpiryTime,omitempty"` + // RehydrationStatus - READ-ONLY; Possible values include: 'CREATEINPROGRESS', 'COMPLETED', 'DELETEINPROGRESS', 'DELETED', 'FAILED' + RehydrationStatus RehydrationStatus `json:"rehydrationStatus,omitempty"` +} + +// MarshalJSON is the custom marshaler for RecoveryPointDataStoreDetails. +func (rpdsd RecoveryPointDataStoreDetails) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if rpdsd.CreationTime != nil { + objectMap["creationTime"] = rpdsd.CreationTime + } + if rpdsd.ExpiryTime != nil { + objectMap["expiryTime"] = rpdsd.ExpiryTime + } + if rpdsd.ID != nil { + objectMap["id"] = rpdsd.ID + } + if rpdsd.MetaData != nil { + objectMap["metaData"] = rpdsd.MetaData + } + if rpdsd.State != nil { + objectMap["state"] = rpdsd.State + } + if rpdsd.Type != nil { + objectMap["type"] = rpdsd.Type + } + if rpdsd.Visible != nil { + objectMap["visible"] = rpdsd.Visible + } + return json.Marshal(objectMap) +} + +// RecoveryPointsFilters ... 
+type RecoveryPointsFilters struct { + RestorePointDataStoreID *string `json:"restorePointDataStoreId,omitempty"` + IsVisible *bool `json:"isVisible,omitempty"` + StartDate *string `json:"startDate,omitempty"` + EndDate *string `json:"endDate,omitempty"` + ExtendedInfo *bool `json:"extendedInfo,omitempty"` + RestorePointState *string `json:"restorePointState,omitempty"` +} + +// RestorableTimeRange ... +type RestorableTimeRange struct { + // StartTime - Start time for the available restore range + StartTime *string `json:"startTime,omitempty"` + // EndTime - End time for the available restore range + EndTime *string `json:"endTime,omitempty"` + ObjectType *string `json:"objectType,omitempty"` +} + +// RestoreFilesTargetInfo class encapsulating restore as files target parameters +type RestoreFilesTargetInfo struct { + // TargetDetails - Destination of RestoreAsFiles operation, when destination is not a datasource + TargetDetails *TargetDetails `json:"targetDetails,omitempty"` + // RecoveryOption - Recovery Option + RecoveryOption *string `json:"recoveryOption,omitempty"` + // RestoreLocation - Target Restore region + RestoreLocation *string `json:"restoreLocation,omitempty"` + // ObjectType - Possible values include: 'ObjectTypeRestoreTargetInfoBase', 'ObjectTypeItemLevelRestoreTargetInfo', 'ObjectTypeRestoreFilesTargetInfo', 'ObjectTypeRestoreTargetInfo' + ObjectType ObjectTypeBasicRestoreTargetInfoBase `json:"objectType,omitempty"` +} + +// MarshalJSON is the custom marshaler for RestoreFilesTargetInfo. 
+func (rfti RestoreFilesTargetInfo) MarshalJSON() ([]byte, error) { + rfti.ObjectType = ObjectTypeRestoreFilesTargetInfo + objectMap := make(map[string]interface{}) + if rfti.TargetDetails != nil { + objectMap["targetDetails"] = rfti.TargetDetails + } + if rfti.RecoveryOption != nil { + objectMap["recoveryOption"] = rfti.RecoveryOption + } + if rfti.RestoreLocation != nil { + objectMap["restoreLocation"] = rfti.RestoreLocation + } + if rfti.ObjectType != "" { + objectMap["objectType"] = rfti.ObjectType + } + return json.Marshal(objectMap) +} + +// AsItemLevelRestoreTargetInfo is the BasicRestoreTargetInfoBase implementation for RestoreFilesTargetInfo. +func (rfti RestoreFilesTargetInfo) AsItemLevelRestoreTargetInfo() (*ItemLevelRestoreTargetInfo, bool) { + return nil, false +} + +// AsRestoreFilesTargetInfo is the BasicRestoreTargetInfoBase implementation for RestoreFilesTargetInfo. +func (rfti RestoreFilesTargetInfo) AsRestoreFilesTargetInfo() (*RestoreFilesTargetInfo, bool) { + return &rfti, true +} + +// AsRestoreTargetInfo is the BasicRestoreTargetInfoBase implementation for RestoreFilesTargetInfo. +func (rfti RestoreFilesTargetInfo) AsRestoreTargetInfo() (*RestoreTargetInfo, bool) { + return nil, false +} + +// AsRestoreTargetInfoBase is the BasicRestoreTargetInfoBase implementation for RestoreFilesTargetInfo. +func (rfti RestoreFilesTargetInfo) AsRestoreTargetInfoBase() (*RestoreTargetInfoBase, bool) { + return nil, false +} + +// AsBasicRestoreTargetInfoBase is the BasicRestoreTargetInfoBase implementation for RestoreFilesTargetInfo. +func (rfti RestoreFilesTargetInfo) AsBasicRestoreTargetInfoBase() (BasicRestoreTargetInfoBase, bool) { + return &rfti, true +} + +// RestoreJobRecoveryPointDetails ... 
+type RestoreJobRecoveryPointDetails struct { + RecoveryPointID *string `json:"recoveryPointID,omitempty"` + RecoveryPointTime *date.Time `json:"recoveryPointTime,omitempty"` +} + +// RestoreTargetInfo class encapsulating restore target parameters +type RestoreTargetInfo struct { + // DatasourceInfo - Information of target DS + DatasourceInfo *Datasource `json:"datasourceInfo,omitempty"` + // DatasourceSetInfo - Information of target DS Set + DatasourceSetInfo *DatasourceSet `json:"datasourceSetInfo,omitempty"` + // RecoveryOption - Recovery Option + RecoveryOption *string `json:"recoveryOption,omitempty"` + // RestoreLocation - Target Restore region + RestoreLocation *string `json:"restoreLocation,omitempty"` + // ObjectType - Possible values include: 'ObjectTypeRestoreTargetInfoBase', 'ObjectTypeItemLevelRestoreTargetInfo', 'ObjectTypeRestoreFilesTargetInfo', 'ObjectTypeRestoreTargetInfo' + ObjectType ObjectTypeBasicRestoreTargetInfoBase `json:"objectType,omitempty"` +} + +// MarshalJSON is the custom marshaler for RestoreTargetInfo. +func (rti RestoreTargetInfo) MarshalJSON() ([]byte, error) { + rti.ObjectType = ObjectTypeRestoreTargetInfo + objectMap := make(map[string]interface{}) + if rti.DatasourceInfo != nil { + objectMap["datasourceInfo"] = rti.DatasourceInfo + } + if rti.DatasourceSetInfo != nil { + objectMap["datasourceSetInfo"] = rti.DatasourceSetInfo + } + if rti.RecoveryOption != nil { + objectMap["recoveryOption"] = rti.RecoveryOption + } + if rti.RestoreLocation != nil { + objectMap["restoreLocation"] = rti.RestoreLocation + } + if rti.ObjectType != "" { + objectMap["objectType"] = rti.ObjectType + } + return json.Marshal(objectMap) +} + +// AsItemLevelRestoreTargetInfo is the BasicRestoreTargetInfoBase implementation for RestoreTargetInfo. 
func (rti RestoreTargetInfo) AsItemLevelRestoreTargetInfo() (*ItemLevelRestoreTargetInfo, bool) {
	return nil, false
}

// AsRestoreFilesTargetInfo is the BasicRestoreTargetInfoBase implementation for RestoreTargetInfo.
func (rti RestoreTargetInfo) AsRestoreFilesTargetInfo() (*RestoreFilesTargetInfo, bool) {
	return nil, false
}

// AsRestoreTargetInfo is the BasicRestoreTargetInfoBase implementation for RestoreTargetInfo.
func (rti RestoreTargetInfo) AsRestoreTargetInfo() (*RestoreTargetInfo, bool) {
	return &rti, true
}

// AsRestoreTargetInfoBase is the BasicRestoreTargetInfoBase implementation for RestoreTargetInfo.
func (rti RestoreTargetInfo) AsRestoreTargetInfoBase() (*RestoreTargetInfoBase, bool) {
	return nil, false
}

// AsBasicRestoreTargetInfoBase is the BasicRestoreTargetInfoBase implementation for RestoreTargetInfo.
func (rti RestoreTargetInfo) AsBasicRestoreTargetInfoBase() (BasicRestoreTargetInfoBase, bool) {
	return &rti, true
}

// BasicRestoreTargetInfoBase base class common to RestoreTargetInfo and RestoreFilesTargetInfo
type BasicRestoreTargetInfoBase interface {
	AsItemLevelRestoreTargetInfo() (*ItemLevelRestoreTargetInfo, bool)
	AsRestoreFilesTargetInfo() (*RestoreFilesTargetInfo, bool)
	AsRestoreTargetInfo() (*RestoreTargetInfo, bool)
	AsRestoreTargetInfoBase() (*RestoreTargetInfoBase, bool)
}

// RestoreTargetInfoBase base class common to RestoreTargetInfo and RestoreFilesTargetInfo
type RestoreTargetInfoBase struct {
	// RecoveryOption - Recovery Option
	RecoveryOption *string `json:"recoveryOption,omitempty"`
	// RestoreLocation - Target Restore region
	RestoreLocation *string `json:"restoreLocation,omitempty"`
	// ObjectType - Possible values include: 'ObjectTypeRestoreTargetInfoBase', 'ObjectTypeItemLevelRestoreTargetInfo', 'ObjectTypeRestoreFilesTargetInfo', 'ObjectTypeRestoreTargetInfo'
	ObjectType ObjectTypeBasicRestoreTargetInfoBase `json:"objectType,omitempty"`
}

// unmarshalBasicRestoreTargetInfoBase decodes body into the concrete type named
// by its "objectType" discriminator, falling back to the base type when the
// discriminator is absent or unrecognised.
func unmarshalBasicRestoreTargetInfoBase(body []byte) (BasicRestoreTargetInfoBase, error) {
	var m map[string]interface{}
	err := json.Unmarshal(body, &m)
	if err != nil {
		return nil, err
	}

	switch m["objectType"] {
	case string(ObjectTypeItemLevelRestoreTargetInfo):
		var ilrti ItemLevelRestoreTargetInfo
		err := json.Unmarshal(body, &ilrti)
		return ilrti, err
	case string(ObjectTypeRestoreFilesTargetInfo):
		var rfti RestoreFilesTargetInfo
		err := json.Unmarshal(body, &rfti)
		return rfti, err
	case string(ObjectTypeRestoreTargetInfo):
		var rti RestoreTargetInfo
		err := json.Unmarshal(body, &rti)
		return rti, err
	default:
		var rtib RestoreTargetInfoBase
		err := json.Unmarshal(body, &rtib)
		return rtib, err
	}
}

// unmarshalBasicRestoreTargetInfoBaseArray decodes a JSON array element-by-element
// through unmarshalBasicRestoreTargetInfoBase.
func unmarshalBasicRestoreTargetInfoBaseArray(body []byte) ([]BasicRestoreTargetInfoBase, error) {
	var rawMessages []*json.RawMessage
	err := json.Unmarshal(body, &rawMessages)
	if err != nil {
		return nil, err
	}

	rtibArray := make([]BasicRestoreTargetInfoBase, len(rawMessages))

	for index, rawMessage := range rawMessages {
		rtib, err := unmarshalBasicRestoreTargetInfoBase(*rawMessage)
		if err != nil {
			return nil, err
		}
		rtibArray[index] = rtib
	}
	return rtibArray, nil
}

// MarshalJSON is the custom marshaler for RestoreTargetInfoBase.
func (rtib RestoreTargetInfoBase) MarshalJSON() ([]byte, error) {
	rtib.ObjectType = ObjectTypeRestoreTargetInfoBase
	objectMap := make(map[string]interface{})
	if rtib.RecoveryOption != nil {
		objectMap["recoveryOption"] = rtib.RecoveryOption
	}
	if rtib.RestoreLocation != nil {
		objectMap["restoreLocation"] = rtib.RestoreLocation
	}
	if rtib.ObjectType != "" {
		objectMap["objectType"] = rtib.ObjectType
	}
	return json.Marshal(objectMap)
}

// AsItemLevelRestoreTargetInfo is the BasicRestoreTargetInfoBase implementation for RestoreTargetInfoBase.
+func (rtib RestoreTargetInfoBase) AsItemLevelRestoreTargetInfo() (*ItemLevelRestoreTargetInfo, bool) { + return nil, false +} + +// AsRestoreFilesTargetInfo is the BasicRestoreTargetInfoBase implementation for RestoreTargetInfoBase. +func (rtib RestoreTargetInfoBase) AsRestoreFilesTargetInfo() (*RestoreFilesTargetInfo, bool) { + return nil, false +} + +// AsRestoreTargetInfo is the BasicRestoreTargetInfoBase implementation for RestoreTargetInfoBase. +func (rtib RestoreTargetInfoBase) AsRestoreTargetInfo() (*RestoreTargetInfo, bool) { + return nil, false +} + +// AsRestoreTargetInfoBase is the BasicRestoreTargetInfoBase implementation for RestoreTargetInfoBase. +func (rtib RestoreTargetInfoBase) AsRestoreTargetInfoBase() (*RestoreTargetInfoBase, bool) { + return &rtib, true +} + +// AsBasicRestoreTargetInfoBase is the BasicRestoreTargetInfoBase implementation for RestoreTargetInfoBase. +func (rtib RestoreTargetInfoBase) AsBasicRestoreTargetInfoBase() (BasicRestoreTargetInfoBase, bool) { + return &rtib, true +} + +// RetentionTag retention tag +type RetentionTag struct { + // ETag - READ-ONLY; Retention Tag version. + ETag *string `json:"eTag,omitempty"` + // ID - READ-ONLY; Retention Tag version. + ID *string `json:"id,omitempty"` + // TagName - Retention Tag Name to relate it to retention rule. + TagName *string `json:"tagName,omitempty"` +} + +// MarshalJSON is the custom marshaler for RetentionTag. 
+func (rt RetentionTag) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if rt.TagName != nil { + objectMap["tagName"] = rt.TagName + } + return json.Marshal(objectMap) +} + +// ScheduleBasedBackupCriteria schedule based backup criteria +type ScheduleBasedBackupCriteria struct { + // AbsoluteCriteria - it contains absolute values like "AllBackup" / "FirstOfDay" / "FirstOfWeek" / "FirstOfMonth" + // and should be part of AbsoluteMarker enum + AbsoluteCriteria *[]AbsoluteMarker `json:"absoluteCriteria,omitempty"` + // DaysOfMonth - This is day of the month from 1 to 28 other wise last of month + DaysOfMonth *[]Day `json:"daysOfMonth,omitempty"` + // DaysOfTheWeek - It should be Sunday/Monday/T..../Saturday + DaysOfTheWeek *[]DayOfWeek `json:"daysOfTheWeek,omitempty"` + // MonthsOfYear - It should be January/February/....../December + MonthsOfYear *[]Month `json:"monthsOfYear,omitempty"` + // ScheduleTimes - List of schedule times for backup + ScheduleTimes *[]date.Time `json:"scheduleTimes,omitempty"` + // WeeksOfTheMonth - It should be First/Second/Third/Fourth/Last + WeeksOfTheMonth *[]WeekNumber `json:"weeksOfTheMonth,omitempty"` + // ObjectType - Possible values include: 'ObjectTypeBackupCriteria', 'ObjectTypeScheduleBasedBackupCriteria' + ObjectType ObjectTypeBasicBackupCriteria `json:"objectType,omitempty"` +} + +// MarshalJSON is the custom marshaler for ScheduleBasedBackupCriteria. 
func (sbbc ScheduleBasedBackupCriteria) MarshalJSON() ([]byte, error) {
	// Pin the discriminator to this concrete type before encoding.
	sbbc.ObjectType = ObjectTypeScheduleBasedBackupCriteria
	objectMap := make(map[string]interface{})
	if sbbc.AbsoluteCriteria != nil {
		objectMap["absoluteCriteria"] = sbbc.AbsoluteCriteria
	}
	if sbbc.DaysOfMonth != nil {
		objectMap["daysOfMonth"] = sbbc.DaysOfMonth
	}
	if sbbc.DaysOfTheWeek != nil {
		objectMap["daysOfTheWeek"] = sbbc.DaysOfTheWeek
	}
	if sbbc.MonthsOfYear != nil {
		objectMap["monthsOfYear"] = sbbc.MonthsOfYear
	}
	if sbbc.ScheduleTimes != nil {
		objectMap["scheduleTimes"] = sbbc.ScheduleTimes
	}
	if sbbc.WeeksOfTheMonth != nil {
		objectMap["weeksOfTheMonth"] = sbbc.WeeksOfTheMonth
	}
	if sbbc.ObjectType != "" {
		objectMap["objectType"] = sbbc.ObjectType
	}
	return json.Marshal(objectMap)
}

// AsScheduleBasedBackupCriteria is the BasicBackupCriteria implementation for ScheduleBasedBackupCriteria.
func (sbbc ScheduleBasedBackupCriteria) AsScheduleBasedBackupCriteria() (*ScheduleBasedBackupCriteria, bool) {
	return &sbbc, true
}

// AsBackupCriteria is the BasicBackupCriteria implementation for ScheduleBasedBackupCriteria.
func (sbbc ScheduleBasedBackupCriteria) AsBackupCriteria() (*BackupCriteria, bool) {
	return nil, false
}

// AsBasicBackupCriteria is the BasicBackupCriteria implementation for ScheduleBasedBackupCriteria.
func (sbbc ScheduleBasedBackupCriteria) AsBasicBackupCriteria() (BasicBackupCriteria, bool) {
	return &sbbc, true
}

// ScheduleBasedTriggerContext schedule based trigger context
type ScheduleBasedTriggerContext struct {
	// Schedule - Schedule for this backup
	Schedule *BackupSchedule `json:"schedule,omitempty"`
	// TaggingCriteria - List of tags that can be applicable for given schedule.
	TaggingCriteria *[]TaggingCriteria `json:"taggingCriteria,omitempty"`
	// ObjectType - Possible values include: 'ObjectTypeTriggerContext', 'ObjectTypeAdhocBasedTriggerContext', 'ObjectTypeScheduleBasedTriggerContext'
	ObjectType ObjectTypeBasicTriggerContext `json:"objectType,omitempty"`
}

// MarshalJSON is the custom marshaler for ScheduleBasedTriggerContext.
func (sbtc ScheduleBasedTriggerContext) MarshalJSON() ([]byte, error) {
	sbtc.ObjectType = ObjectTypeScheduleBasedTriggerContext
	objectMap := make(map[string]interface{})
	if sbtc.Schedule != nil {
		objectMap["schedule"] = sbtc.Schedule
	}
	if sbtc.TaggingCriteria != nil {
		objectMap["taggingCriteria"] = sbtc.TaggingCriteria
	}
	if sbtc.ObjectType != "" {
		objectMap["objectType"] = sbtc.ObjectType
	}
	return json.Marshal(objectMap)
}

// AsAdhocBasedTriggerContext is the BasicTriggerContext implementation for ScheduleBasedTriggerContext.
func (sbtc ScheduleBasedTriggerContext) AsAdhocBasedTriggerContext() (*AdhocBasedTriggerContext, bool) {
	return nil, false
}

// AsScheduleBasedTriggerContext is the BasicTriggerContext implementation for ScheduleBasedTriggerContext.
func (sbtc ScheduleBasedTriggerContext) AsScheduleBasedTriggerContext() (*ScheduleBasedTriggerContext, bool) {
	return &sbtc, true
}

// AsTriggerContext is the BasicTriggerContext implementation for ScheduleBasedTriggerContext.
func (sbtc ScheduleBasedTriggerContext) AsTriggerContext() (*TriggerContext, bool) {
	return nil, false
}

// AsBasicTriggerContext is the BasicTriggerContext implementation for ScheduleBasedTriggerContext.
func (sbtc ScheduleBasedTriggerContext) AsBasicTriggerContext() (BasicTriggerContext, bool) {
	return &sbtc, true
}

// SourceLifeCycle source LifeCycle
type SourceLifeCycle struct {
	DeleteAfter                 BasicDeleteOption    `json:"deleteAfter,omitempty"`
	SourceDataStore             *DataStoreInfoBase   `json:"sourceDataStore,omitempty"`
	TargetDataStoreCopySettings *[]TargetCopySetting `json:"targetDataStoreCopySettings,omitempty"`
}

// UnmarshalJSON is the custom unmarshaler for SourceLifeCycle struct.
// "deleteAfter" is polymorphic and is decoded via the discriminator-aware helper.
func (slc *SourceLifeCycle) UnmarshalJSON(body []byte) error {
	var m map[string]*json.RawMessage
	err := json.Unmarshal(body, &m)
	if err != nil {
		return err
	}
	for k, v := range m {
		switch k {
		case "deleteAfter":
			if v != nil {
				deleteAfter, err := unmarshalBasicDeleteOption(*v)
				if err != nil {
					return err
				}
				slc.DeleteAfter = deleteAfter
			}
		case "sourceDataStore":
			if v != nil {
				var sourceDataStore DataStoreInfoBase
				err = json.Unmarshal(*v, &sourceDataStore)
				if err != nil {
					return err
				}
				slc.SourceDataStore = &sourceDataStore
			}
		case "targetDataStoreCopySettings":
			if v != nil {
				var targetDataStoreCopySettings []TargetCopySetting
				err = json.Unmarshal(*v, &targetDataStoreCopySettings)
				if err != nil {
					return err
				}
				slc.TargetDataStoreCopySettings = &targetDataStoreCopySettings
			}
		}
	}

	return nil
}

// StorageSetting storage setting
type StorageSetting struct {
	// DatastoreType - Gets or sets the type of the datastore. Possible values include: 'StorageSettingStoreTypesArchiveStore', 'StorageSettingStoreTypesSnapshotStore', 'StorageSettingStoreTypesVaultStore'
	DatastoreType StorageSettingStoreTypes `json:"datastoreType,omitempty"`
	// Type - Gets or sets the type. Possible values include: 'GeoRedundant', 'LocallyRedundant'
	Type StorageSettingTypes `json:"type,omitempty"`
}

// SupportedFeature elements class for feature request
type SupportedFeature struct {
	// FeatureName - support feature type.
	FeatureName *string `json:"featureName,omitempty"`
	// SupportStatus - feature support status. Possible values include: 'FeatureSupportStatusInvalid', 'FeatureSupportStatusNotSupported', 'FeatureSupportStatusAlphaPreview', 'FeatureSupportStatusPrivatePreview', 'FeatureSupportStatusPublicPreview', 'FeatureSupportStatusGenerallyAvailable'
	SupportStatus FeatureSupportStatus `json:"supportStatus,omitempty"`
	// ExposureControlledFeatures - exposure controlled features.
	ExposureControlledFeatures *[]string `json:"exposureControlledFeatures,omitempty"`
}

// SystemData metadata pertaining to creation and last modification of the resource.
type SystemData struct {
	// CreatedBy - The identity that created the resource.
	CreatedBy *string `json:"createdBy,omitempty"`
	// CreatedByType - The type of identity that created the resource. Possible values include: 'User', 'Application', 'ManagedIdentity', 'Key'
	CreatedByType CreatedByType `json:"createdByType,omitempty"`
	// CreatedAt - The timestamp of resource creation (UTC).
	CreatedAt *date.Time `json:"createdAt,omitempty"`
	// LastModifiedBy - The identity that last modified the resource.
	LastModifiedBy *string `json:"lastModifiedBy,omitempty"`
	// LastModifiedByType - The type of identity that last modified the resource. Possible values include: 'User', 'Application', 'ManagedIdentity', 'Key'
	LastModifiedByType CreatedByType `json:"lastModifiedByType,omitempty"`
	// LastModifiedAt - The timestamp of the resource's last modification (UTC).
	LastModifiedAt *date.Time `json:"lastModifiedAt,omitempty"`
}

// TaggingCriteria tagging criteria
type TaggingCriteria struct {
	// Criteria - Criteria which decides whether the tag can be applied to a triggered backup.
	Criteria *[]BasicBackupCriteria `json:"criteria,omitempty"`
	// IsDefault - Specifies if tag is default.
	IsDefault *bool `json:"isDefault,omitempty"`
	// TaggingPriority - Retention Tag priority.
	TaggingPriority *int64 `json:"taggingPriority,omitempty"`
	// TagInfo - Retention tag information
	TagInfo *RetentionTag `json:"tagInfo,omitempty"`
}

// UnmarshalJSON is the custom unmarshaler for TaggingCriteria struct.
// "criteria" is a polymorphic array and is decoded via the discriminator-aware helper.
func (tc *TaggingCriteria) UnmarshalJSON(body []byte) error {
	var m map[string]*json.RawMessage
	err := json.Unmarshal(body, &m)
	if err != nil {
		return err
	}
	for k, v := range m {
		switch k {
		case "criteria":
			if v != nil {
				criteria, err := unmarshalBasicBackupCriteriaArray(*v)
				if err != nil {
					return err
				}
				tc.Criteria = &criteria
			}
		case "isDefault":
			if v != nil {
				var isDefault bool
				err = json.Unmarshal(*v, &isDefault)
				if err != nil {
					return err
				}
				tc.IsDefault = &isDefault
			}
		case "taggingPriority":
			if v != nil {
				var taggingPriority int64
				err = json.Unmarshal(*v, &taggingPriority)
				if err != nil {
					return err
				}
				tc.TaggingPriority = &taggingPriority
			}
		case "tagInfo":
			if v != nil {
				var tagInfo RetentionTag
				err = json.Unmarshal(*v, &tagInfo)
				if err != nil {
					return err
				}
				tc.TagInfo = &tagInfo
			}
		}
	}

	return nil
}

// TargetCopySetting target copy settings
type TargetCopySetting struct {
	// CopyAfter - It can be CustomCopyOption or ImmediateCopyOption.
	CopyAfter BasicCopyOption `json:"copyAfter,omitempty"`
	// DataStore - Info of target datastore
	DataStore *DataStoreInfoBase `json:"dataStore,omitempty"`
}

// UnmarshalJSON is the custom unmarshaler for TargetCopySetting struct.
+func (tcs *TargetCopySetting) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "copyAfter": + if v != nil { + copyAfter, err := unmarshalBasicCopyOption(*v) + if err != nil { + return err + } + tcs.CopyAfter = copyAfter + } + case "dataStore": + if v != nil { + var dataStore DataStoreInfoBase + err = json.Unmarshal(*v, &dataStore) + if err != nil { + return err + } + tcs.DataStore = &dataStore + } + } + } + + return nil +} + +// TargetDetails class encapsulating target details, used where the destination is not a datasource +type TargetDetails struct { + // FilePrefix - Restore operation may create multiple files inside location pointed by Url + // Below will be the common prefix for all of them + FilePrefix *string `json:"filePrefix,omitempty"` + // RestoreTargetLocationType - Denotes the target location where the data will be restored, + // string value for the enum {Microsoft.Internal.AzureBackup.DataProtection.Common.Interface.RestoreTargetLocationType}. Possible values include: 'RestoreTargetLocationTypeInvalid', 'RestoreTargetLocationTypeAzureBlobs', 'RestoreTargetLocationTypeAzureFiles' + RestoreTargetLocationType RestoreTargetLocationType `json:"restoreTargetLocationType,omitempty"` + // URL - Url denoting the restore destination. 
It can point to container / file share etc + URL *string `json:"url,omitempty"` +} + +// TriggerBackupRequest trigger backup request +type TriggerBackupRequest struct { + // BackupRuleOptions - Name for the Rule of the Policy which needs to be applied for this backup + BackupRuleOptions *AdHocBackupRuleOptions `json:"backupRuleOptions,omitempty"` +} + +// BasicTriggerContext trigger context +type BasicTriggerContext interface { + AsAdhocBasedTriggerContext() (*AdhocBasedTriggerContext, bool) + AsScheduleBasedTriggerContext() (*ScheduleBasedTriggerContext, bool) + AsTriggerContext() (*TriggerContext, bool) +} + +// TriggerContext trigger context +type TriggerContext struct { + // ObjectType - Possible values include: 'ObjectTypeTriggerContext', 'ObjectTypeAdhocBasedTriggerContext', 'ObjectTypeScheduleBasedTriggerContext' + ObjectType ObjectTypeBasicTriggerContext `json:"objectType,omitempty"` +} + +func unmarshalBasicTriggerContext(body []byte) (BasicTriggerContext, error) { + var m map[string]interface{} + err := json.Unmarshal(body, &m) + if err != nil { + return nil, err + } + + switch m["objectType"] { + case string(ObjectTypeAdhocBasedTriggerContext): + var abtc AdhocBasedTriggerContext + err := json.Unmarshal(body, &abtc) + return abtc, err + case string(ObjectTypeScheduleBasedTriggerContext): + var sbtc ScheduleBasedTriggerContext + err := json.Unmarshal(body, &sbtc) + return sbtc, err + default: + var tc TriggerContext + err := json.Unmarshal(body, &tc) + return tc, err + } +} +func unmarshalBasicTriggerContextArray(body []byte) ([]BasicTriggerContext, error) { + var rawMessages []*json.RawMessage + err := json.Unmarshal(body, &rawMessages) + if err != nil { + return nil, err + } + + tcArray := make([]BasicTriggerContext, len(rawMessages)) + + for index, rawMessage := range rawMessages { + tc, err := unmarshalBasicTriggerContext(*rawMessage) + if err != nil { + return nil, err + } + tcArray[index] = tc + } + return tcArray, nil +} + +// MarshalJSON is the 
+ custom marshaler for TriggerContext. +func (tc TriggerContext) MarshalJSON() ([]byte, error) { + tc.ObjectType = ObjectTypeTriggerContext + objectMap := make(map[string]interface{}) + if tc.ObjectType != "" { + objectMap["objectType"] = tc.ObjectType + } + return json.Marshal(objectMap) +} + +// AsAdhocBasedTriggerContext is the BasicTriggerContext implementation for TriggerContext. +func (tc TriggerContext) AsAdhocBasedTriggerContext() (*AdhocBasedTriggerContext, bool) { + return nil, false +} + +// AsScheduleBasedTriggerContext is the BasicTriggerContext implementation for TriggerContext. +func (tc TriggerContext) AsScheduleBasedTriggerContext() (*ScheduleBasedTriggerContext, bool) { + return nil, false +} + +// AsTriggerContext is the BasicTriggerContext implementation for TriggerContext. +func (tc TriggerContext) AsTriggerContext() (*TriggerContext, bool) { + return &tc, true +} + +// AsBasicTriggerContext is the BasicTriggerContext implementation for TriggerContext. +func (tc TriggerContext) AsBasicTriggerContext() (BasicTriggerContext, bool) { + return &tc, true +} + +// UserFacingError error object used by layers that have access to localized content, and propagate that to +// user +type UserFacingError struct { + // Code - Unique code for this error + Code *string `json:"code,omitempty"` + // Details - Additional related Errors + Details *[]UserFacingError `json:"details,omitempty"` + // InnerError - Inner Error + InnerError *InnerError `json:"innerError,omitempty"` + // IsRetryable - Whether the operation will be retryable or not + IsRetryable *bool `json:"isRetryable,omitempty"` + // IsUserError - Whether the operation is due to a user error or service error + IsUserError *bool `json:"isUserError,omitempty"` + // Properties - Any key value pairs that can be injected inside error object + Properties map[string]*string `json:"properties"` + Message *string `json:"message,omitempty"` + // RecommendedAction - RecommendedAction - localized. 
+ RecommendedAction *[]string `json:"recommendedAction,omitempty"` + // Target - Target of the error. + Target *string `json:"target,omitempty"` +} + +// MarshalJSON is the custom marshaler for UserFacingError. +func (ufe UserFacingError) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if ufe.Code != nil { + objectMap["code"] = ufe.Code + } + if ufe.Details != nil { + objectMap["details"] = ufe.Details + } + if ufe.InnerError != nil { + objectMap["innerError"] = ufe.InnerError + } + if ufe.IsRetryable != nil { + objectMap["isRetryable"] = ufe.IsRetryable + } + if ufe.IsUserError != nil { + objectMap["isUserError"] = ufe.IsUserError + } + if ufe.Properties != nil { + objectMap["properties"] = ufe.Properties + } + if ufe.Message != nil { + objectMap["message"] = ufe.Message + } + if ufe.RecommendedAction != nil { + objectMap["recommendedAction"] = ufe.RecommendedAction + } + if ufe.Target != nil { + objectMap["target"] = ufe.Target + } + return json.Marshal(objectMap) +} + +// ValidateForBackupRequest validate for backup request +type ValidateForBackupRequest struct { + BackupInstance *BackupInstance `json:"backupInstance,omitempty"` +} + +// ValidateRestoreRequestObject validate restore request object +type ValidateRestoreRequestObject struct { + // RestoreRequestObject - Gets or sets the restore request object. + RestoreRequestObject BasicAzureBackupRestoreRequest `json:"restoreRequestObject,omitempty"` +} + +// UnmarshalJSON is the custom unmarshaler for ValidateRestoreRequestObject struct. 
+func (vrro *ValidateRestoreRequestObject) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "restoreRequestObject": + if v != nil { + restoreRequestObject, err := unmarshalBasicAzureBackupRestoreRequest(*v) + if err != nil { + return err + } + vrro.RestoreRequestObject = restoreRequestObject + } + } + } + + return nil +} diff --git a/azurerm/internal/services/dataprotection/legacysdk/dataprotection/operationresult.go b/azurerm/internal/services/dataprotection/legacysdk/dataprotection/operationresult.go new file mode 100644 index 000000000000..8b796ce38fee --- /dev/null +++ b/azurerm/internal/services/dataprotection/legacysdk/dataprotection/operationresult.go @@ -0,0 +1,105 @@ +package dataprotection + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" +) + +// OperationResultClient is the open API 2.0 Specs for Azure Data Protection service +type OperationResultClient struct { + BaseClient +} + +// NewOperationResultClient creates an instance of the OperationResultClient client. +func NewOperationResultClient(subscriptionID string) OperationResultClient { + return NewOperationResultClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewOperationResultClientWithBaseURI creates an instance of the OperationResultClient client using a custom endpoint. +// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). 
+func NewOperationResultClientWithBaseURI(baseURI string, subscriptionID string) OperationResultClient { + return OperationResultClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Get gets the operation result for a resource +func (client OperationResultClient) Get(ctx context.Context, operationID string, location string) (result OperationJobExtendedInfo, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/OperationResultClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, operationID, location) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.OperationResultClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "dataprotection.OperationResultClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.OperationResultClient", "Get", resp, "Failure responding to request") + return + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client OperationResultClient) GetPreparer(ctx context.Context, operationID string, location string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": autorest.Encode("path", location), + "operationId": autorest.Encode("path", operationID), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2021-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.DataProtection/locations/{location}/operationResults/{operationId}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client OperationResultClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. 
+func (client OperationResultClient) GetResponder(resp *http.Response) (result OperationJobExtendedInfo, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/azurerm/internal/services/dataprotection/legacysdk/dataprotection/operations.go b/azurerm/internal/services/dataprotection/legacysdk/dataprotection/operations.go new file mode 100644 index 000000000000..60c55383fa18 --- /dev/null +++ b/azurerm/internal/services/dataprotection/legacysdk/dataprotection/operations.go @@ -0,0 +1,141 @@ +package dataprotection + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" +) + +// OperationsClient is the open API 2.0 Specs for Azure Data Protection service +type OperationsClient struct { + BaseClient +} + +// NewOperationsClient creates an instance of the OperationsClient client. +func NewOperationsClient(subscriptionID string) OperationsClient { + return NewOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewOperationsClientWithBaseURI creates an instance of the OperationsClient client using a custom endpoint. Use this +// when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). 
+func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) OperationsClient { + return OperationsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List returns the list of available operations. +func (client OperationsClient) List(ctx context.Context) (result ClientDiscoveryResponsePage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/OperationsClient.List") + defer func() { + sc := -1 + if result.cdr.Response.Response != nil { + sc = result.cdr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.OperationsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.cdr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "dataprotection.OperationsClient", "List", resp, "Failure sending request") + return + } + + result.cdr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.OperationsClient", "List", resp, "Failure responding to request") + return + } + if result.cdr.hasNextLink() && result.cdr.IsEmpty() { + err = result.NextWithContext(ctx) + return + } + + return +} + +// ListPreparer prepares the List request. +func (client OperationsClient) ListPreparer(ctx context.Context) (*http.Request, error) { + const APIVersion = "2021-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/providers/Microsoft.DataProtection/operations"), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. 
The method will close the +// http.Response Body if it receives an error. +func (client OperationsClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client OperationsClient) ListResponder(resp *http.Response) (result ClientDiscoveryResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client OperationsClient) listNextResults(ctx context.Context, lastResults ClientDiscoveryResponse) (result ClientDiscoveryResponse, err error) { + req, err := lastResults.clientDiscoveryResponsePreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "dataprotection.OperationsClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "dataprotection.OperationsClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.OperationsClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. 
+func (client OperationsClient) ListComplete(ctx context.Context) (result ClientDiscoveryResponseIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/OperationsClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx) + return +} diff --git a/azurerm/internal/services/dataprotection/legacysdk/dataprotection/recoverypoint.go b/azurerm/internal/services/dataprotection/legacysdk/dataprotection/recoverypoint.go new file mode 100644 index 000000000000..56ef5dcd0729 --- /dev/null +++ b/azurerm/internal/services/dataprotection/legacysdk/dataprotection/recoverypoint.go @@ -0,0 +1,111 @@ +package dataprotection + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" +) + +// RecoveryPointClient is the open API 2.0 Specs for Azure Data Protection service +type RecoveryPointClient struct { + BaseClient +} + +// NewRecoveryPointClient creates an instance of the RecoveryPointClient client. +func NewRecoveryPointClient(subscriptionID string) RecoveryPointClient { + return NewRecoveryPointClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewRecoveryPointClientWithBaseURI creates an instance of the RecoveryPointClient client using a custom endpoint. +// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). 
+func NewRecoveryPointClientWithBaseURI(baseURI string, subscriptionID string) RecoveryPointClient { + return RecoveryPointClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Get gets a Recovery Point using recoveryPointId for a Datasource. +// Parameters: +// vaultName - the name of the backup vault. +// resourceGroupName - the name of the resource group where the backup vault is present. +// backupInstanceName - the name of the backup instance +func (client RecoveryPointClient) Get(ctx context.Context, vaultName string, resourceGroupName string, backupInstanceName string, recoveryPointID string) (result AzureBackupRecoveryPointResource, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RecoveryPointClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, vaultName, resourceGroupName, backupInstanceName, recoveryPointID) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.RecoveryPointClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "dataprotection.RecoveryPointClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.RecoveryPointClient", "Get", resp, "Failure responding to request") + return + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client RecoveryPointClient) GetPreparer(ctx context.Context, vaultName string, resourceGroupName string, backupInstanceName string, recoveryPointID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "backupInstanceName": autorest.Encode("path", backupInstanceName), + "recoveryPointId": autorest.Encode("path", recoveryPointID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vaultName": autorest.Encode("path", vaultName), + } + + const APIVersion = "2021-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataProtection/backupVaults/{vaultName}/backupInstances/{backupInstanceName}/recoveryPoints/{recoveryPointId}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client RecoveryPointClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. 
+func (client RecoveryPointClient) GetResponder(resp *http.Response) (result AzureBackupRecoveryPointResource, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/azurerm/internal/services/dataprotection/legacysdk/dataprotection/recoverypoints.go b/azurerm/internal/services/dataprotection/legacysdk/dataprotection/recoverypoints.go new file mode 100644 index 000000000000..64be554ba000 --- /dev/null +++ b/azurerm/internal/services/dataprotection/legacysdk/dataprotection/recoverypoints.go @@ -0,0 +1,160 @@ +package dataprotection + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" +) + +// RecoveryPointsClient is the open API 2.0 Specs for Azure Data Protection service +type RecoveryPointsClient struct { + BaseClient +} + +// NewRecoveryPointsClient creates an instance of the RecoveryPointsClient client. +func NewRecoveryPointsClient(subscriptionID string) RecoveryPointsClient { + return NewRecoveryPointsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewRecoveryPointsClientWithBaseURI creates an instance of the RecoveryPointsClient client using a custom endpoint. +// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). 
+func NewRecoveryPointsClientWithBaseURI(baseURI string, subscriptionID string) RecoveryPointsClient { + return RecoveryPointsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// GetList returns a list of Recovery Points for a DataSource in a vault. +// Parameters: +// vaultName - the name of the backup vault. +// resourceGroupName - the name of the resource group where the backup vault is present. +// backupInstanceName - the name of the backup instance +// filter - oData filter options. +// skipToken - skipToken Filter. +func (client RecoveryPointsClient) GetList(ctx context.Context, vaultName string, resourceGroupName string, backupInstanceName string, filter string, skipToken string) (result AzureBackupRecoveryPointResourceListPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RecoveryPointsClient.GetList") + defer func() { + sc := -1 + if result.abrprl.Response.Response != nil { + sc = result.abrprl.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.getListNextResults + req, err := client.GetListPreparer(ctx, vaultName, resourceGroupName, backupInstanceName, filter, skipToken) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.RecoveryPointsClient", "GetList", nil, "Failure preparing request") + return + } + + resp, err := client.GetListSender(req) + if err != nil { + result.abrprl.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "dataprotection.RecoveryPointsClient", "GetList", resp, "Failure sending request") + return + } + + result.abrprl, err = client.GetListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.RecoveryPointsClient", "GetList", resp, "Failure responding to request") + return + } + if result.abrprl.hasNextLink() && result.abrprl.IsEmpty() { + err = result.NextWithContext(ctx) + return + } + + return +} + +// GetListPreparer prepares the GetList request. 
+func (client RecoveryPointsClient) GetListPreparer(ctx context.Context, vaultName string, resourceGroupName string, backupInstanceName string, filter string, skipToken string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "backupInstanceName": autorest.Encode("path", backupInstanceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vaultName": autorest.Encode("path", vaultName), + } + + const APIVersion = "2021-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if len(skipToken) > 0 { + queryParameters["$skipToken"] = autorest.Encode("query", skipToken) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataProtection/backupVaults/{vaultName}/backupInstances/{backupInstanceName}/recoveryPoints", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetListSender sends the GetList request. The method will close the +// http.Response Body if it receives an error. +func (client RecoveryPointsClient) GetListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetListResponder handles the response to the GetList request. The method always +// closes the http.Response Body. 
+func (client RecoveryPointsClient) GetListResponder(resp *http.Response) (result AzureBackupRecoveryPointResourceList, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// getListNextResults retrieves the next set of results, if any. +func (client RecoveryPointsClient) getListNextResults(ctx context.Context, lastResults AzureBackupRecoveryPointResourceList) (result AzureBackupRecoveryPointResourceList, err error) { + req, err := lastResults.azureBackupRecoveryPointResourceListPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "dataprotection.RecoveryPointsClient", "getListNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.GetListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "dataprotection.RecoveryPointsClient", "getListNextResults", resp, "Failure sending next results request") + } + result, err = client.GetListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dataprotection.RecoveryPointsClient", "getListNextResults", resp, "Failure responding to next results request") + } + return +} + +// GetListComplete enumerates all values, automatically crossing page boundaries as required. 
+func (client RecoveryPointsClient) GetListComplete(ctx context.Context, vaultName string, resourceGroupName string, backupInstanceName string, filter string, skipToken string) (result AzureBackupRecoveryPointResourceListIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RecoveryPointsClient.GetList") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.GetList(ctx, vaultName, resourceGroupName, backupInstanceName, filter, skipToken) + return +} diff --git a/azurerm/internal/services/dataprotection/legacysdk/dataprotection/version.go b/azurerm/internal/services/dataprotection/legacysdk/dataprotection/version.go new file mode 100644 index 000000000000..85bf3d88aba7 --- /dev/null +++ b/azurerm/internal/services/dataprotection/legacysdk/dataprotection/version.go @@ -0,0 +1,19 @@ +package dataprotection + +import "github.com/Azure/azure-sdk-for-go/version" + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return "Azure-SDK-For-Go/" + Version() + " dataprotection/2021-01-01" +} + +// Version returns the semantic version (see http://semver.org) of the client. 
+func Version() string { + return version.Number +} diff --git a/azurerm/internal/services/dataprotection/parse/backup_instance.go b/azurerm/internal/services/dataprotection/parse/backup_instance.go new file mode 100644 index 000000000000..6e14ab21edb2 --- /dev/null +++ b/azurerm/internal/services/dataprotection/parse/backup_instance.go @@ -0,0 +1,75 @@ +package parse + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "fmt" + "strings" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" +) + +type BackupInstanceId struct { + SubscriptionId string + ResourceGroup string + BackupVaultName string + Name string +} + +func NewBackupInstanceID(subscriptionId, resourceGroup, backupVaultName, name string) BackupInstanceId { + return BackupInstanceId{ + SubscriptionId: subscriptionId, + ResourceGroup: resourceGroup, + BackupVaultName: backupVaultName, + Name: name, + } +} + +func (id BackupInstanceId) String() string { + segments := []string{ + fmt.Sprintf("Name %q", id.Name), + fmt.Sprintf("Backup Vault Name %q", id.BackupVaultName), + fmt.Sprintf("Resource Group %q", id.ResourceGroup), + } + segmentsStr := strings.Join(segments, " / ") + return fmt.Sprintf("%s: (%s)", "Backup Instance", segmentsStr) +} + +func (id BackupInstanceId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DataProtection/backupVaults/%s/backupInstances/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.BackupVaultName, id.Name) +} + +// BackupInstanceID parses a BackupInstance ID into an BackupInstanceId struct +func BackupInstanceID(input string) (*BackupInstanceId, error) { + id, err := azure.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := BackupInstanceId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing 
the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + if resourceId.BackupVaultName, err = id.PopSegment("backupVaults"); err != nil { + return nil, err + } + if resourceId.Name, err = id.PopSegment("backupInstances"); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} diff --git a/azurerm/internal/services/dataprotection/parse/backup_instance_test.go b/azurerm/internal/services/dataprotection/parse/backup_instance_test.go new file mode 100644 index 000000000000..15cbc3c02bc1 --- /dev/null +++ b/azurerm/internal/services/dataprotection/parse/backup_instance_test.go @@ -0,0 +1,128 @@ +package parse + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "testing" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/resourceid" +) + +var _ resourceid.Formatter = BackupInstanceId{} + +func TestBackupInstanceIDFormatter(t *testing.T) { + actual := NewBackupInstanceID("12345678-1234-9876-4563-123456789012", "resourceGroup1", "vault1", "backupInstance1").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroup1/providers/Microsoft.DataProtection/backupVaults/vault1/backupInstances/backupInstance1" + if actual != expected { + t.Fatalf("Expected %q but got %q", expected, actual) + } +} + +func TestBackupInstanceID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *BackupInstanceId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/", + Error: true, + }, + + { + // missing value 
for ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/", + Error: true, + }, + + { + // missing BackupVaultName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroup1/providers/Microsoft.DataProtection/", + Error: true, + }, + + { + // missing value for BackupVaultName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroup1/providers/Microsoft.DataProtection/backupVaults/", + Error: true, + }, + + { + // missing Name + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroup1/providers/Microsoft.DataProtection/backupVaults/vault1/", + Error: true, + }, + + { + // missing value for Name + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroup1/providers/Microsoft.DataProtection/backupVaults/vault1/backupInstances/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroup1/providers/Microsoft.DataProtection/backupVaults/vault1/backupInstances/backupInstance1", + Expected: &BackupInstanceId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroup: "resourceGroup1", + BackupVaultName: "vault1", + Name: "backupInstance1", + }, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/RESOURCEGROUP1/PROVIDERS/MICROSOFT.DATAPROTECTION/BACKUPVAULTS/VAULT1/BACKUPINSTANCES/BACKUPINSTANCE1", + Error: true, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := BackupInstanceID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, 
actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.BackupVaultName != v.Expected.BackupVaultName { + t.Fatalf("Expected %q but got %q for BackupVaultName", v.Expected.BackupVaultName, actual.BackupVaultName) + } + if actual.Name != v.Expected.Name { + t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + } + } +} diff --git a/azurerm/internal/services/dataprotection/parse/backup_policy.go b/azurerm/internal/services/dataprotection/parse/backup_policy.go new file mode 100644 index 000000000000..242d360697e1 --- /dev/null +++ b/azurerm/internal/services/dataprotection/parse/backup_policy.go @@ -0,0 +1,75 @@ +package parse + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "fmt" + "strings" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" +) + +type BackupPolicyId struct { + SubscriptionId string + ResourceGroup string + BackupVaultName string + Name string +} + +func NewBackupPolicyID(subscriptionId, resourceGroup, backupVaultName, name string) BackupPolicyId { + return BackupPolicyId{ + SubscriptionId: subscriptionId, + ResourceGroup: resourceGroup, + BackupVaultName: backupVaultName, + Name: name, + } +} + +func (id BackupPolicyId) String() string { + segments := []string{ + fmt.Sprintf("Name %q", id.Name), + fmt.Sprintf("Backup Vault Name %q", id.BackupVaultName), + fmt.Sprintf("Resource Group %q", id.ResourceGroup), + } + segmentsStr := strings.Join(segments, " / ") + return fmt.Sprintf("%s: (%s)", "Backup Policy", segmentsStr) +} + +func (id BackupPolicyId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DataProtection/backupVaults/%s/backupPolicies/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.BackupVaultName, id.Name) +} + +// 
BackupPolicyID parses a BackupPolicy ID into an BackupPolicyId struct +func BackupPolicyID(input string) (*BackupPolicyId, error) { + id, err := azure.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := BackupPolicyId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + if resourceId.BackupVaultName, err = id.PopSegment("backupVaults"); err != nil { + return nil, err + } + if resourceId.Name, err = id.PopSegment("backupPolicies"); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} diff --git a/azurerm/internal/services/dataprotection/parse/backup_policy_test.go b/azurerm/internal/services/dataprotection/parse/backup_policy_test.go new file mode 100644 index 000000000000..a03bd97356bb --- /dev/null +++ b/azurerm/internal/services/dataprotection/parse/backup_policy_test.go @@ -0,0 +1,128 @@ +package parse + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "testing" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/resourceid" +) + +var _ resourceid.Formatter = BackupPolicyId{} + +func TestBackupPolicyIDFormatter(t *testing.T) { + actual := NewBackupPolicyID("12345678-1234-9876-4563-123456789012", "resourceGroup1", "vault1", "backupPolicy1").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroup1/providers/Microsoft.DataProtection/backupVaults/vault1/backupPolicies/backupPolicy1" + if actual != expected { + t.Fatalf("Expected %q but got %q", expected, actual) + } +} + +func TestBackupPolicyID(t *testing.T) { + testData := []struct { + Input string + Error 
bool + Expected *BackupPolicyId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/", + Error: true, + }, + + { + // missing BackupVaultName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroup1/providers/Microsoft.DataProtection/", + Error: true, + }, + + { + // missing value for BackupVaultName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroup1/providers/Microsoft.DataProtection/backupVaults/", + Error: true, + }, + + { + // missing Name + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroup1/providers/Microsoft.DataProtection/backupVaults/vault1/", + Error: true, + }, + + { + // missing value for Name + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroup1/providers/Microsoft.DataProtection/backupVaults/vault1/backupPolicies/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroup1/providers/Microsoft.DataProtection/backupVaults/vault1/backupPolicies/backupPolicy1", + Expected: &BackupPolicyId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroup: "resourceGroup1", + BackupVaultName: "vault1", + Name: "backupPolicy1", + }, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/RESOURCEGROUP1/PROVIDERS/MICROSOFT.DATAPROTECTION/BACKUPVAULTS/VAULT1/BACKUPPOLICIES/BACKUPPOLICY1", + Error: true, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + 
actual, err := BackupPolicyID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.BackupVaultName != v.Expected.BackupVaultName { + t.Fatalf("Expected %q but got %q for BackupVaultName", v.Expected.BackupVaultName, actual.BackupVaultName) + } + if actual.Name != v.Expected.Name { + t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + } + } +} diff --git a/azurerm/internal/services/dataprotection/parse/backup_vault.go b/azurerm/internal/services/dataprotection/parse/backup_vault.go new file mode 100644 index 000000000000..a19db8e2f9a0 --- /dev/null +++ b/azurerm/internal/services/dataprotection/parse/backup_vault.go @@ -0,0 +1,69 @@ +package parse + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "fmt" + "strings" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" +) + +type BackupVaultId struct { + SubscriptionId string + ResourceGroup string + Name string +} + +func NewBackupVaultID(subscriptionId, resourceGroup, name string) BackupVaultId { + return BackupVaultId{ + SubscriptionId: subscriptionId, + ResourceGroup: resourceGroup, + Name: name, + } +} + +func (id BackupVaultId) String() string { + segments := []string{ + fmt.Sprintf("Name %q", id.Name), + fmt.Sprintf("Resource Group %q", id.ResourceGroup), + } + segmentsStr := strings.Join(segments, " / ") + return fmt.Sprintf("%s: (%s)", "Backup Vault", segmentsStr) +} + +func (id BackupVaultId) ID() string { + fmtString := 
"/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DataProtection/backupVaults/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.Name) +} + +// BackupVaultID parses a BackupVault ID into an BackupVaultId struct +func BackupVaultID(input string) (*BackupVaultId, error) { + id, err := azure.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := BackupVaultId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + if resourceId.Name, err = id.PopSegment("backupVaults"); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} diff --git a/azurerm/internal/services/dataprotection/parse/backup_vault_test.go b/azurerm/internal/services/dataprotection/parse/backup_vault_test.go new file mode 100644 index 000000000000..c4c9bf06ccfb --- /dev/null +++ b/azurerm/internal/services/dataprotection/parse/backup_vault_test.go @@ -0,0 +1,112 @@ +package parse + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "testing" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/resourceid" +) + +var _ resourceid.Formatter = BackupVaultId{} + +func TestBackupVaultIDFormatter(t *testing.T) { + actual := NewBackupVaultID("12345678-1234-9876-4563-123456789012", "resourceGroup1", "vault1").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroup1/providers/Microsoft.DataProtection/backupVaults/vault1" + if actual != expected { + t.Fatalf("Expected %q but got %q", expected, actual) + } +} + +func TestBackupVaultID(t *testing.T) { + testData := []struct { + Input 
string + Error bool + Expected *BackupVaultId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/", + Error: true, + }, + + { + // missing Name + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroup1/providers/Microsoft.DataProtection/", + Error: true, + }, + + { + // missing value for Name + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroup1/providers/Microsoft.DataProtection/backupVaults/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroup1/providers/Microsoft.DataProtection/backupVaults/vault1", + Expected: &BackupVaultId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroup: "resourceGroup1", + Name: "vault1", + }, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/RESOURCEGROUP1/PROVIDERS/MICROSOFT.DATAPROTECTION/BACKUPVAULTS/VAULT1", + Error: true, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := BackupVaultID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", 
v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.Name != v.Expected.Name { + t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + } + } +} diff --git a/azurerm/internal/services/dataprotection/registration.go b/azurerm/internal/services/dataprotection/registration.go new file mode 100644 index 000000000000..191286b227ee --- /dev/null +++ b/azurerm/internal/services/dataprotection/registration.go @@ -0,0 +1,33 @@ +package dataprotection + +import "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + +type Registration struct{} + +// Name is the name of this Service +func (r Registration) Name() string { + return "DataProtection" +} + +// WebsiteCategories returns a list of categories which can be used for the sidebar +func (r Registration) WebsiteCategories() []string { + return []string{ + "DataProtection", + } +} + +// SupportedDataSources returns the supported Data Sources supported by this Service +func (r Registration) SupportedDataSources() map[string]*pluginsdk.Resource { + return map[string]*pluginsdk.Resource{} +} + +// SupportedResources returns the supported Resources supported by this Service +func (r Registration) SupportedResources() map[string]*pluginsdk.Resource { + return map[string]*pluginsdk.Resource{ + "azurerm_data_protection_backup_vault": resourceDataProtectionBackupVault(), + "azurerm_data_protection_backup_policy_blob_storage": resourceDataProtectionBackupPolicyBlobStorage(), + "azurerm_data_protection_backup_policy_disk": resourceDataProtectionBackupPolicyDisk(), + "azurerm_data_protection_backup_policy_postgresql": resourceDataProtectionBackupPolicyPostgreSQL(), + "azurerm_data_protection_backup_instance_postgresql": resourceDataProtectionBackupInstancePostgreSQL(), + } +} diff --git a/azurerm/internal/services/dataprotection/resourceids.go b/azurerm/internal/services/dataprotection/resourceids.go new file mode 100644 index 000000000000..08d621225861 --- /dev/null 
+++ b/azurerm/internal/services/dataprotection/resourceids.go @@ -0,0 +1,5 @@ +package dataprotection + +//go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=BackupVault -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroup1/providers/Microsoft.DataProtection/backupVaults/vault1 +//go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=BackupPolicy -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroup1/providers/Microsoft.DataProtection/backupVaults/vault1/backupPolicies/backupPolicy1 +//go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=BackupInstance -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroup1/providers/Microsoft.DataProtection/backupVaults/vault1/backupInstances/backupInstance1 diff --git a/azurerm/internal/services/dataprotection/validate/backup_instance_id.go b/azurerm/internal/services/dataprotection/validate/backup_instance_id.go new file mode 100644 index 000000000000..7b58dc7777a5 --- /dev/null +++ b/azurerm/internal/services/dataprotection/validate/backup_instance_id.go @@ -0,0 +1,23 @@ +package validate + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "fmt" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/dataprotection/parse" +) + +func BackupInstanceID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := parse.BackupInstanceID(v); err != nil { + errors = append(errors, err) + } + + return +} diff --git a/azurerm/internal/services/dataprotection/validate/backup_instance_id_test.go b/azurerm/internal/services/dataprotection/validate/backup_instance_id_test.go new file mode 100644 index 000000000000..198efb5bd556 --- /dev/null +++ 
b/azurerm/internal/services/dataprotection/validate/backup_instance_id_test.go @@ -0,0 +1,88 @@ +package validate + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import "testing" + +func TestBackupInstanceID(t *testing.T) { + cases := []struct { + Input string + Valid bool + }{ + + { + // empty + Input: "", + Valid: false, + }, + + { + // missing SubscriptionId + Input: "/", + Valid: false, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Valid: false, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/", + Valid: false, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/", + Valid: false, + }, + + { + // missing BackupVaultName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroup1/providers/Microsoft.DataProtection/", + Valid: false, + }, + + { + // missing value for BackupVaultName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroup1/providers/Microsoft.DataProtection/backupVaults/", + Valid: false, + }, + + { + // missing Name + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroup1/providers/Microsoft.DataProtection/backupVaults/vault1/", + Valid: false, + }, + + { + // missing value for Name + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroup1/providers/Microsoft.DataProtection/backupVaults/vault1/backupInstances/", + Valid: false, + }, + + { + // valid + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroup1/providers/Microsoft.DataProtection/backupVaults/vault1/backupInstances/backupInstance1", + Valid: true, + }, + + { + // upper-cased + Input: 
"/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/RESOURCEGROUP1/PROVIDERS/MICROSOFT.DATAPROTECTION/BACKUPVAULTS/VAULT1/BACKUPINSTANCES/BACKUPINSTANCE1", + Valid: false, + }, + } + for _, tc := range cases { + t.Logf("[DEBUG] Testing Value %s", tc.Input) + _, errors := BackupInstanceID(tc.Input, "test") + valid := len(errors) == 0 + + if tc.Valid != valid { + t.Fatalf("Expected %t but got %t", tc.Valid, valid) + } + } +} diff --git a/azurerm/internal/services/dataprotection/validate/backup_policy_id.go b/azurerm/internal/services/dataprotection/validate/backup_policy_id.go new file mode 100644 index 000000000000..903e7bad3ca4 --- /dev/null +++ b/azurerm/internal/services/dataprotection/validate/backup_policy_id.go @@ -0,0 +1,23 @@ +package validate + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "fmt" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/dataprotection/parse" +) + +func BackupPolicyID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := parse.BackupPolicyID(v); err != nil { + errors = append(errors, err) + } + + return +} diff --git a/azurerm/internal/services/dataprotection/validate/backup_policy_id_test.go b/azurerm/internal/services/dataprotection/validate/backup_policy_id_test.go new file mode 100644 index 000000000000..33353158919c --- /dev/null +++ b/azurerm/internal/services/dataprotection/validate/backup_policy_id_test.go @@ -0,0 +1,88 @@ +package validate + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import "testing" + +func TestBackupPolicyID(t *testing.T) { + cases := []struct { + Input string + Valid bool + }{ + + { + // empty + Input: "", + Valid: false, + }, + + { + // missing SubscriptionId + Input: "/", + Valid: false, + }, + 
+ { + // missing value for SubscriptionId + Input: "/subscriptions/", + Valid: false, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/", + Valid: false, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/", + Valid: false, + }, + + { + // missing BackupVaultName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroup1/providers/Microsoft.DataProtection/", + Valid: false, + }, + + { + // missing value for BackupVaultName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroup1/providers/Microsoft.DataProtection/backupVaults/", + Valid: false, + }, + + { + // missing Name + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroup1/providers/Microsoft.DataProtection/backupVaults/vault1/", + Valid: false, + }, + + { + // missing value for Name + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroup1/providers/Microsoft.DataProtection/backupVaults/vault1/backupPolicies/", + Valid: false, + }, + + { + // valid + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroup1/providers/Microsoft.DataProtection/backupVaults/vault1/backupPolicies/backupPolicy1", + Valid: true, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/RESOURCEGROUP1/PROVIDERS/MICROSOFT.DATAPROTECTION/BACKUPVAULTS/VAULT1/BACKUPPOLICIES/BACKUPPOLICY1", + Valid: false, + }, + } + for _, tc := range cases { + t.Logf("[DEBUG] Testing Value %s", tc.Input) + _, errors := BackupPolicyID(tc.Input, "test") + valid := len(errors) == 0 + + if tc.Valid != valid { + t.Fatalf("Expected %t but got %t", tc.Valid, valid) + } + } +} diff --git a/azurerm/internal/services/dataprotection/validate/backup_vault_id.go 
b/azurerm/internal/services/dataprotection/validate/backup_vault_id.go new file mode 100644 index 000000000000..bce565bf8557 --- /dev/null +++ b/azurerm/internal/services/dataprotection/validate/backup_vault_id.go @@ -0,0 +1,23 @@ +package validate + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "fmt" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/dataprotection/parse" +) + +func BackupVaultID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := parse.BackupVaultID(v); err != nil { + errors = append(errors, err) + } + + return +} diff --git a/azurerm/internal/services/dataprotection/validate/backup_vault_id_test.go b/azurerm/internal/services/dataprotection/validate/backup_vault_id_test.go new file mode 100644 index 000000000000..aa463cc3f948 --- /dev/null +++ b/azurerm/internal/services/dataprotection/validate/backup_vault_id_test.go @@ -0,0 +1,76 @@ +package validate + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import "testing" + +func TestBackupVaultID(t *testing.T) { + cases := []struct { + Input string + Valid bool + }{ + + { + // empty + Input: "", + Valid: false, + }, + + { + // missing SubscriptionId + Input: "/", + Valid: false, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Valid: false, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/", + Valid: false, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/", + Valid: false, + }, + + { + // missing Name + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroup1/providers/Microsoft.DataProtection/", + Valid: false, + }, + + { 
+ // missing value for Name + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroup1/providers/Microsoft.DataProtection/backupVaults/", + Valid: false, + }, + + { + // valid + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroup1/providers/Microsoft.DataProtection/backupVaults/vault1", + Valid: true, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/RESOURCEGROUP1/PROVIDERS/MICROSOFT.DATAPROTECTION/BACKUPVAULTS/VAULT1", + Valid: false, + }, + } + for _, tc := range cases { + t.Logf("[DEBUG] Testing Value %s", tc.Input) + _, errors := BackupVaultID(tc.Input, "test") + valid := len(errors) == 0 + + if tc.Valid != valid { + t.Fatalf("Expected %t but got %t", tc.Valid, valid) + } + } +} diff --git a/azurerm/internal/services/desktopvirtualization/client/client.go b/azurerm/internal/services/desktopvirtualization/client/client.go index 0718894cd3d2..970a93b1104a 100644 --- a/azurerm/internal/services/desktopvirtualization/client/client.go +++ b/azurerm/internal/services/desktopvirtualization/client/client.go @@ -1,12 +1,13 @@ package client import ( - "github.com/Azure/azure-sdk-for-go/services/preview/desktopvirtualization/mgmt/2019-12-10-preview/desktopvirtualization" + "github.com/Azure/azure-sdk-for-go/services/preview/desktopvirtualization/mgmt/2020-11-02-preview/desktopvirtualization" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/common" ) type Client struct { ApplicationGroupsClient *desktopvirtualization.ApplicationGroupsClient + ApplicationsClient *desktopvirtualization.ApplicationsClient DesktopsClient *desktopvirtualization.DesktopsClient HostPoolsClient *desktopvirtualization.HostPoolsClient OperationsClient *desktopvirtualization.OperationsClient @@ -19,6 +20,9 @@ func NewClient(o *common.ClientOptions) *Client { ApplicationGroupsClient := 
desktopvirtualization.NewApplicationGroupsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) o.ConfigureClient(&ApplicationGroupsClient.Client, o.ResourceManagerAuthorizer) + ApplicationsClient := desktopvirtualization.NewApplicationsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) + o.ConfigureClient(&ApplicationsClient.Client, o.ResourceManagerAuthorizer) + DesktopsClient := desktopvirtualization.NewDesktopsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) o.ConfigureClient(&DesktopsClient.Client, o.ResourceManagerAuthorizer) @@ -36,6 +40,7 @@ func NewClient(o *common.ClientOptions) *Client { return &Client{ ApplicationGroupsClient: &ApplicationGroupsClient, + ApplicationsClient: &ApplicationsClient, DesktopsClient: &DesktopsClient, HostPoolsClient: &HostPoolsClient, OperationsClient: &OperationsClient, diff --git a/azurerm/internal/services/desktopvirtualization/parse/application.go b/azurerm/internal/services/desktopvirtualization/parse/application.go new file mode 100644 index 000000000000..408219bb691a --- /dev/null +++ b/azurerm/internal/services/desktopvirtualization/parse/application.go @@ -0,0 +1,131 @@ +package parse + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "fmt" + "strings" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" +) + +type ApplicationId struct { + SubscriptionId string + ResourceGroup string + ApplicationGroupName string + Name string +} + +func NewApplicationID(subscriptionId, resourceGroup, applicationGroupName, name string) ApplicationId { + return ApplicationId{ + SubscriptionId: subscriptionId, + ResourceGroup: resourceGroup, + ApplicationGroupName: applicationGroupName, + Name: name, + } +} + +func (id ApplicationId) String() string { + segments := []string{ + fmt.Sprintf("Name %q", id.Name), + fmt.Sprintf("Application Group Name %q", id.ApplicationGroupName), + fmt.Sprintf("Resource Group %q", 
id.ResourceGroup), + } + segmentsStr := strings.Join(segments, " / ") + return fmt.Sprintf("%s: (%s)", "Application", segmentsStr) +} + +func (id ApplicationId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DesktopVirtualization/applicationGroups/%s/applications/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.ApplicationGroupName, id.Name) +} + +// ApplicationID parses a Application ID into an ApplicationId struct +func ApplicationID(input string) (*ApplicationId, error) { + id, err := azure.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := ApplicationId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + if resourceId.ApplicationGroupName, err = id.PopSegment("applicationGroups"); err != nil { + return nil, err + } + if resourceId.Name, err = id.PopSegment("applications"); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} + +// ApplicationIDInsensitively parses an Application ID into an ApplicationId struct, insensitively +// This should only be used to parse an ID for rewriting, the ApplicationID +// method should be used instead for validation etc. +// +// Whilst this may seem strange, this enables Terraform have consistent casing +// which works around issues in Core, whilst handling broken API responses. 
+func ApplicationIDInsensitively(input string) (*ApplicationId, error) { + id, err := azure.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := ApplicationId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + // find the correct casing for the 'applicationGroups' segment + applicationGroupsKey := "applicationGroups" + for key := range id.Path { + if strings.EqualFold(key, applicationGroupsKey) { + applicationGroupsKey = key + break + } + } + if resourceId.ApplicationGroupName, err = id.PopSegment(applicationGroupsKey); err != nil { + return nil, err + } + + // find the correct casing for the 'applications' segment + applicationsKey := "applications" + for key := range id.Path { + if strings.EqualFold(key, applicationsKey) { + applicationsKey = key + break + } + } + if resourceId.Name, err = id.PopSegment(applicationsKey); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} diff --git a/azurerm/internal/services/desktopvirtualization/parse/application_test.go b/azurerm/internal/services/desktopvirtualization/parse/application_test.go new file mode 100644 index 000000000000..21ee6b41fc90 --- /dev/null +++ b/azurerm/internal/services/desktopvirtualization/parse/application_test.go @@ -0,0 +1,264 @@ +package parse + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "testing" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/resourceid" +) + +var _ resourceid.Formatter = ApplicationId{} + +func TestApplicationIDFormatter(t *testing.T) { + actual := 
NewApplicationID("12345678-1234-9876-4563-123456789012", "resGroup1", "applicationGroup1", "application1").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DesktopVirtualization/applicationGroups/applicationGroup1/applications/application1" + if actual != expected { + t.Fatalf("Expected %q but got %q", expected, actual) + } +} + +func TestApplicationID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ApplicationId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/", + Error: true, + }, + + { + // missing ApplicationGroupName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DesktopVirtualization/", + Error: true, + }, + + { + // missing value for ApplicationGroupName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DesktopVirtualization/applicationGroups/", + Error: true, + }, + + { + // missing Name + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DesktopVirtualization/applicationGroups/applicationGroup1/", + Error: true, + }, + + { + // missing value for Name + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DesktopVirtualization/applicationGroups/applicationGroup1/applications/", + Error: true, + }, + + { + // valid + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DesktopVirtualization/applicationGroups/applicationGroup1/applications/application1", + Expected: &ApplicationId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroup: "resGroup1", + ApplicationGroupName: "applicationGroup1", + Name: "application1", + }, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/RESGROUP1/PROVIDERS/MICROSOFT.DESKTOPVIRTUALIZATION/APPLICATIONGROUPS/APPLICATIONGROUP1/APPLICATIONS/APPLICATION1", + Error: true, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ApplicationID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.ApplicationGroupName != v.Expected.ApplicationGroupName { + t.Fatalf("Expected %q but got %q for ApplicationGroupName", v.Expected.ApplicationGroupName, actual.ApplicationGroupName) + } + if actual.Name != v.Expected.Name { + t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + } + } +} + +func TestApplicationIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ApplicationId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/", + Error: true, + }, + + { + // missing ApplicationGroupName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DesktopVirtualization/", + Error: true, + }, + + { + // missing value for ApplicationGroupName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DesktopVirtualization/applicationGroups/", + Error: true, + }, + + { + // missing Name + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DesktopVirtualization/applicationGroups/applicationGroup1/", + Error: true, + }, + + { + // missing value for Name + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DesktopVirtualization/applicationGroups/applicationGroup1/applications/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DesktopVirtualization/applicationGroups/applicationGroup1/applications/application1", + Expected: &ApplicationId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroup: "resGroup1", + ApplicationGroupName: "applicationGroup1", + Name: "application1", + }, + }, + + { + // lower-cased segment names + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DesktopVirtualization/applicationgroups/applicationGroup1/applications/application1", + Expected: &ApplicationId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroup: "resGroup1", + ApplicationGroupName: "applicationGroup1", + Name: "application1", + }, + }, + + { + // upper-cased segment names + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DesktopVirtualization/APPLICATIONGROUPS/applicationGroup1/APPLICATIONS/application1", + Expected: &ApplicationId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroup: "resGroup1", + ApplicationGroupName: "applicationGroup1", + Name: "application1", + }, + }, + + { + // mixed-cased segment names + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DesktopVirtualization/ApPlIcAtIoNgRoUpS/applicationGroup1/ApPlIcAtIoNs/application1", + Expected: &ApplicationId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroup: "resGroup1", + ApplicationGroupName: "applicationGroup1", + Name: "application1", + }, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ApplicationIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.ApplicationGroupName != v.Expected.ApplicationGroupName { + t.Fatalf("Expected %q but got %q for ApplicationGroupName", v.Expected.ApplicationGroupName, actual.ApplicationGroupName) + } + if actual.Name != v.Expected.Name { + t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + } + } +} diff --git a/azurerm/internal/services/desktopvirtualization/registration.go b/azurerm/internal/services/desktopvirtualization/registration.go index 62c2a158df8c..819540f180c1 100644 --- 
a/azurerm/internal/services/desktopvirtualization/registration.go +++ b/azurerm/internal/services/desktopvirtualization/registration.go @@ -29,6 +29,7 @@ func (r Registration) SupportedResources() map[string]*pluginsdk.Resource { "azurerm_virtual_desktop_workspace": resourceArmDesktopVirtualizationWorkspace(), "azurerm_virtual_desktop_host_pool": resourceVirtualDesktopHostPool(), "azurerm_virtual_desktop_application_group": resourceVirtualDesktopApplicationGroup(), + "azurerm_virtual_desktop_application": resourceVirtualDesktopApplication(), "azurerm_virtual_desktop_workspace_application_group_association": resourceVirtualDesktopWorkspaceApplicationGroupAssociation(), } } diff --git a/azurerm/internal/services/desktopvirtualization/resourcesid.go b/azurerm/internal/services/desktopvirtualization/resourcesid.go index 6b20621db9a9..3d49df141138 100644 --- a/azurerm/internal/services/desktopvirtualization/resourcesid.go +++ b/azurerm/internal/services/desktopvirtualization/resourcesid.go @@ -1,5 +1,6 @@ package desktopvirtualization //go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=ApplicationGroup -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DesktopVirtualization/applicationGroups/applicationGroup1 -rewrite=true +//go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=Application -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DesktopVirtualization/applicationGroups/applicationGroup1/applications/application1 -rewrite=true //go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=HostPool -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DesktopVirtualization/hostPools/pool1 -rewrite=true //go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=Workspace 
-id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DesktopVirtualization/workspaces/workspace1 diff --git a/azurerm/internal/services/desktopvirtualization/validate/application_id.go b/azurerm/internal/services/desktopvirtualization/validate/application_id.go new file mode 100644 index 000000000000..7ca66d9b4b79 --- /dev/null +++ b/azurerm/internal/services/desktopvirtualization/validate/application_id.go @@ -0,0 +1,23 @@ +package validate + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "fmt" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/desktopvirtualization/parse" +) + +func ApplicationID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := parse.ApplicationID(v); err != nil { + errors = append(errors, err) + } + + return +} diff --git a/azurerm/internal/services/desktopvirtualization/validate/application_id_test.go b/azurerm/internal/services/desktopvirtualization/validate/application_id_test.go new file mode 100644 index 000000000000..61765d81af9d --- /dev/null +++ b/azurerm/internal/services/desktopvirtualization/validate/application_id_test.go @@ -0,0 +1,88 @@ +package validate + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import "testing" + +func TestApplicationID(t *testing.T) { + cases := []struct { + Input string + Valid bool + }{ + + { + // empty + Input: "", + Valid: false, + }, + + { + // missing SubscriptionId + Input: "/", + Valid: false, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Valid: false, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/", + Valid: false, + }, + + { + // missing value for ResourceGroup + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/", + Valid: false, + }, + + { + // missing ApplicationGroupName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DesktopVirtualization/", + Valid: false, + }, + + { + // missing value for ApplicationGroupName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DesktopVirtualization/applicationGroups/", + Valid: false, + }, + + { + // missing Name + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DesktopVirtualization/applicationGroups/applicationGroup1/", + Valid: false, + }, + + { + // missing value for Name + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DesktopVirtualization/applicationGroups/applicationGroup1/applications/", + Valid: false, + }, + + { + // valid + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.DesktopVirtualization/applicationGroups/applicationGroup1/applications/application1", + Valid: true, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/RESGROUP1/PROVIDERS/MICROSOFT.DESKTOPVIRTUALIZATION/APPLICATIONGROUPS/APPLICATIONGROUP1/APPLICATIONS/APPLICATION1", + Valid: false, + }, + } + for _, tc := range cases { + t.Logf("[DEBUG] Testing Value %s", tc.Input) + _, errors := ApplicationID(tc.Input, "test") + valid := len(errors) == 0 + + if tc.Valid != valid { + t.Fatalf("Expected %t but got %t", tc.Valid, valid) + } + } +} diff --git a/azurerm/internal/services/desktopvirtualization/virtual_desktop_application_group_resource.go b/azurerm/internal/services/desktopvirtualization/virtual_desktop_application_group_resource.go index 90d51cc2a6ee..c7fce18431df 100644 --- 
a/azurerm/internal/services/desktopvirtualization/virtual_desktop_application_group_resource.go +++ b/azurerm/internal/services/desktopvirtualization/virtual_desktop_application_group_resource.go @@ -6,13 +6,12 @@ import ( "regexp" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/desktopvirtualization/migration" - - "github.com/Azure/azure-sdk-for-go/services/preview/desktopvirtualization/mgmt/2019-12-10-preview/desktopvirtualization" + "github.com/Azure/azure-sdk-for-go/services/preview/desktopvirtualization/mgmt/2020-11-02-preview/desktopvirtualization" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/locks" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/desktopvirtualization/migration" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/desktopvirtualization/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/desktopvirtualization/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" diff --git a/azurerm/internal/services/desktopvirtualization/virtual_desktop_application_resource.go b/azurerm/internal/services/desktopvirtualization/virtual_desktop_application_resource.go new file mode 100644 index 000000000000..49a2c0e999df --- /dev/null +++ b/azurerm/internal/services/desktopvirtualization/virtual_desktop_application_resource.go @@ -0,0 +1,229 @@ +package desktopvirtualization + +import ( + "fmt" + "log" + "regexp" + "time" + + "github.com/Azure/azure-sdk-for-go/services/preview/desktopvirtualization/mgmt/2020-11-02-preview/desktopvirtualization" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/locks" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/desktopvirtualization/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/desktopvirtualization/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +var applicationType = "azurerm_virtual_desktop_application" + +func resourceVirtualDesktopApplication() *pluginsdk.Resource { + return &pluginsdk.Resource{ + Create: resourceVirtualDesktopApplicationCreateUpdate, + Read: resourceVirtualDesktopApplicationRead, + Update: resourceVirtualDesktopApplicationCreateUpdate, + Delete: resourceVirtualDesktopApplicationDelete, + + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(60 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(60 * time.Minute), + Delete: pluginsdk.DefaultTimeout(60 * time.Minute), + }, + + Importer: pluginsdk.ImporterValidatingResourceId(func(id string) error { + _, err := parse.ApplicationID(id) + return err + }), + + SchemaVersion: 0, + + Schema: map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringIsNotEmpty, + validation.StringMatch( + regexp.MustCompile("^[-a-zA-Z0-9]{1,260}$"), + "Virtual desktop application name must be 1 - 260 characters long, contain only letters, numbers and hyphens.", + ), + 
), + }, + + "application_group_id": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.ApplicationGroupID, + }, + + "friendly_name": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 64), + Computed: true, + }, + + "description": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 512), + }, + + "path": { + Type: pluginsdk.TypeString, + Required: true, + }, + + "command_line_argument_policy": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{ + string(desktopvirtualization.Allow), + string(desktopvirtualization.DoNotAllow), + string(desktopvirtualization.Require), + }, false), + }, + + "command_line_arguments": { + Type: pluginsdk.TypeString, + Optional: true, + }, + + "show_in_portal": { + Type: pluginsdk.TypeBool, + Optional: true, + }, + + "icon_path": { + Type: pluginsdk.TypeString, + Optional: true, + Computed: true, + }, + + "icon_index": { + Type: pluginsdk.TypeInt, + Optional: true, + }, + }, + } +} + +func resourceVirtualDesktopApplicationCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).DesktopVirtualization.ApplicationsClient + subscriptionId := meta.(*clients.Client).Account.SubscriptionId + + log.Printf("[INFO] preparing arguments for Virtual Desktop Application creation") + + name := d.Get("name").(string) + applicationGroup, _ := parse.ApplicationGroupID(d.Get("application_group_id").(string)) + + locks.ByName(name, applicationType) + defer locks.UnlockByName(name, applicationType) + + ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) + defer cancel() + + resourceId := parse.NewApplicationID(subscriptionId, applicationGroup.ResourceGroup, applicationGroup.Name, name).ID() + if d.IsNewResource() { + existing, err := client.Get(ctx, applicationGroup.ResourceGroup, applicationGroup.Name, name) 
+ if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("checking for presence of existing Virtual Desktop Application %q (Application Group %q) (Resource Group %q): %s", name, applicationGroup.Name, applicationGroup.ResourceGroup, err) + } + } + + if existing.ApplicationProperties != nil { + return tf.ImportAsExistsError("azurerm_virtual_desktop_application", resourceId) + } + } + + context := desktopvirtualization.Application{ + ApplicationProperties: &desktopvirtualization.ApplicationProperties{ + FriendlyName: utils.String(d.Get("friendly_name").(string)), + Description: utils.String(d.Get("description").(string)), + FilePath: utils.String(d.Get("path").(string)), + CommandLineSetting: desktopvirtualization.CommandLineSetting(d.Get("command_line_argument_policy").(string)), + CommandLineArguments: utils.String(d.Get("command_line_arguments").(string)), + ShowInPortal: utils.Bool(d.Get("show_in_portal").(bool)), + IconPath: utils.String(d.Get("icon_path").(string)), + IconIndex: utils.Int32(int32(d.Get("icon_index").(int))), + }, + } + + if _, err := client.CreateOrUpdate(ctx, applicationGroup.ResourceGroup, applicationGroup.Name, name, context); err != nil { + return fmt.Errorf("creating Virtual Desktop Application %q (Application Group %q) (Resource Group %q): %+v", name, applicationGroup.Name, applicationGroup.ResourceGroup, err) + } + + d.SetId(resourceId) + return resourceVirtualDesktopApplicationRead(d, meta) +} + +func resourceVirtualDesktopApplicationRead(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).DesktopVirtualization.ApplicationsClient + ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.ApplicationID(d.Id()) + if err != nil { + return err + } + + resp, err := client.Get(ctx, id.ResourceGroup, id.ApplicationGroupName, id.Name) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + log.Printf("[DEBUG] 
Virtual Desktop Application %q was not found in Resource Group %q - removing from state!", id.Name, id.ResourceGroup) + d.SetId("") + return nil + } + + return fmt.Errorf("retrieving Virtual Desktop Application %q (Application Group %q) (Resource Group %q): %+v", id.Name, id.ApplicationGroupName, id.ResourceGroup, err) + } + + applicationGroup := parse.ApplicationGroupId{ + SubscriptionId: id.SubscriptionId, + ResourceGroup: id.ResourceGroup, + Name: id.ApplicationGroupName, + } + + d.Set("name", id.Name) + d.Set("application_group_id", applicationGroup.ID()) + + if props := resp.ApplicationProperties; props != nil { + d.Set("friendly_name", props.FriendlyName) + d.Set("description", props.Description) + d.Set("path", props.FilePath) + d.Set("command_line_argument_policy", props.CommandLineSetting) + d.Set("command_line_arguments", props.CommandLineArguments) + d.Set("show_in_portal", props.ShowInPortal) + d.Set("icon_path", props.IconPath) + d.Set("icon_index", props.IconIndex) + } + + return nil +} + +func resourceVirtualDesktopApplicationDelete(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).DesktopVirtualization.ApplicationsClient + + id, err := parse.ApplicationID(d.Id()) + if err != nil { + return err + } + + locks.ByName(id.Name, applicationType) + defer locks.UnlockByName(id.Name, applicationType) + + ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) + defer cancel() + if _, err = client.Delete(ctx, id.ResourceGroup, id.ApplicationGroupName, id.Name); err != nil { + return fmt.Errorf("deleting Virtual Desktop Application %q (Application Group %q) (Resource Group %q): %+v", id.Name, id.ApplicationGroupName, id.ResourceGroup, err) + } + + return nil +} diff --git a/azurerm/internal/services/desktopvirtualization/virtual_desktop_application_resource_test.go b/azurerm/internal/services/desktopvirtualization/virtual_desktop_application_resource_test.go new file mode 100644 index 
000000000000..d8f276c98dd7 --- /dev/null +++ b/azurerm/internal/services/desktopvirtualization/virtual_desktop_application_resource_test.go @@ -0,0 +1,203 @@ +package desktopvirtualization_test + +import ( + "context" + "fmt" + "testing" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/desktopvirtualization/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +type VirtualDesktopApplicationResource struct { +} + +func TestAccVirtualDesktopApplication_basic(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_virtual_desktop_application", "test") + r := VirtualDesktopApplicationResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + }) +} + +func TestAccVirtualDesktopApplication_complete(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_virtual_desktop_application", "test") + r := VirtualDesktopApplicationResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.complete(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + }) +} + +func TestAccVirtualDesktopApplication_update(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_virtual_desktop_application", "test") + r := VirtualDesktopApplicationResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + { 
+ Config: r.complete(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + }) +} + +func TestAccVirtualDesktopApplication_requiresImport(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_virtual_desktop_application", "test") + r := VirtualDesktopApplicationResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + { + Config: r.requiresImport(data), + ExpectError: acceptance.RequiresImportError("azurerm_virtual_desktop_application"), + }, + }) +} + +func (VirtualDesktopApplicationResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { + id, err := parse.ApplicationID(state.ID) + if err != nil { + return nil, err + } + + resp, err := clients.DesktopVirtualization.ApplicationsClient.Get(ctx, id.ResourceGroup, id.ApplicationGroupName, id.Name) + if err != nil { + return nil, fmt.Errorf("retrieving Virtual Desktop Application Group %q (Resource Group: %q) does not exist", id.Name, id.ResourceGroup) + } + + return utils.Bool(resp.ApplicationProperties != nil), nil +} + +func (VirtualDesktopApplicationResource) basic(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-vdesktop-%d" + location = "%s" +} + +resource "azurerm_virtual_desktop_host_pool" "test" { + name = "acctestHP" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + type = "Pooled" + load_balancer_type = "BreadthFirst" +} + +resource "azurerm_virtual_desktop_application_group" "test" { + name = "acctestAG%d" + location = azurerm_resource_group.test.location 
+ resource_group_name = azurerm_resource_group.test.name + type = "RemoteApp" + host_pool_id = azurerm_virtual_desktop_host_pool.test.id +} + +resource "azurerm_virtual_desktop_application" "test" { + name = "acctestAG%d" + application_group_id = azurerm_virtual_desktop_application_group.test.id + path = "C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe" + command_line_argument_policy = "DoNotAllow" +} +`, data.RandomInteger, data.Locations.Secondary, data.RandomIntOfLength(8), data.RandomIntOfLength(8)) +} + +func (VirtualDesktopApplicationResource) complete(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-vdesktop-%d" + location = "%s" +} + +resource "azurerm_virtual_desktop_host_pool" "test" { + name = "acctestHP" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + validate_environment = true + description = "Acceptance Test: A host pool" + type = "Pooled" + load_balancer_type = "BreadthFirst" +} + +resource "azurerm_virtual_desktop_application_group" "test" { + name = "acctestAG%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + type = "RemoteApp" + host_pool_id = azurerm_virtual_desktop_host_pool.test.id + friendly_name = "TestAppGroup" + description = "Acceptance Test: An application group" + tags = { + Purpose = "Acceptance-Testing" + } +} + +resource "azurerm_virtual_desktop_application" "test" { + name = "acctestAG%d" + application_group_id = azurerm_virtual_desktop_application_group.test.id + friendly_name = "Google Chrome" + description = "Chromium based web browser" + path = "C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe" + command_line_argument_policy = "DoNotAllow" + command_line_arguments = "--incognito" + show_in_portal = false + icon_path = "C:\\Program 
Files\\Google\\Chrome\\Application\\chrome.exe" + icon_index = 1 +} + +`, data.RandomInteger, data.Locations.Secondary, data.RandomIntOfLength(8), data.RandomIntOfLength(8)) +} + +func (r VirtualDesktopApplicationResource) requiresImport(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_virtual_desktop_application" "import" { + name = azurerm_virtual_desktop_application.test.name + application_group_id = azurerm_virtual_desktop_application.test.application_group_id + path = azurerm_virtual_desktop_application.test.path + command_line_argument_policy = azurerm_virtual_desktop_application.test.command_line_argument_policy + +} +`, r.basic(data)) +} diff --git a/azurerm/internal/services/desktopvirtualization/virtual_desktop_host_pool_resource.go b/azurerm/internal/services/desktopvirtualization/virtual_desktop_host_pool_resource.go index 050d8b935c01..4e9899f45902 100644 --- a/azurerm/internal/services/desktopvirtualization/virtual_desktop_host_pool_resource.go +++ b/azurerm/internal/services/desktopvirtualization/virtual_desktop_host_pool_resource.go @@ -5,13 +5,12 @@ import ( "log" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/desktopvirtualization/migration" - - "github.com/Azure/azure-sdk-for-go/services/preview/desktopvirtualization/mgmt/2019-12-10-preview/desktopvirtualization" + "github.com/Azure/azure-sdk-for-go/services/preview/desktopvirtualization/mgmt/2020-11-02-preview/desktopvirtualization" "github.com/Azure/go-autorest/autorest/date" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/desktopvirtualization/migration" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/desktopvirtualization/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" @@ -117,6 +116,12 @@ func resourceVirtualDesktopHostPool() *pluginsdk.Resource { ValidateFunc: validation.IntBetween(0, 999999), }, + "start_vm_on_connect": { + Type: pluginsdk.TypeBool, + Optional: true, + Default: false, + }, + "preferred_app_group_type": { Type: pluginsdk.TypeString, Optional: true, @@ -199,6 +204,7 @@ func resourceVirtualDesktopHostPoolCreateUpdate(d *pluginsdk.ResourceData, meta ValidationEnvironment: utils.Bool(d.Get("validate_environment").(bool)), CustomRdpProperty: utils.String(d.Get("custom_rdp_properties").(string)), MaxSessionLimit: utils.Int32(int32(d.Get("maximum_sessions_allowed").(int))), + StartVMOnConnect: utils.Bool(d.Get("start_vm_on_connect").(bool)), LoadBalancerType: desktopvirtualization.LoadBalancerType(d.Get("load_balancer_type").(string)), PersonalDesktopAssignmentType: desktopvirtualization.PersonalDesktopAssignmentType(d.Get("personal_desktop_assignment_type").(string)), PreferredAppGroupType: desktopvirtualization.PreferredAppGroupType(d.Get("preferred_app_group_type").(string)), @@ -256,6 +262,7 @@ func resourceVirtualDesktopHostPoolRead(d *pluginsdk.ResourceData, meta interfac d.Set("preferred_app_group_type", string(props.PreferredAppGroupType)) d.Set("type", string(props.HostPoolType)) d.Set("validate_environment", props.ValidationEnvironment) + d.Set("start_vm_on_connect", props.StartVMOnConnect) d.Set("custom_rdp_properties", props.CustomRdpProperty) if err := d.Set("registration_info", flattenVirtualDesktopHostPoolRegistrationInfo(props.RegistrationInfo)); err != nil { diff --git a/azurerm/internal/services/desktopvirtualization/virtual_desktop_host_pool_resource_test.go 
b/azurerm/internal/services/desktopvirtualization/virtual_desktop_host_pool_resource_test.go index 17be1843981b..cfafcf38c216 100644 --- a/azurerm/internal/services/desktopvirtualization/virtual_desktop_host_pool_resource_test.go +++ b/azurerm/internal/services/desktopvirtualization/virtual_desktop_host_pool_resource_test.go @@ -148,6 +148,7 @@ resource "azurerm_virtual_desktop_host_pool" "test" { friendly_name = "A Friendly Name!" description = "A Description!" validate_environment = true + start_vm_on_connect = true load_balancer_type = "BreadthFirst" maximum_sessions_allowed = 100 preferred_app_group_type = "Desktop" diff --git a/azurerm/internal/services/desktopvirtualization/virtual_desktop_workspace_application_group_association_resource.go b/azurerm/internal/services/desktopvirtualization/virtual_desktop_workspace_application_group_association_resource.go index 12254e07019c..95b31ee59657 100644 --- a/azurerm/internal/services/desktopvirtualization/virtual_desktop_workspace_application_group_association_resource.go +++ b/azurerm/internal/services/desktopvirtualization/virtual_desktop_workspace_application_group_association_resource.go @@ -6,12 +6,11 @@ import ( "strings" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/desktopvirtualization/migration" - - "github.com/Azure/azure-sdk-for-go/services/preview/desktopvirtualization/mgmt/2019-12-10-preview/desktopvirtualization" + "github.com/Azure/azure-sdk-for-go/services/preview/desktopvirtualization/mgmt/2020-11-02-preview/desktopvirtualization" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/locks" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/desktopvirtualization/migration" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/desktopvirtualization/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/desktopvirtualization/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" diff --git a/azurerm/internal/services/desktopvirtualization/virtual_desktop_workspace_resource.go b/azurerm/internal/services/desktopvirtualization/virtual_desktop_workspace_resource.go index f4e48563347c..caec7a41566f 100644 --- a/azurerm/internal/services/desktopvirtualization/virtual_desktop_workspace_resource.go +++ b/azurerm/internal/services/desktopvirtualization/virtual_desktop_workspace_resource.go @@ -5,7 +5,7 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/preview/desktopvirtualization/mgmt/2019-12-10-preview/desktopvirtualization" + "github.com/Azure/azure-sdk-for-go/services/preview/desktopvirtualization/mgmt/2020-11-02-preview/desktopvirtualization" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" diff --git a/azurerm/internal/services/domainservices/active_directory_domain_service_data_source.go b/azurerm/internal/services/domainservices/active_directory_domain_service_data_source.go new file mode 100644 index 000000000000..b7e977f27287 --- /dev/null +++ b/azurerm/internal/services/domainservices/active_directory_domain_service_data_source.go @@ -0,0 +1,285 @@ +package domainservices + +import ( + "fmt" + "time" + + "github.com/Azure/azure-sdk-for-go/services/domainservices/mgmt/2020-01-01/aad" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func dataSourceActiveDirectoryDomainService() *pluginsdk.Resource { + return &pluginsdk.Resource{ + Read: dataSourceActiveDirectoryDomainServiceRead, + + Timeouts: &pluginsdk.ResourceTimeout{ + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + }, + + Schema: map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringIsNotWhiteSpace, + }, + + "resource_group_name": azure.SchemaResourceGroupNameForDataSource(), + + "deployment_id": { + Type: pluginsdk.TypeString, + Computed: true, + }, + + "domain_configuration_type": { + Type: pluginsdk.TypeString, + Computed: true, + }, + + "domain_name": { + Type: pluginsdk.TypeString, + Computed: true, + }, + + "filtered_sync_enabled": { + Type: pluginsdk.TypeBool, + Computed: true, + }, + + "location": azure.SchemaLocationForDataSource(), + + "notifications": { + Type: pluginsdk.TypeList, + Computed: true, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "additional_recipients": { + Type: pluginsdk.TypeList, + Computed: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + }, + }, + "notify_dc_admins": { + Type: pluginsdk.TypeBool, + Computed: true, + }, + "notify_global_admins": { + Type: pluginsdk.TypeBool, + Computed: true, + }, + }, + }, + }, + + "replica_sets": { + Type: pluginsdk.TypeList, + Computed: true, + Elem: &pluginsdk.Resource{ + Schema: dataSourceActiveDirectoryDomainServiceReplicaSetSchema(), + }, + }, + + "secure_ldap": { + Type: pluginsdk.TypeList, + Computed: true, + Elem: 
&pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "enabled": { + Type: pluginsdk.TypeBool, + Computed: true, + }, + + "external_access_enabled": { + Type: pluginsdk.TypeBool, + Computed: true, + }, + + "certificate_expiry": { + Type: pluginsdk.TypeString, + Computed: true, + }, + + "certificate_thumbprint": { + Type: pluginsdk.TypeString, + Computed: true, + }, + + "public_certificate": { + Type: pluginsdk.TypeString, + Computed: true, + }, + }, + }, + }, + + "security": { + Type: pluginsdk.TypeList, + Computed: true, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "ntlm_v1_enabled": { + Type: pluginsdk.TypeBool, + Computed: true, + }, + + "sync_kerberos_passwords": { + Type: pluginsdk.TypeBool, + Computed: true, + }, + + "sync_ntlm_passwords": { + Type: pluginsdk.TypeBool, + Computed: true, + }, + + "sync_on_prem_passwords": { + Type: pluginsdk.TypeBool, + Computed: true, + }, + + "tls_v1_enabled": { + Type: pluginsdk.TypeBool, + Computed: true, + }, + }, + }, + }, + + "sku": { + Type: pluginsdk.TypeString, + Computed: true, + }, + + "sync_owner": { + Type: pluginsdk.TypeString, + Computed: true, + }, + + "tags": tags.SchemaDataSource(), + + "tenant_id": { + Type: pluginsdk.TypeString, + Computed: true, + }, + + "version": { + Type: pluginsdk.TypeInt, + Computed: true, + }, + }, + } +} + +func dataSourceActiveDirectoryDomainServiceReplicaSetSchema() map[string]*pluginsdk.Schema { + return map[string]*pluginsdk.Schema{ + // TODO: add health-related attributes + + "domain_controller_ip_addresses": { + Type: pluginsdk.TypeList, + Computed: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + }, + }, + + "external_access_ip_address": { + Type: pluginsdk.TypeString, + Computed: true, + }, + + "id": { + Type: pluginsdk.TypeString, + Computed: true, + }, + + "location": azure.SchemaLocationForDataSource(), + + "service_status": { + Type: pluginsdk.TypeString, + Computed: true, + }, + + "subnet_id": { + Type: 
pluginsdk.TypeString, + Computed: true, + }, + } +} + +func dataSourceActiveDirectoryDomainServiceRead(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).DomainServices.DomainServicesClient + ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) + defer cancel() + + name := d.Get("name").(string) + resourceGroup := d.Get("resource_group_name").(string) + + resp, err := client.Get(ctx, resourceGroup, name) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + return nil + } + return err + } + + if resp.ID == nil { + return fmt.Errorf("reading Domain Service: ID was returned nil") + } + d.SetId(*resp.ID) + + d.Set("name", name) + d.Set("resource_group_name", resourceGroup) + + if resp.Location == nil { + return fmt.Errorf("reading Domain Service %q: location was returned nil", d.Id()) + } + d.Set("location", azure.NormalizeLocation(*resp.Location)) + + if props := resp.DomainServiceProperties; props != nil { + d.Set("deployment_id", props.DeploymentID) + + domainConfigType := "" + if v := props.DomainConfigurationType; v != nil { + domainConfigType = *v + } + d.Set("domain_configuration_type", domainConfigType) + + d.Set("domain_name", props.DomainName) + + d.Set("filtered_sync_enabled", false) + if props.FilteredSync == aad.FilteredSyncEnabled { + d.Set("filtered_sync_enabled", true) + } + + d.Set("sku", props.Sku) + d.Set("sync_owner", props.SyncOwner) + d.Set("tenant_id", props.TenantID) + d.Set("version", props.Version) + + if err := d.Set("notifications", flattenDomainServiceNotifications(props.NotificationSettings)); err != nil { + return fmt.Errorf("setting `notifications`: %+v", err) + } + + if err := d.Set("secure_ldap", flattenDomainServiceLdaps(d, props.LdapsSettings, true)); err != nil { + return fmt.Errorf("setting `secure_ldap`: %+v", err) + } + + if err := d.Set("security", flattenDomainServiceSecurity(props.DomainSecuritySettings)); err != nil { + return fmt.Errorf("setting 
`security`: %+v", err) + } + + replicaSets := flattenDomainServiceReplicaSets(props.ReplicaSets) + if err := d.Set("replica_sets", replicaSets); err != nil { + return fmt.Errorf("setting `replica_sets`: %+v", err) + } + } + + return tags.FlattenAndSet(d, resp.Tags) +} diff --git a/azurerm/internal/services/domainservices/active_directory_domain_service_replica_set_resource.go b/azurerm/internal/services/domainservices/active_directory_domain_service_replica_set_resource.go new file mode 100644 index 000000000000..219115a0f8cc --- /dev/null +++ b/azurerm/internal/services/domainservices/active_directory_domain_service_replica_set_resource.go @@ -0,0 +1,332 @@ +package domainservices + +import ( + "fmt" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/services/domainservices/mgmt/2020-01-01/aad" + + networkValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/validate" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/location" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/locks" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/domainservices/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/domainservices/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func resourceActiveDirectoryDomainServiceReplicaSet() *pluginsdk.Resource { + return &pluginsdk.Resource{ + Create: 
resourceActiveDirectoryDomainServiceReplicaSetCreate, + Read: resourceActiveDirectoryDomainServiceReplicaSetRead, + Delete: resourceActiveDirectoryDomainServiceReplicaSetDelete, + + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(3 * time.Hour), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(2 * time.Hour), + Delete: pluginsdk.DefaultTimeout(1 * time.Hour), + }, + + Importer: pluginsdk.ImporterValidatingResourceId(func(id string) error { + _, err := parse.DomainServiceReplicaSetID(id) + return err + }), + + Schema: map[string]*pluginsdk.Schema{ + "domain_service_id": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.DomainServiceID, + }, + + "location": azure.SchemaLocation(), + + "subnet_id": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: networkValidate.SubnetID, + }, + + "domain_controller_ip_addresses": { + Type: pluginsdk.TypeList, + Computed: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + }, + }, + + "external_access_ip_address": { + Type: pluginsdk.TypeString, + Computed: true, + }, + + "service_status": { + Type: pluginsdk.TypeString, + Computed: true, + }, + }, + } +} + +func resourceActiveDirectoryDomainServiceReplicaSetCreate(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).DomainServices.DomainServicesClient + ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) + defer cancel() + + domainServiceId, err := parse.DomainServiceID(d.Get("domain_service_id").(string)) + if err != nil { + return err + } + if domainServiceId == nil { + return fmt.Errorf("parsing ID for Domain Service Replica Set") + } + + locks.ByName(domainServiceId.Name, DomainServiceResourceName) + defer locks.UnlockByName(domainServiceId.Name, DomainServiceResourceName) + + domainService, err := client.Get(ctx, domainServiceId.ResourceGroup, domainServiceId.Name) + 
if err != nil { + if utils.ResponseWasNotFound(domainService.Response) { + return fmt.Errorf("could not find %s: %s", domainServiceId, err) + } + return fmt.Errorf("reading %s: %s", domainServiceId, err) + } + + if domainService.DomainServiceProperties.ReplicaSets == nil || len(*domainService.DomainServiceProperties.ReplicaSets) == 0 { + return fmt.Errorf("reading %s: returned with missing replica set information, expected at least 1 replica set: %s", domainServiceId, err) + } + + subnetId := d.Get("subnet_id").(string) + replicaSets := *domainService.DomainServiceProperties.ReplicaSets + + for _, r := range replicaSets { + if r.ReplicaSetID == nil { + return fmt.Errorf("reading %s: a replica set was returned with a missing ReplicaSetID", domainServiceId) + } + if r.SubnetID == nil { + return fmt.Errorf("reading %s: a replica set was returned with a missing SubnetID", domainServiceId) + } + + // We assume that two replica sets cannot coexist in the same subnet + if strings.EqualFold(subnetId, *r.SubnetID) { + // Generate an ID here since we only know it once we know the ReplicaSetID + id := parse.NewDomainServiceReplicaSetID(domainServiceId.SubscriptionId, domainServiceId.ResourceGroup, domainServiceId.Name, *r.ReplicaSetID) + return tf.ImportAsExistsError("azurerm_active_directory_domain_service_replica_set", id.ID()) + } + } + + loc := location.Normalize(d.Get("location").(string)) + replicaSets = append(replicaSets, aad.ReplicaSet{ + Location: utils.String(loc), + SubnetID: utils.String(subnetId), + }) + + domainService.DomainServiceProperties.ReplicaSets = &replicaSets + + future, err := client.CreateOrUpdate(ctx, domainServiceId.ResourceGroup, domainServiceId.Name, domainService) + if err != nil { + return fmt.Errorf("creating/updating Replica Sets for %s: %+v", domainServiceId, err) + } + if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { + return fmt.Errorf("waiting for Replica Sets for %s: %+v", domainServiceId, err) + } + + // We need 
to retrieve the domain service again to find out the new replica set ID + domainService, err = client.Get(ctx, domainServiceId.ResourceGroup, domainServiceId.Name) + if err != nil { + if utils.ResponseWasNotFound(domainService.Response) { + return fmt.Errorf("could not find %s: %s", domainServiceId, err) + } + return fmt.Errorf("reading %s: %s", domainServiceId, err) + } + + if domainService.DomainServiceProperties.ReplicaSets == nil || len(*domainService.DomainServiceProperties.ReplicaSets) == 0 { + return fmt.Errorf("reading %s: returned with missing replica set information, expected at least 1 replica set: %s", domainServiceId, err) + } + + var id parse.DomainServiceReplicaSetId + // Assuming that two replica sets cannot coexist in the same subnet, we identify our new replica set by its SubnetID + for _, r := range *domainService.DomainServiceProperties.ReplicaSets { + if r.ReplicaSetID == nil { + return fmt.Errorf("reading %s: a replica set was returned with a missing ReplicaSetID", domainServiceId) + } + if r.SubnetID == nil { + return fmt.Errorf("reading %s: a replica set was returned with a missing SubnetID", domainServiceId) + } + + if strings.EqualFold(subnetId, *r.SubnetID) { + // We found it! 
+ id = parse.NewDomainServiceReplicaSetID(domainServiceId.SubscriptionId, domainServiceId.ResourceGroup, domainServiceId.Name, *r.ReplicaSetID) + } + } + + if id.ReplicaSetName == "" { + return fmt.Errorf("reading %s: the new replica set was not returned", domainServiceId) + } + + // Wait for all replica sets to become available with two domain controllers each before proceeding + timeout, _ := ctx.Deadline() + stateConf := &pluginsdk.StateChangeConf{ + Pending: []string{"pending"}, + Target: []string{"available"}, + Refresh: domainServiceControllerRefreshFunc(ctx, client, *domainServiceId, false), + Delay: 1 * time.Minute, + PollInterval: 1 * time.Minute, + Timeout: time.Until(timeout), + } + + if _, err := stateConf.WaitForStateContext(ctx); err != nil { + return fmt.Errorf("waiting for both domain controllers to become available in all replica sets for %s: %+v", domainServiceId, err) + } + + d.SetId(id.ID()) + + return resourceActiveDirectoryDomainServiceReplicaSetRead(d, meta) +} + +func resourceActiveDirectoryDomainServiceReplicaSetRead(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).DomainServices.DomainServicesClient + ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.DomainServiceReplicaSetID(d.Id()) + if err != nil { + return err + } + + domainService, err := client.Get(ctx, id.ResourceGroup, id.DomainServiceName) + if err != nil { + if utils.ResponseWasNotFound(domainService.Response) { + d.SetId("") + return nil + } + return err + } + + if domainService.DomainServiceProperties.ReplicaSets == nil || len(*domainService.DomainServiceProperties.ReplicaSets) == 0 { + return fmt.Errorf("reading %s: domain service returned with missing replica set information, expected at least 1 replica set: %s", id, err) + } + + var ( + domainControllerIpAddresses []string + externalAccessIpAddress string + loc string + serviceStatus string + subnetId string + ) + + replicaSets 
:= *domainService.DomainServiceProperties.ReplicaSets + + for _, r := range replicaSets { + if r.ReplicaSetID == nil { + return fmt.Errorf("reading %s: a replica set was returned with a missing ReplicaSetID", id) + } + + // ReplicaSetName in the ID struct is really the replica set ID + if *r.ReplicaSetID == id.ReplicaSetName { + if r.DomainControllerIPAddress != nil { + domainControllerIpAddresses = *r.DomainControllerIPAddress + } + if r.ExternalAccessIPAddress != nil { + externalAccessIpAddress = *r.ExternalAccessIPAddress + } + if r.Location != nil { + loc = location.NormalizeNilable(r.Location) + } + if r.ServiceStatus != nil { + serviceStatus = *r.ServiceStatus + } + if r.SubnetID != nil { + subnetId = *r.SubnetID + } + } + } + + d.Set("domain_controller_ip_addresses", domainControllerIpAddresses) + d.Set("external_access_ip_address", externalAccessIpAddress) + d.Set("location", loc) + d.Set("service_status", serviceStatus) + d.Set("subnet_id", subnetId) + + return nil +} + +func resourceActiveDirectoryDomainServiceReplicaSetDelete(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).DomainServices.DomainServicesClient + ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.DomainServiceReplicaSetID(d.Id()) + if err != nil { + return err + } + + domainService, err := client.Get(ctx, id.ResourceGroup, id.DomainServiceName) + if err != nil { + if utils.ResponseWasNotFound(domainService.Response) { + return fmt.Errorf("deleting %s: domain service was not found: %s", id, err) + } + return err + } + + if domainService.DomainServiceProperties.ReplicaSets == nil || len(*domainService.DomainServiceProperties.ReplicaSets) == 0 { + return fmt.Errorf("deleting %s: domain service returned with missing replica set information, expected at least 1 replica set: %s", id, err) + } + + replicaSets := *domainService.DomainServiceProperties.ReplicaSets + + newReplicaSets := 
make([]aad.ReplicaSet, 0) + for _, r := range replicaSets { + if r.ReplicaSetID == nil { + return fmt.Errorf("deleting %s: a replica set was returned with a missing ReplicaSetID", id) + } + + if *r.ReplicaSetID == id.ReplicaSetName { + continue + } + + newReplicaSets = append(newReplicaSets, r) + } + + if len(replicaSets) == len(newReplicaSets) { + return fmt.Errorf("deleting %s: could not determine which replica set to remove", id) + } + + domainService.DomainServiceProperties.ReplicaSets = &newReplicaSets + + future, err := client.CreateOrUpdate(ctx, id.ResourceGroup, id.DomainServiceName, domainService) + if err != nil { + return fmt.Errorf("deleting %s: %+v", id, err) + } + if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { + return fmt.Errorf("waiting for deletion of %s: %+v", id, err) + } + + // Wait for all replica sets to become available with two domain controllers each before proceeding + // Generate a partial DomainServiceId since we don't need to know the initial replica set ID here + domainServiceId := parse.NewDomainServiceID(id.SubscriptionId, id.ResourceGroup, id.DomainServiceName, "") + timeout, _ := ctx.Deadline() + stateConf := &pluginsdk.StateChangeConf{ + Pending: []string{"pending"}, + Target: []string{"available"}, + Refresh: domainServiceControllerRefreshFunc(ctx, client, domainServiceId, true), + Delay: 1 * time.Minute, + PollInterval: 1 * time.Minute, + Timeout: time.Until(timeout), + } + + if _, err := stateConf.WaitForStateContext(ctx); err != nil { + return fmt.Errorf("waiting for replica sets to finish updating for %s: %+v", domainServiceId, err) + } + + return nil +} diff --git a/azurerm/internal/services/domainservices/active_directory_domain_service_resource.go b/azurerm/internal/services/domainservices/active_directory_domain_service_resource.go new file mode 100644 index 000000000000..7420adc457d6 --- /dev/null +++ b/azurerm/internal/services/domainservices/active_directory_domain_service_resource.go @@ -0,0 
+1,771 @@ +package domainservices + +import ( + "context" + "fmt" + "log" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/services/domainservices/mgmt/2020-01-01/aad" + "github.com/hashicorp/go-azure-helpers/response" + + azValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" + networkValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/validate" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/location" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/locks" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/domainservices/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +const DomainServiceResourceName = "azurerm_active_directory_domain_service" + +func resourceActiveDirectoryDomainService() *pluginsdk.Resource { + return &pluginsdk.Resource{ + Create: resourceActiveDirectoryDomainServiceCreateUpdate, + Read: resourceActiveDirectoryDomainServiceRead, + Update: resourceActiveDirectoryDomainServiceCreateUpdate, + Delete: resourceActiveDirectoryDomainServiceDelete, + + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(3 * time.Hour), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: 
pluginsdk.DefaultTimeout(2 * time.Hour), + Delete: pluginsdk.DefaultTimeout(1 * time.Hour), + }, + + Importer: pluginsdk.ImporterValidatingResourceId(func(id string) error { + _, err := parse.DomainServiceID(id) + return err + }), + + Schema: map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringIsNotEmpty, // TODO: proper validation + }, + + "location": azure.SchemaLocation(), + + "resource_group_name": azure.SchemaResourceGroupName(), + + "domain_name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringIsNotEmpty, // TODO: proper validation, first prefix must be 15 chars or less + }, + + "initial_replica_set": { + Type: pluginsdk.TypeList, + Required: true, + MinItems: 1, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "domain_controller_ip_addresses": { + Type: pluginsdk.TypeList, + Computed: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + }, + }, + + "external_access_ip_address": { + Type: pluginsdk.TypeString, + Computed: true, + }, + + "id": { + Type: pluginsdk.TypeString, + Computed: true, + }, + + // location is computed here + "location": azure.SchemaLocationForDataSource(), + + "service_status": { + Type: pluginsdk.TypeString, + Computed: true, + }, + + "subnet_id": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: networkValidate.SubnetID, + }, + }, + }, + }, + + "sku": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{ + "Standard", + "Enterprise", + "Premium", + }, false), + }, + + "filtered_sync_enabled": { + Type: pluginsdk.TypeBool, + Optional: true, + Default: false, + }, + + "notifications": { + Type: pluginsdk.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "additional_recipients": { + Type: 
pluginsdk.TypeSet, + Optional: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + ValidateFunc: validation.StringIsNotWhiteSpace, + }, + }, + + "notify_dc_admins": { + Type: pluginsdk.TypeBool, + Optional: true, + Default: false, + }, + + "notify_global_admins": { + Type: pluginsdk.TypeBool, + Optional: true, + Default: false, + }, + }, + }, + }, + + "secure_ldap": { + Type: pluginsdk.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "enabled": { + Type: pluginsdk.TypeBool, + Required: true, + }, + + "external_access_enabled": { + Type: pluginsdk.TypeBool, + Optional: true, + Default: false, + }, + + "pfx_certificate": { + Type: pluginsdk.TypeString, + Required: true, + Sensitive: true, + ValidateFunc: azValidate.Base64EncodedString, + }, + + "pfx_certificate_password": { + Type: pluginsdk.TypeString, + Required: true, + Sensitive: true, + }, + + "certificate_expiry": { + Type: pluginsdk.TypeString, + Computed: true, + }, + + "certificate_thumbprint": { + Type: pluginsdk.TypeString, + Computed: true, + }, + + "public_certificate": { + Type: pluginsdk.TypeString, + Computed: true, + }, + }, + }, + }, + + "security": { + Type: pluginsdk.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "ntlm_v1_enabled": { + Type: pluginsdk.TypeBool, + Optional: true, + Default: false, + }, + + "sync_kerberos_passwords": { + Type: pluginsdk.TypeBool, + Optional: true, + Default: false, + }, + + "sync_ntlm_passwords": { + Type: pluginsdk.TypeBool, + Optional: true, + Default: false, + }, + + "sync_on_prem_passwords": { + Type: pluginsdk.TypeBool, + Optional: true, + Default: false, + }, + + "tls_v1_enabled": { + Type: pluginsdk.TypeBool, + Optional: true, + Default: false, + }, + }, + }, + }, + + "tags": tags.Schema(), + + "deployment_id": { + Type: pluginsdk.TypeString, + Computed: true, + }, + + 
"sync_owner": { + Type: pluginsdk.TypeString, + Computed: true, + }, + + "tenant_id": { + Type: pluginsdk.TypeString, + Computed: true, + }, + + "version": { + Type: pluginsdk.TypeInt, + Computed: true, + }, + }, + } +} + +func resourceActiveDirectoryDomainServiceCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).DomainServices.DomainServicesClient + ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) + defer cancel() + + name := d.Get("name").(string) + resourceGroup := d.Get("resource_group_name").(string) + resourceErrorName := fmt.Sprintf("Domain Service (Name: %q, Resource Group: %q)", name, resourceGroup) + + locks.ByName(name, DomainServiceResourceName) + defer locks.UnlockByName(name, DomainServiceResourceName) + + // If this is a new resource, we cannot determine the resource ID until after it has been created since we need to + // know the ID of the first replica set. + var id *parse.DomainServiceId + + if d.IsNewResource() { + existing, err := client.Get(ctx, resourceGroup, name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("checking for presence of existing %s: %s", resourceErrorName, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + // Parse the replica sets and assume the first one returned to be the initial replica set + // This is a best effort and the user can choose any replica set if they structure their config accordingly + props := existing.DomainServiceProperties + if props == nil { + return fmt.Errorf("checking for presence of existing %s: API response contained nil or missing properties", resourceErrorName) + } + replicaSets := flattenDomainServiceReplicaSets(props.ReplicaSets) + if len(replicaSets) == 0 { + return fmt.Errorf("checking for presence of existing %s: API response contained nil or missing replica set details", resourceErrorName) + } + initialReplicaSetId := 
replicaSets[0].(map[string]interface{})["id"].(string) + id := parse.NewDomainServiceID(client.SubscriptionID, resourceGroup, name, initialReplicaSetId) + + return tf.ImportAsExistsError(DomainServiceResourceName, id.ID()) + } + } else { + var err error + id, err = parse.DomainServiceID(d.Id()) + if err != nil { + return fmt.Errorf("preparing update for %s: %+v", resourceErrorName, err) + } + if id == nil { + return fmt.Errorf("preparing update for %s: resource ID could not be parsed", resourceErrorName) + } + } + + loc := location.Normalize(d.Get("location").(string)) + filteredSync := aad.FilteredSyncDisabled + if d.Get("filtered_sync_enabled").(bool) { + filteredSync = aad.FilteredSyncEnabled + } + + domainService := aad.DomainService{ + DomainServiceProperties: &aad.DomainServiceProperties{ + DomainName: utils.String(d.Get("domain_name").(string)), + DomainSecuritySettings: expandDomainServiceSecurity(d.Get("security").([]interface{})), + FilteredSync: filteredSync, + LdapsSettings: expandDomainServiceLdaps(d.Get("secure_ldap").([]interface{})), + NotificationSettings: expandDomainServiceNotifications(d.Get("notifications").([]interface{})), + Sku: utils.String(d.Get("sku").(string)), + }, + Location: utils.String(loc), + Tags: tags.Expand(d.Get("tags").(map[string]interface{})), + } + + if d.IsNewResource() { + // On resource creation, specify the initial replica set.
+ // No provision is made for changing the initial replica set, it should remain intact for the resource to function properly + replicaSets := []aad.ReplicaSet{ + { + Location: utils.String(loc), + SubnetID: utils.String(d.Get("initial_replica_set.0.subnet_id").(string)), + }, + } + domainService.DomainServiceProperties.ReplicaSets = &replicaSets + } + + future, err := client.CreateOrUpdate(ctx, resourceGroup, name, domainService) + if err != nil { + return fmt.Errorf("creating/updating %s: %+v", resourceErrorName, err) + } + if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { + return fmt.Errorf("waiting for %s: %+v", resourceErrorName, err) + } + + // Retrieve the domain service to discover the unique ID for the initial replica set, which should not subsequently change + if d.IsNewResource() { + resp, err := client.Get(ctx, resourceGroup, name) + if err != nil { + return fmt.Errorf("retrieving %s after creating: %+v", resourceErrorName, err) + } + props := resp.DomainServiceProperties + if props == nil { + return fmt.Errorf("%s returned with no properties", resourceErrorName) + } + if props.ReplicaSets == nil { + return fmt.Errorf("%s returned with no replica set details", resourceErrorName) + } + + replicaSets := flattenDomainServiceReplicaSets(props.ReplicaSets) + if replicaSetCount := len(replicaSets); replicaSetCount != 1 { + return fmt.Errorf("unexpected number of replica sets for %s: expected 1, saw %d", resourceErrorName, replicaSetCount) + } + + // Once we know the initial replica set ID, we can build a resource ID + initialReplicaSetId := replicaSets[0].(map[string]interface{})["id"].(string) + newId := parse.NewDomainServiceID(client.SubscriptionID, resourceGroup, name, initialReplicaSetId) + id = &newId + d.SetId(id.ID()) + + if err := d.Set("initial_replica_set", []interface{}{replicaSets[0]}); err != nil { + return fmt.Errorf("setting `initial_replica_set` after creating resource: %+v", err) + } + } + + if id == nil { + return 
fmt.Errorf("after creating/updating %s: id was unexpectedly nil", resourceErrorName) + } + + // A fully deployed domain service has 2 domain controllers per replica set, but the create operation completes early before the DCs are online. + // The domain service is still provisioning and further operations are blocked until both DCs are up and ready. + timeout, _ := ctx.Deadline() + stateConf := &pluginsdk.StateChangeConf{ + Pending: []string{"pending"}, + Target: []string{"available"}, + Refresh: domainServiceControllerRefreshFunc(ctx, client, *id, false), + Delay: 1 * time.Minute, + PollInterval: 1 * time.Minute, + Timeout: time.Until(timeout), + } + + if _, err := stateConf.WaitForStateContext(ctx); err != nil { + return fmt.Errorf("waiting for both domain controllers to become available in initial replica set for %s: %+v", id, err) + } + + return resourceActiveDirectoryDomainServiceRead(d, meta) +} + +func resourceActiveDirectoryDomainServiceRead(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).DomainServices.DomainServicesClient + ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.DomainServiceID(d.Id()) + if err != nil { + return err + } + + resp, err := client.Get(ctx, id.ResourceGroup, id.Name) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + d.SetId("") + return nil + } + return err + } + + d.Set("name", id.Name) + d.Set("resource_group_name", id.ResourceGroup) + + loc := location.NormalizeNilable(resp.Location) + d.Set("location", loc) + + if props := resp.DomainServiceProperties; props != nil { + d.Set("deployment_id", props.DeploymentID) + d.Set("domain_name", props.DomainName) + d.Set("sync_owner", props.SyncOwner) + d.Set("tenant_id", props.TenantID) + d.Set("version", props.Version) + + d.Set("filtered_sync_enabled", false) + if props.FilteredSync == aad.FilteredSyncEnabled { + d.Set("filtered_sync_enabled", true) + } + + d.Set("sku", 
props.Sku) + + if err := d.Set("notifications", flattenDomainServiceNotifications(props.NotificationSettings)); err != nil { + return fmt.Errorf("setting `notifications`: %+v", err) + } + + var initialReplicaSet interface{} + replicaSets := flattenDomainServiceReplicaSets(props.ReplicaSets) + + // Determine the initial replica set. This is why we need to include InitialReplicaSetId in the resource ID, + // without it we would not be able to reliably support importing. + for _, replicaSetRaw := range replicaSets { + replicaSet := replicaSetRaw.(map[string]interface{}) + if replicaSet["id"].(string) == id.InitialReplicaSetIdName { + initialReplicaSet = replicaSetRaw + break + } + } + if initialReplicaSet == nil { + // It's safest to error out here, since we don't want to wipe the initial replica set from state if it was deleted manually + return fmt.Errorf("reading %s: could not determine initial replica set from API response", id) + } + if err := d.Set("initial_replica_set", []interface{}{initialReplicaSet}); err != nil { + return fmt.Errorf("setting `initial_replica_set`: %+v", err) + } + + if err := d.Set("secure_ldap", flattenDomainServiceLdaps(d, props.LdapsSettings, false)); err != nil { + return fmt.Errorf("setting `secure_ldap`: %+v", err) + } + + if err := d.Set("security", flattenDomainServiceSecurity(props.DomainSecuritySettings)); err != nil { + return fmt.Errorf("setting `security`: %+v", err) + } + } + + return tags.FlattenAndSet(d, resp.Tags) +} + +func resourceActiveDirectoryDomainServiceDelete(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).DomainServices.DomainServicesClient + ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.DomainServiceID(d.Id()) + if err != nil { + return err + } + + future, err := client.Delete(ctx, id.ResourceGroup, id.Name) + if err != nil { + if response.WasNotFound(future.Response()) { + return nil + } + return 
fmt.Errorf("deleting %s: %+v", id, err) + } + + if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { + if !response.WasNotFound(future.Response()) { + return fmt.Errorf("waiting for deletion of %s: %+v", id, err) + } + } + + return nil +} + +func domainServiceControllerRefreshFunc(ctx context.Context, client *aad.DomainServicesClient, id parse.DomainServiceId, deleting bool) pluginsdk.StateRefreshFunc { + return func() (interface{}, string, error) { + log.Printf("[DEBUG] Waiting for domain controllers to deploy...") + resp, err := client.Get(ctx, id.ResourceGroup, id.Name) + if err != nil { + return nil, "error", err + } + if resp.DomainServiceProperties == nil || resp.DomainServiceProperties.ReplicaSets == nil || len(*resp.DomainServiceProperties.ReplicaSets) == 0 { + return nil, "error", fmt.Errorf("API error: `replicaSets` was not returned") + } + // Loop through all replica sets and ensure they are running and each have two available domain controllers + for _, repl := range *resp.DomainServiceProperties.ReplicaSets { + if repl.ServiceStatus == nil { + return resp, "pending", nil + } + switch { + case !deleting && strings.EqualFold(*repl.ServiceStatus, "TearingDown"): + // Sometimes a service error will cause the replica set, or resource, to self destruct + return resp, "error", fmt.Errorf("service error: a replica set is unexpectedly tearing down") + case strings.EqualFold(*repl.ServiceStatus, "Failed"): + // If a replica set enters a failed state, it needs manual intervention + return resp, "error", fmt.Errorf("service error: a replica set has entered a Failed state and must be recovered or deleted manually") + case !strings.EqualFold(*repl.ServiceStatus, "Running"): + // If it's not yet running, it isn't ready + return resp, "pending", nil + case repl.DomainControllerIPAddress == nil || len(*repl.DomainControllerIPAddress) < 2: + // When a domain controller is online, its IP address will be returned. 
We're looking for 2 active domain controllers. + return resp, "pending", nil + } + } + return resp, "available", nil + } +} + +func expandDomainServiceLdaps(input []interface{}) (ldaps *aad.LdapsSettings) { + ldaps = &aad.LdapsSettings{ + Ldaps: aad.LdapsDisabled, + } + + if len(input) > 0 { + v := input[0].(map[string]interface{}) + if v["enabled"].(bool) { + ldaps.Ldaps = aad.LdapsEnabled + } + ldaps.PfxCertificate = utils.String(v["pfx_certificate"].(string)) + ldaps.PfxCertificatePassword = utils.String(v["pfx_certificate_password"].(string)) + if v["external_access_enabled"].(bool) { + ldaps.ExternalAccess = aad.Enabled + } else { + ldaps.ExternalAccess = aad.Disabled + } + } + + return +} + +func expandDomainServiceNotifications(input []interface{}) *aad.NotificationSettings { + if len(input) == 0 { + return nil + } + + v := input[0].(map[string]interface{}) + + additionalRecipients := make([]string, 0) + if ar, ok := v["additional_recipients"]; ok { + for _, r := range ar.(*pluginsdk.Set).List() { + additionalRecipients = append(additionalRecipients, r.(string)) + } + } + + notifyDcAdmins := aad.NotifyDcAdminsDisabled + if n, ok := v["notify_dc_admins"]; ok && n.(bool) { + notifyDcAdmins = aad.NotifyDcAdminsEnabled + } + + notifyGlobalAdmins := aad.NotifyGlobalAdminsDisabled + if n, ok := v["notify_global_admins"]; ok && n.(bool) { + notifyGlobalAdmins = aad.NotifyGlobalAdminsEnabled + } + + return &aad.NotificationSettings{ + AdditionalRecipients: &additionalRecipients, + NotifyDcAdmins: notifyDcAdmins, + NotifyGlobalAdmins: notifyGlobalAdmins, + } +} + +func expandDomainServiceSecurity(input []interface{}) *aad.DomainSecuritySettings { + if len(input) == 0 { + return nil + } + v := input[0].(map[string]interface{}) + + ntlmV1 := aad.NtlmV1Disabled + syncKerberosPasswords := aad.SyncKerberosPasswordsDisabled + syncNtlmPasswords := aad.SyncNtlmPasswordsDisabled + syncOnPremPasswords := aad.SyncOnPremPasswordsDisabled + tlsV1 := aad.TLSV1Disabled + + if 
v["ntlm_v1_enabled"].(bool) { + ntlmV1 = aad.NtlmV1Enabled + } + if v["sync_kerberos_passwords"].(bool) { + syncKerberosPasswords = aad.SyncKerberosPasswordsEnabled + } + if v["sync_ntlm_passwords"].(bool) { + syncNtlmPasswords = aad.SyncNtlmPasswordsEnabled + } + if v["sync_on_prem_passwords"].(bool) { + syncOnPremPasswords = aad.SyncOnPremPasswordsEnabled + } + if v["tls_v1_enabled"].(bool) { + tlsV1 = aad.TLSV1Enabled + } + + return &aad.DomainSecuritySettings{ + NtlmV1: ntlmV1, + SyncKerberosPasswords: syncKerberosPasswords, + SyncNtlmPasswords: syncNtlmPasswords, + SyncOnPremPasswords: syncOnPremPasswords, + TLSV1: tlsV1, + } +} + +func flattenDomainServiceLdaps(d *pluginsdk.ResourceData, input *aad.LdapsSettings, dataSource bool) []interface{} { + result := map[string]interface{}{ + "enabled": false, + "external_access_enabled": false, + "certificate_expiry": "", + "certificate_thumbprint": "", + "public_certificate": "", + } + + if !dataSource { + // Read pfx_certificate and pfx_certificate_password from existing state since it's not returned + result["pfx_certificate"] = "" + if v, ok := d.GetOk("secure_ldap.0.pfx_certificate"); ok { + result["pfx_certificate"] = v.(string) + } + result["pfx_certificate_password"] = "" + if v, ok := d.GetOk("secure_ldap.0.pfx_certificate_password"); ok { + result["pfx_certificate_password"] = v.(string) + } + } + + if input != nil { + if input.ExternalAccess == aad.Enabled { + result["external_access_enabled"] = true + } + if input.Ldaps == aad.LdapsEnabled { + result["enabled"] = true + } + if v := input.CertificateNotAfter; v != nil { + result["certificate_expiry"] = v.Format(time.RFC3339) + } + if v := input.CertificateThumbprint; v != nil { + result["certificate_thumbprint"] = *v + } + if v := input.PublicCertificate; v != nil { + result["public_certificate"] = *v + } + } + + return []interface{}{result} +} + +func flattenDomainServiceNotifications(input *aad.NotificationSettings) []interface{} { + if input == nil { + 
return make([]interface{}, 0) + } + + result := map[string]interface{}{ + "additional_recipients": make([]string, 0), + "notify_dc_admins": false, + "notify_global_admins": false, + } + if input.AdditionalRecipients != nil { + result["additional_recipients"] = *input.AdditionalRecipients + } + if input.NotifyDcAdmins == aad.NotifyDcAdminsEnabled { + result["notify_dc_admins"] = true + } + if input.NotifyGlobalAdmins == aad.NotifyGlobalAdminsEnabled { + result["notify_global_admins"] = true + } + + return []interface{}{result} +} + +func flattenDomainServiceReplicaSets(input *[]aad.ReplicaSet) (ret []interface{}) { + if input == nil { + return + } + + for _, in := range *input { + repl := map[string]interface{}{ + "domain_controller_ip_addresses": make([]string, 0), + "external_access_ip_address": "", + "location": location.NormalizeNilable(in.Location), + "id": "", + "service_status": "", + "subnet_id": "", + } + if in.DomainControllerIPAddress != nil { + repl["domain_controller_ip_addresses"] = *in.DomainControllerIPAddress + } + if in.ExternalAccessIPAddress != nil { + repl["external_access_ip_address"] = *in.ExternalAccessIPAddress + } + if in.ReplicaSetID != nil { + repl["id"] = *in.ReplicaSetID + } + if in.ServiceStatus != nil { + repl["service_status"] = *in.ServiceStatus + } + if in.SubnetID != nil { + repl["subnet_id"] = *in.SubnetID + } + ret = append(ret, repl) + } + + return +} + +func flattenDomainServiceSecurity(input *aad.DomainSecuritySettings) []interface{} { + if input == nil { + return make([]interface{}, 0) + } + + result := map[string]bool{ + "ntlm_v1_enabled": false, + "sync_kerberos_passwords": false, + "sync_ntlm_passwords": false, + "sync_on_prem_passwords": false, + "tls_v1_enabled": false, + } + if input.NtlmV1 == aad.NtlmV1Enabled { + result["ntlm_v1_enabled"] = true + } + if input.SyncKerberosPasswords == aad.SyncKerberosPasswordsEnabled { + result["sync_kerberos_passwords"] = true + } + if input.SyncNtlmPasswords == 
aad.SyncNtlmPasswordsEnabled { + result["sync_ntlm_passwords"] = true + } + if input.SyncOnPremPasswords == aad.SyncOnPremPasswordsEnabled { + result["sync_on_prem_passwords"] = true + } + if input.TLSV1 == aad.TLSV1Enabled { + result["tls_v1_enabled"] = true + } + + return []interface{}{result} +} diff --git a/azurerm/internal/services/domainservices/active_directory_domain_service_test.go b/azurerm/internal/services/domainservices/active_directory_domain_service_test.go new file mode 100644 index 000000000000..747546c35b6a --- /dev/null +++ b/azurerm/internal/services/domainservices/active_directory_domain_service_test.go @@ -0,0 +1,527 @@ +package domainservices_test + +import ( + "context" + "fmt" + "testing" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/domainservices/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +// To generate a suitable cert for AADDS: +// +// openssl req -subj '/CN=*.never.gonna.shut.you.down/O=HashiCorp, Inc./ST=CA/C=US' \ +// -addext "subjectAltName=DNS:never.gonna.shut.you.down,DNS:*.never.gonna.shut.you.down" \ +// -addext "keyUsage=critical,nonRepudiation,digitalSignature,keyEncipherment" \ +// -addext "extendedKeyUsage=1.3.6.1.5.5.7.3.1" \ +// -new -newkey rsa:2048 -sha256 -days 36500 -nodes -x509 -keyout aadds.key -out aadds.crt +// +// Then package as a pfx bundle: +// +// openssl pkcs12 -export -out "aadds.pfx" -inkey "aadds.key" -in "aadds.crt" \ +// -password pass:qwer5678 -keypbe PBE-SHA1-3DES -certpbe 
PBE-SHA1-3DES +// +// The configuration value is the base64 encoded representation of the resulting pkcs12 bundle: +// +// base64 25 { + return fmt.Errorf("the total number of `advanced_filter` values allowed on a single event subscription is 25, but %d are configured", valueCount) + } + } + return nil +} + func eventSubscriptionSchemaEventSubscriptionName() *pluginsdk.Schema { return &pluginsdk.Schema{ Type: pluginsdk.TypeString, @@ -257,6 +279,14 @@ func eventSubscriptionSchemaIncludedEventTypes() *pluginsdk.Schema { } } +func eventSubscriptionSchemaEnableAdvancedFilteringOnArrays() *pluginsdk.Schema { + return &pluginsdk.Schema{ + Type: pluginsdk.TypeBool, + Optional: true, + Default: false, + } +} + func eventSubscriptionSchemaSubjectFilter() *pluginsdk.Schema { return &pluginsdk.Schema{ Type: pluginsdk.TypeList, @@ -286,8 +316,9 @@ func eventSubscriptionSchemaSubjectFilter() *pluginsdk.Schema { func eventSubscriptionSchemaAdvancedFilter() *pluginsdk.Schema { atLeastOneOf := []string{"advanced_filter.0.bool_equals", "advanced_filter.0.number_greater_than", "advanced_filter.0.number_greater_than_or_equals", "advanced_filter.0.number_less_than", - "advanced_filter.0.number_less_than_or_equals", "advanced_filter.0.number_in", "advanced_filter.0.number_not_in", "advanced_filter.0.string_begins_with", - "advanced_filter.0.string_ends_with", "advanced_filter.0.string_contains", "advanced_filter.0.string_in", "advanced_filter.0.string_not_in", + "advanced_filter.0.number_less_than_or_equals", "advanced_filter.0.number_in", "advanced_filter.0.number_not_in", "advanced_filter.0.string_begins_with", "advanced_filter.0.string_not_begins_with", + "advanced_filter.0.string_ends_with", "advanced_filter.0.string_not_ends_with", "advanced_filter.0.string_contains", "advanced_filter.0.string_not_contains", "advanced_filter.0.string_in", + "advanced_filter.0.string_not_in", "advanced_filter.0.is_not_null", "advanced_filter.0.is_null_or_undefined", 
"advanced_filter.0.number_in_range", "advanced_filter.0.number_not_in_range", } return &pluginsdk.Schema{ Type: pluginsdk.TypeList, @@ -451,6 +482,28 @@ func eventSubscriptionSchemaAdvancedFilter() *pluginsdk.Schema { }, AtLeastOneOf: atLeastOneOf, }, + "string_not_begins_with": { + Type: pluginsdk.TypeList, + Optional: true, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "key": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + "values": { + Type: pluginsdk.TypeList, + Required: true, + MaxItems: 25, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + }, + }, + }, + }, + AtLeastOneOf: atLeastOneOf, + }, "string_ends_with": { Type: pluginsdk.TypeList, Optional: true, @@ -473,6 +526,28 @@ func eventSubscriptionSchemaAdvancedFilter() *pluginsdk.Schema { }, AtLeastOneOf: atLeastOneOf, }, + "string_not_ends_with": { + Type: pluginsdk.TypeList, + Optional: true, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "key": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + "values": { + Type: pluginsdk.TypeList, + Required: true, + MaxItems: 25, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + }, + }, + }, + }, + AtLeastOneOf: atLeastOneOf, + }, "string_contains": { Type: pluginsdk.TypeList, Optional: true, @@ -495,6 +570,28 @@ func eventSubscriptionSchemaAdvancedFilter() *pluginsdk.Schema { }, AtLeastOneOf: atLeastOneOf, }, + "string_not_contains": { + Type: pluginsdk.TypeList, + Optional: true, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "key": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + "values": { + Type: pluginsdk.TypeList, + Required: true, + MaxItems: 25, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + }, + }, + }, + }, + AtLeastOneOf: atLeastOneOf, + }, "string_in": { Type: pluginsdk.TypeList, Optional: true, @@ 
-539,6 +636,88 @@ func eventSubscriptionSchemaAdvancedFilter() *pluginsdk.Schema { }, AtLeastOneOf: atLeastOneOf, }, + "is_not_null": { + Type: pluginsdk.TypeList, + Optional: true, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "key": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + }, + }, + AtLeastOneOf: atLeastOneOf, + }, + "is_null_or_undefined": { + Type: pluginsdk.TypeList, + Optional: true, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "key": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + }, + }, + AtLeastOneOf: atLeastOneOf, + }, + "number_in_range": { + Type: pluginsdk.TypeList, + Optional: true, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "key": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + "values": { + Type: pluginsdk.TypeList, + Required: true, + MaxItems: 25, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeList, + MinItems: 2, + MaxItems: 2, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeFloat, + }, + }, + }, + }, + }, + AtLeastOneOf: atLeastOneOf, + }, + "number_not_in_range": { + Type: pluginsdk.TypeList, + Optional: true, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "key": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + "values": { + Type: pluginsdk.TypeList, + Required: true, + MaxItems: 25, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeList, + MinItems: 2, + MaxItems: 2, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeFloat, + }, + }, + }, + }, + }, + AtLeastOneOf: atLeastOneOf, + }, }, }, } @@ -820,6 +999,10 @@ func expandEventGridEventSubscriptionFilter(d *pluginsdk.ResourceData) (*eventgr filter.AdvancedFilters = &advancedFilters } + if v, ok := d.GetOk("advanced_filtering_on_arrays_enabled"); ok { + 
filter.EnableAdvancedFilteringOnArrays = utils.Bool(v.(bool)) + } + return filter, nil } @@ -847,22 +1030,41 @@ func expandAdvancedFilter(operatorType string, config map[string]interface{}) (e return eventgrid.NumberInAdvancedFilter{Key: &k, OperatorType: eventgrid.OperatorTypeNumberIn, Values: v}, nil case "number_not_in": v := utils.ExpandFloatSlice(config["values"].([]interface{})) - return eventgrid.NumberNotInAdvancedFilter{Key: &k, OperatorType: eventgrid.OperatorTypeNumberIn, Values: v}, nil + return eventgrid.NumberNotInAdvancedFilter{Key: &k, OperatorType: eventgrid.OperatorTypeNumberNotIn, Values: v}, nil case "string_begins_with": v := utils.ExpandStringSlice(config["values"].([]interface{})) return eventgrid.StringBeginsWithAdvancedFilter{Key: &k, OperatorType: eventgrid.OperatorTypeStringBeginsWith, Values: v}, nil + case "string_not_begins_with": + v := utils.ExpandStringSlice(config["values"].([]interface{})) + return eventgrid.StringNotBeginsWithAdvancedFilter{Key: &k, OperatorType: eventgrid.OperatorTypeStringNotBeginsWith, Values: v}, nil case "string_ends_with": v := utils.ExpandStringSlice(config["values"].([]interface{})) return eventgrid.StringEndsWithAdvancedFilter{Key: &k, OperatorType: eventgrid.OperatorTypeStringEndsWith, Values: v}, nil + case "string_not_ends_with": + v := utils.ExpandStringSlice(config["values"].([]interface{})) + return eventgrid.StringNotEndsWithAdvancedFilter{Key: &k, OperatorType: eventgrid.OperatorTypeStringNotEndsWith, Values: v}, nil case "string_contains": v := utils.ExpandStringSlice(config["values"].([]interface{})) return eventgrid.StringContainsAdvancedFilter{Key: &k, OperatorType: eventgrid.OperatorTypeStringContains, Values: v}, nil + case "string_not_contains": + v := utils.ExpandStringSlice(config["values"].([]interface{})) + return eventgrid.StringNotContainsAdvancedFilter{Key: &k, OperatorType: eventgrid.OperatorTypeStringNotContains, Values: v}, nil case "string_in": v := 
utils.ExpandStringSlice(config["values"].([]interface{})) return eventgrid.StringInAdvancedFilter{Key: &k, OperatorType: eventgrid.OperatorTypeStringIn, Values: v}, nil case "string_not_in": v := utils.ExpandStringSlice(config["values"].([]interface{})) return eventgrid.StringNotInAdvancedFilter{Key: &k, OperatorType: eventgrid.OperatorTypeStringNotIn, Values: v}, nil + case "is_not_null": + return eventgrid.IsNotNullAdvancedFilter{Key: &k, OperatorType: eventgrid.OperatorTypeIsNotNull}, nil + case "is_null_or_undefined": + return eventgrid.IsNullOrUndefinedAdvancedFilter{Key: &k, OperatorType: eventgrid.OperatorTypeIsNullOrUndefined}, nil + case "number_in_range": + v := utils.ExpandFloatRangeSlice(config["values"].([]interface{})) + return eventgrid.NumberInRangeAdvancedFilter{Key: &k, OperatorType: eventgrid.OperatorTypeNumberInRange, Values: v}, nil + case "number_not_in_range": + v := utils.ExpandFloatRangeSlice(config["values"].([]interface{})) + return eventgrid.NumberNotInRangeAdvancedFilter{Key: &k, OperatorType: eventgrid.OperatorTypeNumberNotInRange, Values: v}, nil default: return nil, fmt.Errorf("Invalid `advanced_filter` operator_type %q used", operatorType) } @@ -1055,11 +1257,18 @@ func flattenEventGridEventSubscriptionAdvancedFilter(input *eventgrid.EventSubsc numberLessThanOrEquals := make([]interface{}, 0) numberIn := make([]interface{}, 0) numberNotIn := make([]interface{}, 0) + numberInRange := make([]interface{}, 0) + numberNotInRange := make([]interface{}, 0) stringBeginsWith := make([]interface{}, 0) + stringNotBeginsWith := make([]interface{}, 0) stringEndsWith := make([]interface{}, 0) + stringNotEndsWith := make([]interface{}, 0) stringContains := make([]interface{}, 0) + stringNotContains := make([]interface{}, 0) stringIn := make([]interface{}, 0) stringNotIn := make([]interface{}, 0) + isNotNull := make([]interface{}, 0) + isNullOrUndefined := make([]interface{}, 0) for _, item := range *input.AdvancedFilters { switch f := item.(type) 
{ @@ -1087,18 +1296,37 @@ func flattenEventGridEventSubscriptionAdvancedFilter(input *eventgrid.EventSubsc case eventgrid.StringBeginsWithAdvancedFilter: v := utils.FlattenStringSlice(f.Values) stringBeginsWith = append(stringBeginsWith, flattenValues(f.Key, &v)) + case eventgrid.StringNotBeginsWithAdvancedFilter: + v := utils.FlattenStringSlice(f.Values) + stringNotBeginsWith = append(stringNotBeginsWith, flattenValues(f.Key, &v)) case eventgrid.StringEndsWithAdvancedFilter: v := utils.FlattenStringSlice(f.Values) stringEndsWith = append(stringEndsWith, flattenValues(f.Key, &v)) + case eventgrid.StringNotEndsWithAdvancedFilter: + v := utils.FlattenStringSlice(f.Values) + stringNotEndsWith = append(stringNotEndsWith, flattenValues(f.Key, &v)) case eventgrid.StringContainsAdvancedFilter: v := utils.FlattenStringSlice(f.Values) stringContains = append(stringContains, flattenValues(f.Key, &v)) + case eventgrid.StringNotContainsAdvancedFilter: + v := utils.FlattenStringSlice(f.Values) + stringNotContains = append(stringNotContains, flattenValues(f.Key, &v)) case eventgrid.StringInAdvancedFilter: v := utils.FlattenStringSlice(f.Values) stringIn = append(stringIn, flattenValues(f.Key, &v)) case eventgrid.StringNotInAdvancedFilter: v := utils.FlattenStringSlice(f.Values) stringNotIn = append(stringNotIn, flattenValues(f.Key, &v)) + case eventgrid.NumberInRangeAdvancedFilter: + v := utils.FlattenFloatRangeSlice(f.Values) + numberInRange = append(numberInRange, flattenRangeValues(f.Key, &v)) + case eventgrid.NumberNotInRangeAdvancedFilter: + v := utils.FlattenFloatRangeSlice(f.Values) + numberNotInRange = append(numberNotInRange, flattenRangeValues(f.Key, &v)) + case eventgrid.IsNotNullAdvancedFilter: + isNotNull = append(isNotNull, flattenKey(f.Key)) + case eventgrid.IsNullOrUndefinedAdvancedFilter: + isNullOrUndefined = append(isNullOrUndefined, flattenKey(f.Key)) } } @@ -1111,11 +1339,18 @@ func flattenEventGridEventSubscriptionAdvancedFilter(input *eventgrid.EventSubsc 
"number_less_than_or_equals": numberLessThanOrEquals, "number_in": numberIn, "number_not_in": numberNotIn, + "number_in_range": numberInRange, + "number_not_in_range": numberNotInRange, "string_begins_with": stringBeginsWith, + "string_not_begins_with": stringNotBeginsWith, "string_ends_with": stringEndsWith, + "string_not_ends_with": stringNotEndsWith, "string_contains": stringContains, + "string_not_contains": stringNotContains, "string_in": stringIn, "string_not_in": stringNotIn, + "is_not_null": isNotNull, + "is_null_or_undefined": isNullOrUndefined, }, } } @@ -1182,3 +1417,32 @@ func flattenValues(inputKey *string, inputValues *[]interface{}) map[string]inte "values": values, } } + +func flattenRangeValues(inputKey *string, inputValues *[][]interface{}) map[string]interface{} { + key := "" + if inputKey != nil { + key = *inputKey + } + values := make([]interface{}, 0) + if inputValues != nil { + for _, item := range *inputValues { + values = append(values, item) + } + } + + return map[string]interface{}{ + "key": key, + "values": values, + } +} + +func flattenKey(inputKey *string) map[string]interface{} { + key := "" + if inputKey != nil { + key = *inputKey + } + + return map[string]interface{}{ + "key": key, + } +} diff --git a/azurerm/internal/services/eventgrid/eventgrid_event_subscription_resource.go b/azurerm/internal/services/eventgrid/eventgrid_event_subscription_resource.go index 4afc66a04b85..cb4be8b0647e 100644 --- a/azurerm/internal/services/eventgrid/eventgrid_event_subscription_resource.go +++ b/azurerm/internal/services/eventgrid/eventgrid_event_subscription_resource.go @@ -44,6 +44,8 @@ func resourceEventGridEventSubscription() *pluginsdk.Resource { Delete: pluginsdk.DefaultTimeout(30 * time.Minute), }, + CustomizeDiff: pluginsdk.CustomizeDiffShim(eventSubscriptionCustomizeDiffAdvancedFilter), + Importer: pluginsdk.ImporterValidatingResourceId(func(id string) error { _, err := parse.EventSubscriptionID(id) return err @@ -144,6 +146,8 @@ func 
resourceEventGridEventSubscription() *pluginsdk.Resource { "retry_policy": eventSubscriptionSchemaRetryPolicy(), "labels": eventSubscriptionSchemaLabels(), + + "advanced_filtering_on_arrays_enabled": eventSubscriptionSchemaEnableAdvancedFilteringOnArrays(), }, } } @@ -303,6 +307,7 @@ func resourceEventGridEventSubscriptionRead(d *pluginsdk.ResourceData, meta inte if filter := props.Filter; filter != nil { d.Set("included_event_types", filter.IncludedEventTypes) + d.Set("advanced_filtering_on_arrays_enabled", filter.EnableAdvancedFilteringOnArrays) if err := d.Set("subject_filter", flattenEventGridEventSubscriptionSubjectFilter(filter)); err != nil { return fmt.Errorf("Error setting `subject_filter` for EventGrid Event Subscription %q (Scope %q): %s", id.Name, id.Scope, err) } diff --git a/azurerm/internal/services/eventgrid/eventgrid_event_subscription_resource_test.go b/azurerm/internal/services/eventgrid/eventgrid_event_subscription_resource_test.go index 00f3be85d3eb..f36ca630db1d 100644 --- a/azurerm/internal/services/eventgrid/eventgrid_event_subscription_resource_test.go +++ b/azurerm/internal/services/eventgrid/eventgrid_event_subscription_resource_test.go @@ -150,6 +150,7 @@ func TestAccEventGridEventSubscription_filter(t *testing.T) { check.That(data.ResourceName).Key("included_event_types.1").HasValue("Microsoft.Storage.BlobDeleted"), check.That(data.ResourceName).Key("subject_filter.0.subject_ends_with").HasValue(".jpg"), check.That(data.ResourceName).Key("subject_filter.0.subject_begins_with").HasValue("test/test"), + check.That(data.ResourceName).Key("advanced_filtering_on_arrays_enabled").HasValue("true"), ), }, data.ImportStep(), @@ -157,7 +158,7 @@ func TestAccEventGridEventSubscription_filter(t *testing.T) { } func TestAccEventGridEventSubscription_advancedFilter(t *testing.T) { - data := acceptance.BuildTestData(t, "azurerm_eventgrid_event_subscription", "test") + data := acceptance.BuildTestData(t, "azurerm_eventgrid_event_subscription", "test1") 
r := EventGridEventSubscriptionResource{} data.ResourceTest(t, r, []acceptance.TestStep{ @@ -165,30 +166,7 @@ func TestAccEventGridEventSubscription_advancedFilter(t *testing.T) { Config: r.advancedFilter(data), Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), - check.That(data.ResourceName).Key("advanced_filter.0.bool_equals.0.key").HasValue("subject"), - check.That(data.ResourceName).Key("advanced_filter.0.bool_equals.0.value").HasValue("true"), - check.That(data.ResourceName).Key("advanced_filter.0.number_greater_than.0.key").HasValue("data.metadataVersion"), - check.That(data.ResourceName).Key("advanced_filter.0.number_greater_than.0.value").HasValue("1"), - check.That(data.ResourceName).Key("advanced_filter.0.number_greater_than_or_equals.0.key").HasValue("data.contentLength"), - check.That(data.ResourceName).Key("advanced_filter.0.number_greater_than_or_equals.0.value").HasValue("42"), - check.That(data.ResourceName).Key("advanced_filter.0.number_less_than.0.key").HasValue("data.contentLength"), - check.That(data.ResourceName).Key("advanced_filter.0.number_less_than.0.value").HasValue("42.1"), - check.That(data.ResourceName).Key("advanced_filter.0.number_less_than_or_equals.0.key").HasValue("data.metadataVersion"), - check.That(data.ResourceName).Key("advanced_filter.0.number_less_than_or_equals.0.value").HasValue("2"), - check.That(data.ResourceName).Key("advanced_filter.0.number_in.0.key").HasValue("data.contentLength"), - check.That(data.ResourceName).Key("advanced_filter.0.number_in.0.values.0").HasValue("0"), - check.That(data.ResourceName).Key("advanced_filter.0.number_not_in.0.key").HasValue("data.contentLength"), - check.That(data.ResourceName).Key("advanced_filter.0.number_not_in.0.values.0").HasValue("5"), - check.That(data.ResourceName).Key("advanced_filter.0.string_begins_with.0.key").HasValue("subject"), - check.That(data.ResourceName).Key("advanced_filter.0.string_begins_with.0.values.0").HasValue("foo"), - 
check.That(data.ResourceName).Key("advanced_filter.0.string_ends_with.0.key").HasValue("subject"), - check.That(data.ResourceName).Key("advanced_filter.0.string_ends_with.0.values.0").HasValue("bar"), - check.That(data.ResourceName).Key("advanced_filter.0.string_contains.0.key").HasValue("data.contentType"), - check.That(data.ResourceName).Key("advanced_filter.0.string_contains.0.values.0").HasValue("application"), - check.That(data.ResourceName).Key("advanced_filter.0.string_in.0.key").HasValue("data.blobType"), - check.That(data.ResourceName).Key("advanced_filter.0.string_in.0.values.0").HasValue("Block"), - check.That(data.ResourceName).Key("advanced_filter.0.string_not_in.0.key").HasValue("data.blobType"), - check.That(data.ResourceName).Key("advanced_filter.0.string_not_in.0.values.0").HasValue("Page"), + check.That("azurerm_eventgrid_event_subscription.test2").ExistsInAzure(r), ), }, data.ImportStep(), @@ -204,43 +182,6 @@ func TestAccEventGridEventSubscription_advancedFilterMaxItems(t *testing.T) { Config: r.advancedFilterMaxItems(data), Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), - check.That(data.ResourceName).Key("advanced_filter.0.bool_equals.0.key").HasValue("subject"), - check.That(data.ResourceName).Key("advanced_filter.0.bool_equals.0.value").HasValue("true"), - check.That(data.ResourceName).Key("advanced_filter.0.number_greater_than.0.key").HasValue("data.metadataVersion"), - check.That(data.ResourceName).Key("advanced_filter.0.number_greater_than.0.value").HasValue("2"), - check.That(data.ResourceName).Key("advanced_filter.0.number_greater_than_or_equals.0.key").HasValue("data.contentLength"), - check.That(data.ResourceName).Key("advanced_filter.0.number_greater_than_or_equals.0.value").HasValue("3"), - check.That(data.ResourceName).Key("advanced_filter.0.number_less_than.0.key").HasValue("data.contentLength"), - 
check.That(data.ResourceName).Key("advanced_filter.0.number_less_than.0.value").HasValue("4"), - check.That(data.ResourceName).Key("advanced_filter.0.number_less_than_or_equals.0.key").HasValue("data.metadataVersion"), - check.That(data.ResourceName).Key("advanced_filter.0.number_less_than_or_equals.0.value").HasValue("5"), - check.That(data.ResourceName).Key("advanced_filter.0.number_in.0.key").HasValue("data.contentLength"), - check.That(data.ResourceName).Key("advanced_filter.0.number_in.0.values.0").HasValue("6"), - check.That(data.ResourceName).Key("advanced_filter.0.number_in.0.values.1").HasValue("7"), - check.That(data.ResourceName).Key("advanced_filter.0.number_in.0.values.2").HasValue("8"), - check.That(data.ResourceName).Key("advanced_filter.0.number_not_in.0.key").HasValue("data.contentLength"), - check.That(data.ResourceName).Key("advanced_filter.0.number_not_in.0.values.0").HasValue("9"), - check.That(data.ResourceName).Key("advanced_filter.0.number_not_in.0.values.1").HasValue("10"), - check.That(data.ResourceName).Key("advanced_filter.0.number_not_in.0.values.2").HasValue("11"), - check.That(data.ResourceName).Key("advanced_filter.0.string_begins_with.0.key").HasValue("subject"), - check.That(data.ResourceName).Key("advanced_filter.0.string_begins_with.0.values.0").HasValue("12"), - check.That(data.ResourceName).Key("advanced_filter.0.string_begins_with.0.values.1").HasValue("13"), - check.That(data.ResourceName).Key("advanced_filter.0.string_begins_with.0.values.2").HasValue("14"), - check.That(data.ResourceName).Key("advanced_filter.0.string_ends_with.0.key").HasValue("subject"), - check.That(data.ResourceName).Key("advanced_filter.0.string_ends_with.0.values.0").HasValue("15"), - check.That(data.ResourceName).Key("advanced_filter.0.string_ends_with.0.values.1").HasValue("16"), - check.That(data.ResourceName).Key("advanced_filter.0.string_ends_with.0.values.2").HasValue("17"), - 
check.That(data.ResourceName).Key("advanced_filter.0.string_contains.0.key").HasValue("data.contentType"), - check.That(data.ResourceName).Key("advanced_filter.0.string_contains.0.values.0").HasValue("18"), - check.That(data.ResourceName).Key("advanced_filter.0.string_contains.0.values.1").HasValue("19"), - check.That(data.ResourceName).Key("advanced_filter.0.string_contains.0.values.2").HasValue("20"), - check.That(data.ResourceName).Key("advanced_filter.0.string_in.0.key").HasValue("data.blobType"), - check.That(data.ResourceName).Key("advanced_filter.0.string_in.0.values.0").HasValue("21"), - check.That(data.ResourceName).Key("advanced_filter.0.string_in.0.values.1").HasValue("22"), - check.That(data.ResourceName).Key("advanced_filter.0.string_in.0.values.2").HasValue("23"), - check.That(data.ResourceName).Key("advanced_filter.0.string_not_in.0.key").HasValue("data.blobType"), - check.That(data.ResourceName).Key("advanced_filter.0.string_not_in.0.values.0").HasValue("24"), - check.That(data.ResourceName).Key("advanced_filter.0.string_not_in.0.values.1").HasValue("25"), ), }, data.ImportStep(), @@ -268,12 +209,12 @@ provider "azurerm" { } resource "azurerm_resource_group" "test" { - name = "acctestRG-eg-%d" - location = "%s" + name = "acctestRG-eg-%[1]d" + location = "%[2]s" } resource "azurerm_storage_account" "test" { - name = "acctestacc%s" + name = "acctestacc%[3]s" resource_group_name = azurerm_resource_group.test.name location = azurerm_resource_group.test.location account_tier = "Standard" @@ -285,7 +226,7 @@ resource "azurerm_storage_account" "test" { } resource "azurerm_storage_queue" "test" { - name = "mysamplequeue-%d" + name = "mysamplequeue-%[1]d" storage_account_name = azurerm_storage_account.test.name } @@ -306,7 +247,7 @@ resource "azurerm_storage_blob" "test" { } resource "azurerm_eventgrid_event_subscription" "test" { - name = "acctesteg-%d" + name = "acctesteg-%[1]d" scope = azurerm_resource_group.test.id storage_queue_endpoint { @@ -326,7 
+267,7 @@ resource "azurerm_eventgrid_event_subscription" "test" { labels = ["test", "test1", "test2"] } -`, data.RandomInteger, data.Locations.Primary, data.RandomString, data.RandomInteger, data.RandomInteger) +`, data.RandomInteger, data.Locations.Primary, data.RandomString) } func (EventGridEventSubscriptionResource) requiresImport(data acceptance.TestData) string { @@ -348,12 +289,12 @@ provider "azurerm" { } resource "azurerm_resource_group" "test" { - name = "acctestRG-eg-%d" - location = "%s" + name = "acctestRG-eg-%[1]d" + location = "%[2]s" } resource "azurerm_storage_account" "test" { - name = "acctestacc%s" + name = "acctestacc%[3]s" resource_group_name = azurerm_resource_group.test.name location = azurerm_resource_group.test.location account_tier = "Standard" @@ -365,7 +306,7 @@ resource "azurerm_storage_account" "test" { } resource "azurerm_storage_queue" "test" { - name = "mysamplequeue-%d" + name = "mysamplequeue-%[1]d" storage_account_name = azurerm_storage_account.test.name } @@ -386,7 +327,7 @@ resource "azurerm_storage_blob" "test" { } resource "azurerm_eventgrid_event_subscription" "test" { - name = "acctest-eg-%d" + name = "acctest-eg-%[1]d" scope = azurerm_resource_group.test.id storage_queue_endpoint { @@ -412,7 +353,7 @@ resource "azurerm_eventgrid_event_subscription" "test" { included_event_types = ["Microsoft.Storage.BlobCreated", "Microsoft.Storage.BlobDeleted"] labels = ["test4", "test5", "test6"] } -`, data.RandomInteger, data.Locations.Primary, data.RandomString, data.RandomInteger, data.RandomInteger) +`, data.RandomInteger, data.Locations.Primary, data.RandomString) } func (EventGridEventSubscriptionResource) eventHubID(data acceptance.TestData) string { @@ -422,19 +363,19 @@ provider "azurerm" { } resource "azurerm_resource_group" "test" { - name = "acctestRG-eg-%d" - location = "%s" + name = "acctestRG-eg-%[1]d" + location = "%[2]s" } resource "azurerm_eventhub_namespace" "test" { - name = "acctesteventhubnamespace-%d" + name = 
"acctesteventhubnamespace-%[1]d" location = azurerm_resource_group.test.location resource_group_name = azurerm_resource_group.test.name sku = "Basic" } resource "azurerm_eventhub" "test" { - name = "acctesteventhub-%d" + name = "acctesteventhub-%[1]d" namespace_name = azurerm_eventhub_namespace.test.name resource_group_name = azurerm_resource_group.test.name partition_count = 2 @@ -442,13 +383,13 @@ resource "azurerm_eventhub" "test" { } resource "azurerm_eventgrid_event_subscription" "test" { - name = "acctest-eg-%d" + name = "acctest-eg-%[1]d" scope = azurerm_resource_group.test.id event_delivery_schema = "CloudEventSchemaV1_0" eventhub_endpoint_id = azurerm_eventhub.test.id } -`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger) +`, data.RandomInteger, data.Locations.Primary) } func (EventGridEventSubscriptionResource) serviceBusQueueID(data acceptance.TestData) string { @@ -457,29 +398,29 @@ provider "azurerm" { features {} } resource "azurerm_resource_group" "test" { - name = "acctestRG-eg-%d" - location = "%s" + name = "acctestRG-eg-%[1]d" + location = "%[2]s" } resource "azurerm_servicebus_namespace" "example" { - name = "acctestservicebusnamespace-%d" + name = "acctestservicebusnamespace-%[1]d" location = azurerm_resource_group.test.location resource_group_name = azurerm_resource_group.test.name sku = "Basic" } resource "azurerm_servicebus_queue" "test" { - name = "acctestservicebusqueue-%d" + name = "acctestservicebusqueue-%[1]d" resource_group_name = azurerm_resource_group.test.name namespace_name = azurerm_servicebus_namespace.example.name enable_partitioning = true } resource "azurerm_eventgrid_event_subscription" "test" { - name = "acctest-eg-%d" + name = "acctest-eg-%[1]d" scope = azurerm_resource_group.test.id event_delivery_schema = "CloudEventSchemaV1_0" service_bus_queue_endpoint_id = azurerm_servicebus_queue.test.id } -`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, 
data.RandomInteger, data.RandomInteger) +`, data.RandomInteger, data.Locations.Primary) } func (EventGridEventSubscriptionResource) serviceBusTopicID(data acceptance.TestData) string { @@ -488,28 +429,28 @@ provider "azurerm" { features {} } resource "azurerm_resource_group" "test" { - name = "acctestRG-eg-%d" - location = "%s" + name = "acctestRG-eg-%[1]d" + location = "%[2]s" } resource "azurerm_servicebus_namespace" "example" { - name = "acctestservicebusnamespace-%d" + name = "acctestservicebusnamespace-%[1]d" location = azurerm_resource_group.test.location resource_group_name = azurerm_resource_group.test.name sku = "Standard" } resource "azurerm_servicebus_topic" "test" { - name = "acctestservicebustopic-%d" + name = "acctestservicebustopic-%[1]d" resource_group_name = azurerm_resource_group.test.name namespace_name = azurerm_servicebus_namespace.example.name enable_partitioning = true } resource "azurerm_eventgrid_event_subscription" "test" { - name = "acctest-eg-%d" + name = "acctest-eg-%[1]d" scope = azurerm_resource_group.test.id event_delivery_schema = "CloudEventSchemaV1_0" service_bus_topic_endpoint_id = azurerm_servicebus_topic.test.id } -`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger) +`, data.RandomInteger, data.Locations.Primary) } func (EventGridEventSubscriptionResource) filter(data acceptance.TestData) string { @@ -519,12 +460,12 @@ provider "azurerm" { } resource "azurerm_resource_group" "test" { - name = "acctestRG-eg-%d" - location = "%s" + name = "acctestRG-eg-%[1]d" + location = "%[2]s" } resource "azurerm_storage_account" "test" { - name = "acctestacc%s" + name = "acctestacc%[3]s" resource_group_name = azurerm_resource_group.test.name location = azurerm_resource_group.test.location account_tier = "Standard" @@ -536,12 +477,12 @@ resource "azurerm_storage_account" "test" { } resource "azurerm_storage_queue" "test" { - name = "mysamplequeue-%d" + name = "mysamplequeue-%[1]d" 
storage_account_name = azurerm_storage_account.test.name } resource "azurerm_eventgrid_event_subscription" "test" { - name = "acctest-eg-%d" + name = "acctest-eg-%[1]d" scope = azurerm_resource_group.test.id storage_queue_endpoint { @@ -549,6 +490,8 @@ resource "azurerm_eventgrid_event_subscription" "test" { queue_name = azurerm_storage_queue.test.name } + advanced_filtering_on_arrays_enabled = true + included_event_types = ["Microsoft.Storage.BlobCreated", "Microsoft.Storage.BlobDeleted"] subject_filter { @@ -556,7 +499,7 @@ resource "azurerm_eventgrid_event_subscription" "test" { subject_ends_with = ".jpg" } } -`, data.RandomInteger, data.Locations.Primary, data.RandomString, data.RandomInteger, data.RandomInteger) +`, data.RandomInteger, data.Locations.Primary, data.RandomString) } func (EventGridEventSubscriptionResource) advancedFilter(data acceptance.TestData) string { @@ -566,12 +509,12 @@ provider "azurerm" { } resource "azurerm_resource_group" "test" { - name = "acctestRG-eg-%d" - location = "%s" + name = "acctestRG-eg-%[1]d" + location = "%[2]s" } resource "azurerm_storage_account" "test" { - name = "acctestacc%s" + name = "acctestacc%[3]s" resource_group_name = azurerm_resource_group.test.name location = azurerm_resource_group.test.location account_tier = "Standard" @@ -583,12 +526,12 @@ resource "azurerm_storage_account" "test" { } resource "azurerm_storage_queue" "test" { - name = "mysamplequeue-%d" + name = "mysamplequeue-%[1]d" storage_account_name = azurerm_storage_account.test.name } -resource "azurerm_eventgrid_event_subscription" "test" { - name = "acctesteg-%d" +resource "azurerm_eventgrid_event_subscription" "test1" { + name = "acctesteg-%[1]d-1" scope = azurerm_storage_account.test.id storage_queue_endpoint { @@ -625,18 +568,51 @@ resource "azurerm_eventgrid_event_subscription" "test" { key = "data.contentLength" values = [5, 8, 13, 21, 34] } + number_in_range { + key = "data.contentLength" + values = [[0, 1], [2, 3]] + } + number_not_in_range 
{ + key = "data.contentLength" + values = [[5, 13], [21, 34]] + } string_begins_with { key = "subject" values = ["foo"] } + } +} + +resource "azurerm_eventgrid_event_subscription" "test2" { + name = "acctesteg-%[1]d-2" + scope = azurerm_storage_account.test.id + + storage_queue_endpoint { + storage_account_id = azurerm_storage_account.test.id + queue_name = azurerm_storage_queue.test.name + } + + advanced_filter { string_ends_with { key = "subject" values = ["bar"] } + string_not_begins_with { + key = "subject" + values = ["lorem"] + } + string_not_ends_with { + key = "subject" + values = ["ipsum"] + } string_contains { key = "data.contentType" values = ["application", "octet-stream"] } + string_not_contains { + key = "data.contentType" + values = ["text"] + } string_in { key = "data.blobType" values = ["Block"] @@ -645,10 +621,15 @@ resource "azurerm_eventgrid_event_subscription" "test" { key = "data.blobType" values = ["Page"] } + is_not_null { + key = "subject" + } + is_null_or_undefined { + key = "subject" + } } - } -`, data.RandomInteger, data.Locations.Primary, data.RandomString, data.RandomInteger, data.RandomInteger) +`, data.RandomInteger, data.Locations.Primary, data.RandomString) } func (EventGridEventSubscriptionResource) advancedFilterMaxItems(data acceptance.TestData) string { @@ -658,12 +639,12 @@ provider "azurerm" { } resource "azurerm_resource_group" "test" { - name = "acctestRG-eg-%d" - location = "%s" + name = "acctestRG-eg-%[1]d" + location = "%[2]s" } resource "azurerm_storage_account" "test" { - name = "acctestacc%s" + name = "acctestacc%[3]s" resource_group_name = azurerm_resource_group.test.name location = azurerm_resource_group.test.location account_tier = "Standard" @@ -675,12 +656,12 @@ resource "azurerm_storage_account" "test" { } resource "azurerm_storage_queue" "test" { - name = "mysamplequeue-%d" + name = "mysamplequeue-%[1]d" storage_account_name = azurerm_storage_account.test.name } resource "azurerm_eventgrid_event_subscription" 
"test" { - name = "acctesteg-%d" + name = "acctesteg-%[1]d" scope = azurerm_storage_account.test.id storage_queue_endpoint { @@ -740,5 +721,5 @@ resource "azurerm_eventgrid_event_subscription" "test" { } } -`, data.RandomInteger, data.Locations.Primary, data.RandomString, data.RandomInteger, data.RandomInteger) +`, data.RandomInteger, data.Locations.Primary, data.RandomString) } diff --git a/azurerm/internal/services/eventgrid/eventgrid_system_topic_event_subscription_resource.go b/azurerm/internal/services/eventgrid/eventgrid_system_topic_event_subscription_resource.go index d4e64dffbd05..2a3437c6f97e 100644 --- a/azurerm/internal/services/eventgrid/eventgrid_system_topic_event_subscription_resource.go +++ b/azurerm/internal/services/eventgrid/eventgrid_system_topic_event_subscription_resource.go @@ -124,6 +124,8 @@ func resourceEventGridSystemTopicEventSubscription() *pluginsdk.Resource { "retry_policy": eventSubscriptionSchemaRetryPolicy(), "labels": eventSubscriptionSchemaLabels(), + + "advanced_filtering_on_arrays_enabled": eventSubscriptionSchemaEnableAdvancedFilteringOnArrays(), }, } } @@ -277,6 +279,7 @@ func resourceEventGridSystemTopicEventSubscriptionRead(d *pluginsdk.ResourceData if filter := props.Filter; filter != nil { d.Set("included_event_types", filter.IncludedEventTypes) + d.Set("advanced_filtering_on_arrays_enabled", filter.EnableAdvancedFilteringOnArrays) if err := d.Set("subject_filter", flattenEventGridEventSubscriptionSubjectFilter(filter)); err != nil { return fmt.Errorf("Error setting `subject_filter` for EventGrid System Topic Event Subscription %q (System Topic %q): %s", id.Name, id.SystemTopic, err) } diff --git a/azurerm/internal/services/eventgrid/eventgrid_system_topic_event_subscription_resource_test.go b/azurerm/internal/services/eventgrid/eventgrid_system_topic_event_subscription_resource_test.go index a8382b542af1..4b0efd467e5e 100644 --- 
a/azurerm/internal/services/eventgrid/eventgrid_system_topic_event_subscription_resource_test.go +++ b/azurerm/internal/services/eventgrid/eventgrid_system_topic_event_subscription_resource_test.go @@ -150,6 +150,7 @@ func TestAccEventGridSystemTopicEventSubscription_filter(t *testing.T) { check.That(data.ResourceName).Key("included_event_types.1").HasValue("Microsoft.Storage.BlobDeleted"), check.That(data.ResourceName).Key("subject_filter.0.subject_ends_with").HasValue(".jpg"), check.That(data.ResourceName).Key("subject_filter.0.subject_begins_with").HasValue("test/test"), + check.That(data.ResourceName).Key("advanced_filtering_on_arrays_enabled").HasValue("true"), ), }, data.ImportStep(), @@ -157,7 +158,7 @@ func TestAccEventGridSystemTopicEventSubscription_filter(t *testing.T) { } func TestAccEventGridSystemTopicEventSubscription_advancedFilter(t *testing.T) { - data := acceptance.BuildTestData(t, "azurerm_eventgrid_system_topic_event_subscription", "test") + data := acceptance.BuildTestData(t, "azurerm_eventgrid_system_topic_event_subscription", "test1") r := EventGridSystemTopicEventSubscriptionResource{} data.ResourceTest(t, r, []acceptance.TestStep{ @@ -165,30 +166,7 @@ func TestAccEventGridSystemTopicEventSubscription_advancedFilter(t *testing.T) { Config: r.advancedFilter(data), Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), - check.That(data.ResourceName).Key("advanced_filter.0.bool_equals.0.key").HasValue("subject"), - check.That(data.ResourceName).Key("advanced_filter.0.bool_equals.0.value").HasValue("true"), - check.That(data.ResourceName).Key("advanced_filter.0.number_greater_than.0.key").HasValue("data.metadataVersion"), - check.That(data.ResourceName).Key("advanced_filter.0.number_greater_than.0.value").HasValue("1"), - check.That(data.ResourceName).Key("advanced_filter.0.number_greater_than_or_equals.0.key").HasValue("data.contentLength"), - 
check.That(data.ResourceName).Key("advanced_filter.0.number_greater_than_or_equals.0.value").HasValue("42"), - check.That(data.ResourceName).Key("advanced_filter.0.number_less_than.0.key").HasValue("data.contentLength"), - check.That(data.ResourceName).Key("advanced_filter.0.number_less_than.0.value").HasValue("42.1"), - check.That(data.ResourceName).Key("advanced_filter.0.number_less_than_or_equals.0.key").HasValue("data.metadataVersion"), - check.That(data.ResourceName).Key("advanced_filter.0.number_less_than_or_equals.0.value").HasValue("2"), - check.That(data.ResourceName).Key("advanced_filter.0.number_in.0.key").HasValue("data.contentLength"), - check.That(data.ResourceName).Key("advanced_filter.0.number_in.0.values.0").HasValue("0"), - check.That(data.ResourceName).Key("advanced_filter.0.number_not_in.0.key").HasValue("data.contentLength"), - check.That(data.ResourceName).Key("advanced_filter.0.number_not_in.0.values.0").HasValue("5"), - check.That(data.ResourceName).Key("advanced_filter.0.string_begins_with.0.key").HasValue("subject"), - check.That(data.ResourceName).Key("advanced_filter.0.string_begins_with.0.values.0").HasValue("foo"), - check.That(data.ResourceName).Key("advanced_filter.0.string_ends_with.0.key").HasValue("subject"), - check.That(data.ResourceName).Key("advanced_filter.0.string_ends_with.0.values.0").HasValue("bar"), - check.That(data.ResourceName).Key("advanced_filter.0.string_contains.0.key").HasValue("data.contentType"), - check.That(data.ResourceName).Key("advanced_filter.0.string_contains.0.values.0").HasValue("application"), - check.That(data.ResourceName).Key("advanced_filter.0.string_in.0.key").HasValue("data.blobType"), - check.That(data.ResourceName).Key("advanced_filter.0.string_in.0.values.0").HasValue("Block"), - check.That(data.ResourceName).Key("advanced_filter.0.string_not_in.0.key").HasValue("data.blobType"), - check.That(data.ResourceName).Key("advanced_filter.0.string_not_in.0.values.0").HasValue("Page"), + 
check.That("azurerm_eventgrid_system_topic_event_subscription.test2").ExistsInAzure(r), ), }, data.ImportStep(), @@ -204,43 +182,6 @@ func TestAccEventGridSystemTopicEventSubscription_advancedFilterMaxItems(t *test Config: r.advancedFilterMaxItems(data), Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), - check.That(data.ResourceName).Key("advanced_filter.0.bool_equals.0.key").HasValue("subject"), - check.That(data.ResourceName).Key("advanced_filter.0.bool_equals.0.value").HasValue("true"), - check.That(data.ResourceName).Key("advanced_filter.0.number_greater_than.0.key").HasValue("data.metadataVersion"), - check.That(data.ResourceName).Key("advanced_filter.0.number_greater_than.0.value").HasValue("2"), - check.That(data.ResourceName).Key("advanced_filter.0.number_greater_than_or_equals.0.key").HasValue("data.contentLength"), - check.That(data.ResourceName).Key("advanced_filter.0.number_greater_than_or_equals.0.value").HasValue("3"), - check.That(data.ResourceName).Key("advanced_filter.0.number_less_than.0.key").HasValue("data.contentLength"), - check.That(data.ResourceName).Key("advanced_filter.0.number_less_than.0.value").HasValue("4"), - check.That(data.ResourceName).Key("advanced_filter.0.number_less_than_or_equals.0.key").HasValue("data.metadataVersion"), - check.That(data.ResourceName).Key("advanced_filter.0.number_less_than_or_equals.0.value").HasValue("5"), - check.That(data.ResourceName).Key("advanced_filter.0.number_in.0.key").HasValue("data.contentLength"), - check.That(data.ResourceName).Key("advanced_filter.0.number_in.0.values.0").HasValue("6"), - check.That(data.ResourceName).Key("advanced_filter.0.number_in.0.values.1").HasValue("7"), - check.That(data.ResourceName).Key("advanced_filter.0.number_in.0.values.2").HasValue("8"), - check.That(data.ResourceName).Key("advanced_filter.0.number_not_in.0.key").HasValue("data.contentLength"), - 
check.That(data.ResourceName).Key("advanced_filter.0.number_not_in.0.values.0").HasValue("9"), - check.That(data.ResourceName).Key("advanced_filter.0.number_not_in.0.values.1").HasValue("10"), - check.That(data.ResourceName).Key("advanced_filter.0.number_not_in.0.values.2").HasValue("11"), - check.That(data.ResourceName).Key("advanced_filter.0.string_begins_with.0.key").HasValue("subject"), - check.That(data.ResourceName).Key("advanced_filter.0.string_begins_with.0.values.0").HasValue("12"), - check.That(data.ResourceName).Key("advanced_filter.0.string_begins_with.0.values.1").HasValue("13"), - check.That(data.ResourceName).Key("advanced_filter.0.string_begins_with.0.values.2").HasValue("14"), - check.That(data.ResourceName).Key("advanced_filter.0.string_ends_with.0.key").HasValue("subject"), - check.That(data.ResourceName).Key("advanced_filter.0.string_ends_with.0.values.0").HasValue("15"), - check.That(data.ResourceName).Key("advanced_filter.0.string_ends_with.0.values.1").HasValue("16"), - check.That(data.ResourceName).Key("advanced_filter.0.string_ends_with.0.values.2").HasValue("17"), - check.That(data.ResourceName).Key("advanced_filter.0.string_contains.0.key").HasValue("data.contentType"), - check.That(data.ResourceName).Key("advanced_filter.0.string_contains.0.values.0").HasValue("18"), - check.That(data.ResourceName).Key("advanced_filter.0.string_contains.0.values.1").HasValue("19"), - check.That(data.ResourceName).Key("advanced_filter.0.string_contains.0.values.2").HasValue("20"), - check.That(data.ResourceName).Key("advanced_filter.0.string_in.0.key").HasValue("data.blobType"), - check.That(data.ResourceName).Key("advanced_filter.0.string_in.0.values.0").HasValue("21"), - check.That(data.ResourceName).Key("advanced_filter.0.string_in.0.values.1").HasValue("22"), - check.That(data.ResourceName).Key("advanced_filter.0.string_in.0.values.2").HasValue("23"), - 
check.That(data.ResourceName).Key("advanced_filter.0.string_not_in.0.key").HasValue("data.blobType"), - check.That(data.ResourceName).Key("advanced_filter.0.string_not_in.0.values.0").HasValue("24"), - check.That(data.ResourceName).Key("advanced_filter.0.string_not_in.0.values.1").HasValue("25"), ), }, data.ImportStep(), @@ -614,6 +555,8 @@ resource "azurerm_eventgrid_system_topic_event_subscription" "test" { queue_name = azurerm_storage_queue.test.name } + advanced_filtering_on_arrays_enabled = true + included_event_types = ["Microsoft.Storage.BlobCreated", "Microsoft.Storage.BlobDeleted"] subject_filter { @@ -660,8 +603,8 @@ resource "azurerm_eventgrid_system_topic" "test" { topic_type = "Microsoft.Resources.ResourceGroups" } -resource "azurerm_eventgrid_system_topic_event_subscription" "test" { - name = "acctesteg-%[1]d" +resource "azurerm_eventgrid_system_topic_event_subscription" "test1" { + name = "acctesteg-%[1]d-1" system_topic = azurerm_eventgrid_system_topic.test.name resource_group_name = azurerm_resource_group.test.name @@ -699,14 +642,48 @@ resource "azurerm_eventgrid_system_topic_event_subscription" "test" { key = "data.contentLength" values = [5, 8, 13, 21, 34] } + number_in_range { + key = "data.contentLength" + values = [[0, 1], [2, 3]] + } + number_not_in_range { + key = "data.contentLength" + values = [[5, 13], [21, 34]] + } string_begins_with { key = "subject" values = ["foo"] } + } +} + +resource "azurerm_eventgrid_system_topic_event_subscription" "test2" { + name = "acctesteg-%[1]d-2" + system_topic = azurerm_eventgrid_system_topic.test.name + resource_group_name = azurerm_resource_group.test.name + + storage_queue_endpoint { + storage_account_id = azurerm_storage_account.test.id + queue_name = azurerm_storage_queue.test.name + } + + advanced_filter { string_ends_with { key = "subject" values = ["bar"] } + string_not_begins_with { + key = "subject" + values = ["lorem"] + } + string_not_ends_with { + key = "subject" + values = ["ipsum"] + } + 
string_not_contains { + key = "data.contentType" + values = ["text"] + } string_contains { key = "data.contentType" values = ["application", "octet-stream"] @@ -719,8 +696,13 @@ resource "azurerm_eventgrid_system_topic_event_subscription" "test" { key = "data.blobType" values = ["Page"] } + is_not_null { + key = "subject" + } + is_null_or_undefined { + key = "subject" + } } - } `, data.RandomInteger, data.Locations.Primary, data.RandomString) } diff --git a/azurerm/internal/services/eventhub/client/client.go b/azurerm/internal/services/eventhub/client/client.go index caa3ca7d445d..1482992c9254 100644 --- a/azurerm/internal/services/eventhub/client/client.go +++ b/azurerm/internal/services/eventhub/client/client.go @@ -1,39 +1,67 @@ package client import ( - "github.com/Azure/azure-sdk-for-go/services/preview/eventhub/mgmt/2018-01-01-preview/eventhub" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/common" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/sdk/authorizationruleseventhubs" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/sdk/checknameavailabilitydisasterrecoveryconfigs" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/sdk/consumergroups" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/sdk/disasterrecoveryconfigs" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/sdk/eventhubs" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/sdk/eventhubsclusters" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/sdk/namespaces" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/sdk/networkrulesets" ) type Client struct { - ClusterClient *eventhub.ClustersClient - ConsumerGroupClient *eventhub.ConsumerGroupsClient - DisasterRecoveryConfigsClient *eventhub.DisasterRecoveryConfigsClient - EventHubsClient *eventhub.EventHubsClient - NamespacesClient *eventhub.NamespacesClient + ClusterClient *eventhubsclusters.EventHubsClustersClient + ConsumerGroupClient *consumergroups.ConsumerGroupsClient + DisasterRecoveryConfigsClient *disasterrecoveryconfigs.DisasterRecoveryConfigsClient + DisasterRecoveryNameAvailabilityClient *checknameavailabilitydisasterrecoveryconfigs.CheckNameAvailabilityDisasterRecoveryConfigsClient + EventHubsClient *eventhubs.EventHubsClient + EventHubAuthorizationRulesClient *authorizationruleseventhubs.AuthorizationRulesEventHubsClient + NamespacesClient *namespaces.NamespacesClient + NamespaceAuthorizationRulesClient *authorizationrulesnamespaces.AuthorizationRulesNamespacesClient + NetworkRuleSetsClient *networkrulesets.NetworkRuleSetsClient } func NewClient(o *common.ClientOptions) *Client { - ClustersClient := eventhub.NewClustersClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) - o.ConfigureClient(&ClustersClient.Client, o.ResourceManagerAuthorizer) + clustersClient := eventhubsclusters.NewEventHubsClustersClientWithBaseURI(o.ResourceManagerEndpoint) + o.ConfigureClient(&clustersClient.Client, o.ResourceManagerAuthorizer) - EventHubsClient := eventhub.NewEventHubsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) - o.ConfigureClient(&EventHubsClient.Client, o.ResourceManagerAuthorizer) + consumerGroupsClient := consumergroups.NewConsumerGroupsClientWithBaseURI(o.ResourceManagerEndpoint) + o.ConfigureClient(&consumerGroupsClient.Client, o.ResourceManagerAuthorizer) - DisasterRecoveryConfigsClient := eventhub.NewDisasterRecoveryConfigsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) - 
o.ConfigureClient(&DisasterRecoveryConfigsClient.Client, o.ResourceManagerAuthorizer) + disasterRecoveryConfigsClient := disasterrecoveryconfigs.NewDisasterRecoveryConfigsClientWithBaseURI(o.ResourceManagerEndpoint) + o.ConfigureClient(&disasterRecoveryConfigsClient.Client, o.ResourceManagerAuthorizer) - ConsumerGroupClient := eventhub.NewConsumerGroupsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) - o.ConfigureClient(&ConsumerGroupClient.Client, o.ResourceManagerAuthorizer) + disasterRecoveryNameAvailabilityClient := checknameavailabilitydisasterrecoveryconfigs.NewCheckNameAvailabilityDisasterRecoveryConfigsClientWithBaseURI(o.ResourceManagerEndpoint) + o.ConfigureClient(&disasterRecoveryNameAvailabilityClient.Client, o.ResourceManagerAuthorizer) - NamespacesClient := eventhub.NewNamespacesClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) - o.ConfigureClient(&NamespacesClient.Client, o.ResourceManagerAuthorizer) + eventhubsClient := eventhubs.NewEventHubsClientWithBaseURI(o.ResourceManagerEndpoint) + o.ConfigureClient(&eventhubsClient.Client, o.ResourceManagerAuthorizer) + + eventHubAuthorizationRulesClient := authorizationruleseventhubs.NewAuthorizationRulesEventHubsClientWithBaseURI(o.ResourceManagerEndpoint) + o.ConfigureClient(&eventHubAuthorizationRulesClient.Client, o.ResourceManagerAuthorizer) + + namespacesClient := namespaces.NewNamespacesClientWithBaseURI(o.ResourceManagerEndpoint) + o.ConfigureClient(&namespacesClient.Client, o.ResourceManagerAuthorizer) + + namespaceAuthorizationRulesClient := authorizationrulesnamespaces.NewAuthorizationRulesNamespacesClientWithBaseURI(o.ResourceManagerEndpoint) + o.ConfigureClient(&namespaceAuthorizationRulesClient.Client, o.ResourceManagerAuthorizer) + + networkRuleSetsClient := networkrulesets.NewNetworkRuleSetsClientWithBaseURI(o.ResourceManagerEndpoint) + o.ConfigureClient(&networkRuleSetsClient.Client, o.ResourceManagerAuthorizer) return &Client{ - ClusterClient: &ClustersClient, - 
ConsumerGroupClient: &ConsumerGroupClient, - DisasterRecoveryConfigsClient: &DisasterRecoveryConfigsClient, - EventHubsClient: &EventHubsClient, - NamespacesClient: &NamespacesClient, + ClusterClient: &clustersClient, + ConsumerGroupClient: &consumerGroupsClient, + DisasterRecoveryConfigsClient: &disasterRecoveryConfigsClient, + DisasterRecoveryNameAvailabilityClient: &disasterRecoveryNameAvailabilityClient, + EventHubsClient: &eventhubsClient, + EventHubAuthorizationRulesClient: &eventHubAuthorizationRulesClient, + NamespacesClient: &namespacesClient, + NamespaceAuthorizationRulesClient: &namespaceAuthorizationRulesClient, + NetworkRuleSetsClient: &networkRuleSetsClient, } } diff --git a/azurerm/internal/services/eventhub/eventhub_authorization_rule_data_source.go b/azurerm/internal/services/eventhub/eventhub_authorization_rule_data_source.go index 1497037597d6..0f018bcaf9e1 100644 --- a/azurerm/internal/services/eventhub/eventhub_authorization_rule_data_source.go +++ b/azurerm/internal/services/eventhub/eventhub_authorization_rule_data_source.go @@ -4,61 +4,63 @@ import ( "fmt" "time" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/go-azure-helpers/response" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/sdk/authorizationruleseventhubs" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/sdk/eventhubs" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func 
EventHubAuthorizationRuleDataSource() *schema.Resource { - return &schema.Resource{ +func EventHubAuthorizationRuleDataSource() *pluginsdk.Resource { + return &pluginsdk.Resource{ Read: EventHubAuthorizationRuleDataSourceRead, - Timeouts: &schema.ResourceTimeout{ - Read: schema.DefaultTimeout(5 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Read: pluginsdk.DefaultTimeout(5 * time.Minute), }, - Schema: eventHubAuthorizationRuleSchemaFrom(map[string]*schema.Schema{ + Schema: eventHubAuthorizationRuleSchemaFrom(map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validate.ValidateEventHubAuthorizationRuleName(), }, "namespace_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validate.ValidateEventHubNamespaceName(), }, "eventhub_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validate.ValidateEventHubName(), }, "primary_key": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, }, "secondary_key": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, }, "primary_connection_string": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, }, "secondary_connection_string": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, }, @@ -70,8 +72,10 @@ func EventHubAuthorizationRuleDataSource() *schema.Resource { } } -func EventHubAuthorizationRuleDataSourceRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*clients.Client).Eventhub.EventHubsClient +func EventHubAuthorizationRuleDataSourceRead(d *pluginsdk.ResourceData, meta interface{}) error { + eventHubsClient := meta.(*clients.Client).Eventhub.EventHubsClient + rulesClient := meta.(*clients.Client).Eventhub.EventHubAuthorizationRulesClient + subscriptionId := 
meta.(*clients.Client).Account.SubscriptionId ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() @@ -80,32 +84,35 @@ func EventHubAuthorizationRuleDataSourceRead(d *schema.ResourceData, meta interf eventHubName := d.Get("eventhub_name").(string) namespaceName := d.Get("namespace_name").(string) - resp, err := client.GetAuthorizationRule(ctx, resourceGroup, namespaceName, eventHubName, name) + id := eventhubs.NewAuthorizationRuleID(subscriptionId, resourceGroup, namespaceName, eventHubName, name) + resp, err := eventHubsClient.GetAuthorizationRule(ctx, id) if err != nil { - if utils.ResponseWasNotFound(resp.Response) { - return fmt.Errorf("Error: EventHub Authorization Rule %q (Resource Group %q) was not found", name, resourceGroup) + if response.WasNotFound(resp.HttpResponse) { + return fmt.Errorf("%s was not found", id) } - return fmt.Errorf("Error: EventHub Authorization Rule %s: %+v", name, err) + return fmt.Errorf("retrieving %s: %+v", id, err) } - d.SetId(*resp.ID) + d.SetId(id.ID()) + d.Set("name", id.Name) + d.Set("eventhub_name", id.EventhubName) + d.Set("namespace_name", id.NamespaceName) + d.Set("resource_group_name", id.ResourceGroup) - d.Set("name", name) - d.Set("eventhub_name", eventHubName) - d.Set("namespace_name", namespaceName) - d.Set("resource_group_name", resourceGroup) - - keysResp, err := client.ListKeys(ctx, resourceGroup, namespaceName, eventHubName, name) + localId := authorizationruleseventhubs.NewAuthorizationRuleID(id.SubscriptionId, id.ResourceGroup, id.NamespaceName, id.EventhubName, id.Name) + keysResp, err := rulesClient.EventHubsListKeys(ctx, localId) if err != nil { - return fmt.Errorf("Error making Read request on Azure EventHub Authorization Rule List Keys %s: %+v", name, err) + return fmt.Errorf("listing keys for %s: %+v", id, err) } - d.Set("primary_key", keysResp.PrimaryKey) - d.Set("secondary_key", keysResp.SecondaryKey) - d.Set("primary_connection_string", keysResp.PrimaryConnectionString) 
- d.Set("secondary_connection_string", keysResp.SecondaryConnectionString) - d.Set("primary_connection_string_alias", keysResp.AliasPrimaryConnectionString) - d.Set("secondary_connection_string_alias", keysResp.AliasSecondaryConnectionString) + if model := keysResp.Model; model != nil { + d.Set("primary_key", model.PrimaryKey) + d.Set("secondary_key", model.SecondaryKey) + d.Set("primary_connection_string", model.PrimaryConnectionString) + d.Set("secondary_connection_string", model.SecondaryConnectionString) + d.Set("primary_connection_string_alias", model.AliasPrimaryConnectionString) + d.Set("secondary_connection_string_alias", model.AliasSecondaryConnectionString) + } return nil } diff --git a/azurerm/internal/services/eventhub/eventhub_authorization_rule_data_source_test.go b/azurerm/internal/services/eventhub/eventhub_authorization_rule_data_source_test.go index fd156692cc50..d3420256cbb1 100644 --- a/azurerm/internal/services/eventhub/eventhub_authorization_rule_data_source_test.go +++ b/azurerm/internal/services/eventhub/eventhub_authorization_rule_data_source_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -16,10 +15,10 @@ func TestAccEventHubAuthorizationRuleDataSource(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_eventhub_authorization_rule", "test") r := EventHubAuthorizationRuleDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.base(data, true, true, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("name").Exists(), check.That(data.ResourceName).Key("namespace_name").Exists(), check.That(data.ResourceName).Key("eventhub_name").Exists(), @@ -36,10 
+35,10 @@ func TestAccEventHubAuthorizationRuleDataSource_withAliasConnectionString(t *tes data := acceptance.BuildTestData(t, "data.azurerm_eventhub_authorization_rule", "test") r := EventHubAuthorizationRuleDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.withAliasConnectionString(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("primary_connection_string_alias").Exists(), check.That(data.ResourceName).Key("secondary_connection_string_alias").Exists(), ), diff --git a/azurerm/internal/services/eventhub/eventhub_authorization_rule_resource.go b/azurerm/internal/services/eventhub/eventhub_authorization_rule_resource.go index 3fcf92581350..f479bbb12743 100644 --- a/azurerm/internal/services/eventhub/eventhub_authorization_rule_resource.go +++ b/azurerm/internal/services/eventhub/eventhub_authorization_rule_resource.go @@ -5,22 +5,20 @@ import ( "log" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" - - "github.com/Azure/azure-sdk-for-go/services/preview/eventhub/mgmt/2018-01-01-preview/eventhub" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/go-azure-helpers/response" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/locks" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/sdk/authorizationruleseventhubs" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/sdk/eventhubs" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func resourceEventHubAuthorizationRule() *schema.Resource { - return &schema.Resource{ +func resourceEventHubAuthorizationRule() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceEventHubAuthorizationRuleCreateUpdate, Read: resourceEventHubAuthorizationRuleRead, Update: resourceEventHubAuthorizationRuleCreateUpdate, @@ -29,30 +27,30 @@ func resourceEventHubAuthorizationRule() *schema.Resource { // TODO: replace this with an importer which validates the ID during import Importer: pluginsdk.DefaultImporter(), - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(30 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), }, - Schema: eventHubAuthorizationRuleSchemaFrom(map[string]*schema.Schema{ + Schema: eventHubAuthorizationRuleSchemaFrom(map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.ValidateEventHubAuthorizationRuleName(), }, "namespace_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.ValidateEventHubNamespaceName(), }, "eventhub_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: 
validate.ValidateEventHubName(), @@ -65,142 +63,138 @@ func resourceEventHubAuthorizationRule() *schema.Resource { } } -func resourceEventHubAuthorizationRuleCreateUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*clients.Client).Eventhub.EventHubsClient +func resourceEventHubAuthorizationRuleCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { + eventhubsClient := meta.(*clients.Client).Eventhub.EventHubsClient + authorizationRulesClient := meta.(*clients.Client).Eventhub.EventHubAuthorizationRulesClient + subscriptionId := meta.(*clients.Client).Account.SubscriptionId ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) defer cancel() log.Printf("[INFO] preparing arguments for AzureRM EventHub Authorization Rule creation.") - name := d.Get("name").(string) - namespaceName := d.Get("namespace_name").(string) - eventHubName := d.Get("eventhub_name").(string) - resourceGroup := d.Get("resource_group_name").(string) - + id := eventhubs.NewAuthorizationRuleID(subscriptionId, d.Get("resource_group_name").(string), d.Get("namespace_name").(string), d.Get("eventhub_name").(string), d.Get("name").(string)) if d.IsNewResource() { - existing, err := client.GetAuthorizationRule(ctx, resourceGroup, namespaceName, eventHubName, name) + existing, err := eventhubsClient.GetAuthorizationRule(ctx, id) if err != nil { - if !utils.ResponseWasNotFound(existing.Response) { - return fmt.Errorf("Error checking for presence of existing EventHub Authorization Rule %q (EventHub %q / Namespace %q / Resource Group %q): %s", name, eventHubName, namespaceName, resourceGroup, err) + if !response.WasNotFound(existing.HttpResponse) { + return fmt.Errorf("checking for presence of existing %s: %+v", id, err) } } - if existing.ID != nil && *existing.ID != "" { - return tf.ImportAsExistsError("azurerm_eventhub_authorization_rule", *existing.ID) + if !response.WasNotFound(existing.HttpResponse) { + return 
tf.ImportAsExistsError("azurerm_eventhub_authorization_rule", id.ID()) } } - locks.ByName(eventHubName, eventHubResourceName) - defer locks.UnlockByName(eventHubName, eventHubResourceName) + locks.ByName(id.EventhubName, eventHubResourceName) + defer locks.UnlockByName(id.EventhubName, eventHubResourceName) - locks.ByName(namespaceName, eventHubNamespaceResourceName) - defer locks.UnlockByName(namespaceName, eventHubNamespaceResourceName) + locks.ByName(id.NamespaceName, eventHubNamespaceResourceName) + defer locks.UnlockByName(id.NamespaceName, eventHubNamespaceResourceName) - parameters := eventhub.AuthorizationRule{ - Name: &name, - AuthorizationRuleProperties: &eventhub.AuthorizationRuleProperties{ + parameters := authorizationruleseventhubs.AuthorizationRule{ + Name: &id.Name, + Properties: &authorizationruleseventhubs.AuthorizationRuleProperties{ Rights: expandEventHubAuthorizationRuleRights(d), }, } - return resource.Retry(d.Timeout(schema.TimeoutCreate), func() *resource.RetryError { - if _, err := client.CreateOrUpdateAuthorizationRule(ctx, resourceGroup, namespaceName, eventHubName, name, parameters); err != nil { - return resource.NonRetryableError(fmt.Errorf("Error creating Authorization Rule %q (event Hub %q / Namespace %q / Resource Group %q): %+v", name, eventHubName, namespaceName, resourceGroup, err)) + //lintignore:R006 + return pluginsdk.Retry(d.Timeout(pluginsdk.TimeoutCreate), func() *pluginsdk.RetryError { + localId := authorizationruleseventhubs.NewAuthorizationRuleID(id.SubscriptionId, id.ResourceGroup, id.NamespaceName, id.EventhubName, id.Name) + if _, err := authorizationRulesClient.EventHubsCreateOrUpdateAuthorizationRule(ctx, localId, parameters); err != nil { + return pluginsdk.NonRetryableError(fmt.Errorf("creating %s: %+v", id, err)) } - read, err := client.GetAuthorizationRule(ctx, resourceGroup, namespaceName, eventHubName, name) + read, err := eventhubsClient.GetAuthorizationRule(ctx, id) if err != nil { - if 
utils.ResponseWasNotFound(read.Response) { - return resource.RetryableError(fmt.Errorf("Expected instance of the Authorization Rule %q (event Hub %q / Namespace %q / Resource Group %q) to be created but was in non existent state, retrying", name, eventHubName, namespaceName, resourceGroup)) + if response.WasNotFound(read.HttpResponse) { + return pluginsdk.RetryableError(fmt.Errorf("expected %s to be created but was in non existent state, retrying", id)) } - return resource.NonRetryableError(fmt.Errorf("Expected instance of Authorization Rule %q (event Hub %q / Namespace %q / Resource Group %q) could not be found", name, eventHubName, namespaceName, resourceGroup)) + return pluginsdk.NonRetryableError(fmt.Errorf("Expected %s was not be found", id)) } - if read.ID == nil { - return resource.NonRetryableError(fmt.Errorf("Cannot read Authorization Rule %q (event Hub %q / Namespace %q / Resource Group %q) ID", name, eventHubName, namespaceName, resourceGroup)) - } + d.SetId(id.ID()) - d.SetId(*read.ID) + if err := resourceEventHubAuthorizationRuleRead(d, meta); err != nil { + return pluginsdk.NonRetryableError(err) + } - return resource.NonRetryableError(resourceEventHubAuthorizationRuleRead(d, meta)) + return nil }) } -func resourceEventHubAuthorizationRuleRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*clients.Client).Eventhub.EventHubsClient +func resourceEventHubAuthorizationRuleRead(d *pluginsdk.ResourceData, meta interface{}) error { + eventHubsClient := meta.(*clients.Client).Eventhub.EventHubsClient + authorizationRulesClient := meta.(*clients.Client).Eventhub.EventHubAuthorizationRulesClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := azure.ParseAzureResourceID(d.Id()) + id, err := eventhubs.AuthorizationRuleID(d.Id()) if err != nil { return err } - name := id.Path["authorizationRules"] - resourceGroup := id.ResourceGroup - namespaceName := id.Path["namespaces"] - eventHubName := 
id.Path["eventhubs"] - - resp, err := client.GetAuthorizationRule(ctx, resourceGroup, namespaceName, eventHubName, name) + resp, err := eventHubsClient.GetAuthorizationRule(ctx, *id) if err != nil { - if utils.ResponseWasNotFound(resp.Response) { + if response.WasNotFound(resp.HttpResponse) { d.SetId("") return nil } - return fmt.Errorf("Error making Read request on Azure EventHub Authorization Rule %s: %+v", name, err) + return fmt.Errorf("retrieving %s: %+v", *id, err) } - d.Set("name", name) - d.Set("eventhub_name", eventHubName) - d.Set("namespace_name", namespaceName) - d.Set("resource_group_name", resourceGroup) - - if properties := resp.AuthorizationRuleProperties; properties != nil { - listen, send, manage := flattenEventHubAuthorizationRuleRights(properties.Rights) - d.Set("manage", manage) - d.Set("listen", listen) - d.Set("send", send) + d.Set("name", id.Name) + d.Set("eventhub_name", id.EventhubName) + d.Set("namespace_name", id.NamespaceName) + d.Set("resource_group_name", id.ResourceGroup) + + if model := resp.Model; model != nil { + if properties := model.Properties; properties != nil { + listen, send, manage := flattenEventHubAuthorizationRuleRights(properties.Rights) + d.Set("manage", manage) + d.Set("listen", listen) + d.Set("send", send) + } } - keysResp, err := client.ListKeys(ctx, resourceGroup, namespaceName, eventHubName, name) + localId := authorizationruleseventhubs.NewAuthorizationRuleID(id.SubscriptionId, id.ResourceGroup, id.NamespaceName, id.EventhubName, id.Name) + keysResp, err := authorizationRulesClient.EventHubsListKeys(ctx, localId) if err != nil { - return fmt.Errorf("Error making Read request on Azure EventHub Authorization Rule List Keys %s: %+v", name, err) + return fmt.Errorf("listing keys for %s: %+v", *id, err) } - d.Set("primary_key", keysResp.PrimaryKey) - d.Set("secondary_key", keysResp.SecondaryKey) - d.Set("primary_connection_string", keysResp.PrimaryConnectionString) - d.Set("secondary_connection_string", 
keysResp.SecondaryConnectionString) - d.Set("primary_connection_string_alias", keysResp.AliasPrimaryConnectionString) - d.Set("secondary_connection_string_alias", keysResp.AliasSecondaryConnectionString) + if model := keysResp.Model; model != nil { + d.Set("primary_key", model.PrimaryKey) + d.Set("secondary_key", model.SecondaryKey) + d.Set("primary_connection_string", model.PrimaryConnectionString) + d.Set("secondary_connection_string", model.SecondaryConnectionString) + d.Set("primary_connection_string_alias", model.AliasPrimaryConnectionString) + d.Set("secondary_connection_string_alias", model.AliasSecondaryConnectionString) + } return nil } -func resourceEventHubAuthorizationRuleDelete(d *schema.ResourceData, meta interface{}) error { +func resourceEventHubAuthorizationRuleDelete(d *pluginsdk.ResourceData, meta interface{}) error { eventhubClient := meta.(*clients.Client).Eventhub.EventHubsClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := azure.ParseAzureResourceID(d.Id()) + id, err := eventhubs.AuthorizationRuleID(d.Id()) if err != nil { return err } - name := id.Path["authorizationRules"] - resourceGroup := id.ResourceGroup - namespaceName := id.Path["namespaces"] - eventHubName := id.Path["eventhubs"] - - locks.ByName(eventHubName, eventHubResourceName) - defer locks.UnlockByName(eventHubName, eventHubResourceName) + locks.ByName(id.EventhubName, eventHubResourceName) + defer locks.UnlockByName(id.EventhubName, eventHubResourceName) - locks.ByName(namespaceName, eventHubNamespaceResourceName) - defer locks.UnlockByName(namespaceName, eventHubNamespaceResourceName) + locks.ByName(id.NamespaceName, eventHubNamespaceResourceName) + defer locks.UnlockByName(id.NamespaceName, eventHubNamespaceResourceName) - if resp, err := eventhubClient.DeleteAuthorizationRule(ctx, resourceGroup, namespaceName, eventHubName, name); err != nil { - if !utils.ResponseWasNotFound(resp) { - return fmt.Errorf("Error issuing 
Azure ARM delete request of EventHub Authorization Rule '%s': %+v", name, err) + if resp, err := eventhubClient.DeleteAuthorizationRule(ctx, *id); err != nil { + if !response.WasNotFound(resp.HttpResponse) { + return fmt.Errorf("deleting %s: %+v", *id, err) } } diff --git a/azurerm/internal/services/eventhub/eventhub_authorization_rule_resource_test.go b/azurerm/internal/services/eventhub/eventhub_authorization_rule_resource_test.go index 532ae8685552..67548400a3b8 100644 --- a/azurerm/internal/services/eventhub/eventhub_authorization_rule_resource_test.go +++ b/azurerm/internal/services/eventhub/eventhub_authorization_rule_resource_test.go @@ -6,12 +6,11 @@ import ( "strconv" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/sdk/eventhubs" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -38,10 +37,10 @@ func testAccEventHubAuthorizationRule(t *testing.T, listen, send, manage bool) { data := acceptance.BuildTestData(t, "azurerm_eventhub_authorization_rule", "test") r := EventHubAuthorizationRuleResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.base(data, listen, send, manage), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("name").Exists(), 
check.That(data.ResourceName).Key("namespace_name").Exists(), @@ -65,10 +64,10 @@ func TestAccEventHubAuthorizationRule_multi(t *testing.T) { resourceTwoName := "azurerm_eventhub_authorization_rule.test2" resourceThreeName := "azurerm_eventhub_authorization_rule.test3" - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.multi(data, true, true, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("manage").HasValue("false"), check.That(data.ResourceName).Key("send").HasValue("true"), @@ -76,17 +75,17 @@ func TestAccEventHubAuthorizationRule_multi(t *testing.T) { check.That(data.ResourceName).Key("primary_connection_string").Exists(), check.That(data.ResourceName).Key("secondary_connection_string").Exists(), check.That(resourceTwoName).ExistsInAzure(r), - resource.TestCheckResourceAttr(resourceTwoName, "manage", "false"), - resource.TestCheckResourceAttr(resourceTwoName, "send", "true"), - resource.TestCheckResourceAttr(resourceTwoName, "listen", "true"), - resource.TestCheckResourceAttrSet(resourceTwoName, "primary_connection_string"), - resource.TestCheckResourceAttrSet(resourceTwoName, "secondary_connection_string"), + acceptance.TestCheckResourceAttr(resourceTwoName, "manage", "false"), + acceptance.TestCheckResourceAttr(resourceTwoName, "send", "true"), + acceptance.TestCheckResourceAttr(resourceTwoName, "listen", "true"), + acceptance.TestCheckResourceAttrSet(resourceTwoName, "primary_connection_string"), + acceptance.TestCheckResourceAttrSet(resourceTwoName, "secondary_connection_string"), check.That(resourceThreeName).ExistsInAzure(r), - resource.TestCheckResourceAttr(resourceThreeName, "manage", "false"), - resource.TestCheckResourceAttr(resourceThreeName, "send", "true"), - resource.TestCheckResourceAttr(resourceThreeName, "listen", "true"), - 
resource.TestCheckResourceAttrSet(resourceThreeName, "primary_connection_string"), - resource.TestCheckResourceAttrSet(resourceThreeName, "secondary_connection_string"), + acceptance.TestCheckResourceAttr(resourceThreeName, "manage", "false"), + acceptance.TestCheckResourceAttr(resourceThreeName, "send", "true"), + acceptance.TestCheckResourceAttr(resourceThreeName, "listen", "true"), + acceptance.TestCheckResourceAttrSet(resourceThreeName, "primary_connection_string"), + acceptance.TestCheckResourceAttrSet(resourceThreeName, "secondary_connection_string"), ), }, data.ImportStep(), @@ -107,10 +106,10 @@ func TestAccEventHubAuthorizationRule_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_eventhub_authorization_rule", "test") r := EventHubAuthorizationRuleResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.base(data, true, true, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -125,10 +124,10 @@ func TestAccEventHubAuthorizationRule_rightsUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_eventhub_authorization_rule", "test") r := EventHubAuthorizationRuleResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.base(data, true, false, false), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("listen").HasValue("true"), check.That(data.ResourceName).Key("send").HasValue("false"), @@ -137,7 +136,7 @@ func TestAccEventHubAuthorizationRule_rightsUpdate(t *testing.T) { }, { Config: r.base(data, true, true, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("name").Exists(), 
check.That(data.ResourceName).Key("namespace_name").Exists(), @@ -158,10 +157,10 @@ func TestAccEventHubAuthorizationRule_withAliasConnectionString(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_eventhub_authorization_rule", "test") r := EventHubAuthorizationRuleResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.withAliasConnectionString(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("primary_connection_string_alias").Exists(), check.That(data.ResourceName).Key("secondary_connection_string_alias").Exists(), @@ -171,22 +170,18 @@ func TestAccEventHubAuthorizationRule_withAliasConnectionString(t *testing.T) { }) } -func (EventHubAuthorizationRuleResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { - id, err := azure.ParseAzureResourceID(state.ID) +func (EventHubAuthorizationRuleResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { + id, err := eventhubs.AuthorizationRuleID(state.ID) if err != nil { return nil, err } - name := id.Path["authorizationRules"] - namespaceName := id.Path["namespaces"] - eventHubName := id.Path["eventhubs"] - - resp, err := clients.Eventhub.EventHubsClient.GetAuthorizationRule(ctx, id.ResourceGroup, namespaceName, eventHubName, name) + resp, err := clients.Eventhub.EventHubsClient.GetAuthorizationRule(ctx, *id) if err != nil { - return nil, fmt.Errorf("retrieving Event Hub Authorization Rule %q (eventhub %s / namespace %s / resource group: %s) does not exist", name, eventHubName, namespaceName, id.ResourceGroup) + return nil, fmt.Errorf("retrieving %s: %+v", *id, err) } - return utils.Bool(resp.AuthorizationRuleProperties != nil), nil + return utils.Bool(resp.Model != nil), nil } func (EventHubAuthorizationRuleResource) 
base(data acceptance.TestData, listen, send, manage bool) string { diff --git a/azurerm/internal/services/eventhub/eventhub_cluster_data_source.go b/azurerm/internal/services/eventhub/eventhub_cluster_data_source.go index 14fe0de5bedb..f29a2e4e3e8e 100644 --- a/azurerm/internal/services/eventhub/eventhub_cluster_data_source.go +++ b/azurerm/internal/services/eventhub/eventhub_cluster_data_source.go @@ -4,25 +4,26 @@ import ( "fmt" "time" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/go-azure-helpers/response" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/location" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/sdk/eventhubsclusters" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func dataSourceEventHubCluster() *schema.Resource { - return &schema.Resource{ +func dataSourceEventHubCluster() *pluginsdk.Resource { + return &pluginsdk.Resource{ Read: dataSourceEventHubClusterRead, - Timeouts: &schema.ResourceTimeout{ - Read: schema.DefaultTimeout(5 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Read: pluginsdk.DefaultTimeout(5 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, @@ -31,14 +32,14 @@ func dataSourceEventHubCluster() *schema.Resource { "location": azure.SchemaLocationForDataSource(), "sku_name": { - Type: schema.TypeString, + 
Type: pluginsdk.TypeString, Computed: true, }, }, } } -func dataSourceEventHubClusterRead(d *schema.ResourceData, meta interface{}) error { +func dataSourceEventHubClusterRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Eventhub.ClusterClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() @@ -47,21 +48,22 @@ func dataSourceEventHubClusterRead(d *schema.ResourceData, meta interface{}) err resourceGroup := d.Get("resource_group_name").(string) subscriptionId := meta.(*clients.Client).Account.SubscriptionId - id := parse.NewClusterID(subscriptionId, resourceGroup, name) - resp, err := client.Get(ctx, resourceGroup, name) + id := eventhubsclusters.NewClusterID(subscriptionId, resourceGroup, name) + resp, err := client.ClustersGet(ctx, id) if err != nil { - if utils.ResponseWasNotFound(resp.Response) { + if response.WasNotFound(resp.HttpResponse) { return fmt.Errorf("%s was not found", id) } return fmt.Errorf("making Read request on Azure EventHub Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } d.SetId(id.ID()) - d.Set("name", resp.Name) - d.Set("resource_group_name", resourceGroup) - d.Set("sku_name", flattenEventHubClusterSkuName(resp.Sku)) - if location := resp.Location; location != nil { - d.Set("location", azure.NormalizeLocation(*location)) + d.Set("name", id.Name) + d.Set("resource_group_name", id.ResourceGroup) + + if model := resp.Model; model != nil { + d.Set("sku_name", flattenEventHubClusterSkuName(model.Sku)) + d.Set("location", location.NormalizeNilable(model.Location)) } return nil diff --git a/azurerm/internal/services/eventhub/eventhub_cluster_data_source_test.go b/azurerm/internal/services/eventhub/eventhub_cluster_data_source_test.go index eacd5f84ec6d..d5562506af69 100644 --- a/azurerm/internal/services/eventhub/eventhub_cluster_data_source_test.go +++ b/azurerm/internal/services/eventhub/eventhub_cluster_data_source_test.go @@ -4,7 +4,6 @@ import ( 
"fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -16,10 +15,10 @@ func TestAccEventHubClusterDataSource_basic(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_eventhub_cluster", "test") r := EventHubClusterDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("sku_name").HasValue("Dedicated_1"), ), }, diff --git a/azurerm/internal/services/eventhub/eventhub_cluster_resource.go b/azurerm/internal/services/eventhub/eventhub_cluster_resource.go index aff71fee9ff2..094b7dcec39d 100644 --- a/azurerm/internal/services/eventhub/eventhub_cluster_resource.go +++ b/azurerm/internal/services/eventhub/eventhub_cluster_resource.go @@ -7,44 +7,42 @@ import ( "strings" "time" - "github.com/Azure/azure-sdk-for-go/services/preview/eventhub/mgmt/2018-01-01-preview/eventhub" "github.com/hashicorp/go-azure-helpers/response" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/location" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/sdk/eventhubsclusters" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func resourceEventHubCluster() *schema.Resource { - return &schema.Resource{ +func resourceEventHubCluster() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceEventHubClusterCreateUpdate, Read: resourceEventHubClusterRead, Update: resourceEventHubClusterCreateUpdate, Delete: resourceEventHubClusterDelete, Importer: pluginsdk.ImporterValidatingResourceId(func(id string) error { - _, err := parse.ClusterID(id) + _, err := eventhubsclusters.ClusterID(id) return err }), - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(30 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), // You can't delete a cluster until at least 4 hours have passed from the initial creation. 
- Delete: schema.DefaultTimeout(300 * time.Minute), + Delete: pluginsdk.DefaultTimeout(300 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.ValidateEventHubName(), @@ -55,7 +53,7 @@ func resourceEventHubCluster() *schema.Resource { "location": azure.SchemaLocation(), "sku_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validation.StringMatch( @@ -69,119 +67,109 @@ func resourceEventHubCluster() *schema.Resource { } } -func resourceEventHubClusterCreateUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceEventHubClusterCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Eventhub.ClusterClient + subscriptionId := meta.(*clients.Client).Account.SubscriptionId ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) defer cancel() log.Printf("[INFO] preparing arguments for Azure ARM EventHub Cluster creation.") - name := d.Get("name").(string) - resourceGroup := d.Get("resource_group_name").(string) - + id := eventhubsclusters.NewClusterID(subscriptionId, d.Get("resource_group_name").(string), d.Get("name").(string)) if d.IsNewResource() { - existing, err := client.Get(ctx, resourceGroup, name) + existing, err := client.ClustersGet(ctx, id) if err != nil { - if !utils.ResponseWasNotFound(existing.Response) { - return fmt.Errorf("Error checking for presence of existing EventHub Cluster %q (Resource Group %q): %s", name, resourceGroup, err) + if !response.WasNotFound(existing.HttpResponse) { + return fmt.Errorf("checking for presence of existing %s: %+v", id, err) } } - if existing.ID != nil && *existing.ID != "" { - return tf.ImportAsExistsError("azurerm_eventhub_cluster", *existing.ID) + if !response.WasNotFound(existing.HttpResponse) { + return 
tf.ImportAsExistsError("azurerm_eventhub_cluster", id.ID()) } } - cluster := eventhub.Cluster{ + cluster := eventhubsclusters.Cluster{ Location: utils.String(azure.NormalizeLocation(d.Get("location").(string))), - Tags: tags.Expand(d.Get("tags").(map[string]interface{})), + Tags: expandTags(d.Get("tags").(map[string]interface{})), Sku: expandEventHubClusterSkuName(d.Get("sku_name").(string)), } - future, err := client.CreateOrUpdate(ctx, resourceGroup, name, cluster) - if err != nil { - return fmt.Errorf("creating EventHub Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) - } - - if err := future.WaitForCompletionRef(ctx, client.Client); err != nil { - return fmt.Errorf("waiting for creation of EventHub Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + if err := client.ClustersCreateOrUpdateThenPoll(ctx, id, cluster); err != nil { + return fmt.Errorf("creating %s: %+v", id, err) } - read, err := client.Get(ctx, resourceGroup, name) - if err != nil { - return fmt.Errorf("making Read request on Azure EventHub Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) - } - - if read.ID == nil || *read.ID == "" { - return fmt.Errorf("cannot read EventHub Cluster %s (Resource Group %s) ID", name, resourceGroup) + if d.IsNewResource() { + d.SetId(id.ID()) } - d.SetId(*read.ID) - return resourceEventHubClusterRead(d, meta) } -func resourceEventHubClusterRead(d *schema.ResourceData, meta interface{}) error { +func resourceEventHubClusterRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Eventhub.ClusterClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := parse.ClusterID(d.Id()) + id, err := eventhubsclusters.ClusterID(d.Id()) if err != nil { return err } - resp, err := client.Get(ctx, id.ResourceGroup, id.Name) + resp, err := client.ClustersGet(ctx, *id) if err != nil { - if utils.ResponseWasNotFound(resp.Response) { + if 
response.WasNotFound(resp.HttpResponse) { d.SetId("") return nil } - return fmt.Errorf("making Read request on Azure EventHub Cluster %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err) + return fmt.Errorf("retrieving %s: %+v", *id, err) } - d.Set("name", resp.Name) + d.Set("name", id.Name) d.Set("resource_group_name", id.ResourceGroup) - d.Set("sku_name", flattenEventHubClusterSkuName(resp.Sku)) - if location := resp.Location; location != nil { - d.Set("location", azure.NormalizeLocation(*location)) + + if model := resp.Model; model != nil { + d.Set("sku_name", flattenEventHubClusterSkuName(model.Sku)) + d.Set("location", location.NormalizeNilable(model.Location)) + + return tags.FlattenAndSet(d, flattenTags(model.Tags)) } - return tags.FlattenAndSet(d, resp.Tags) + return nil } -func resourceEventHubClusterDelete(d *schema.ResourceData, meta interface{}) error { +func resourceEventHubClusterDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Eventhub.ClusterClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := parse.ClusterID(d.Id()) + id, err := eventhubsclusters.ClusterID(d.Id()) if err != nil { return err } // The EventHub Cluster can't be deleted until four hours after creation so we'll keep retrying until it can be deleted. 
- return resource.Retry(d.Timeout(schema.TimeoutDelete), func() *resource.RetryError { - future, err := client.Delete(ctx, id.ResourceGroup, id.Name) + return pluginsdk.Retry(d.Timeout(pluginsdk.TimeoutDelete), func() *pluginsdk.RetryError { + future, err := client.ClustersDelete(ctx, *id) if err != nil { - if response.WasNotFound(future.Response()) { + if response.WasNotFound(future.HttpResponse) { return nil } - if strings.Contains(err.Error(), "Cluster cannot be deleted until four hours after its creation time") || future.Response().StatusCode == 429 { - return resource.RetryableError(fmt.Errorf("expected eventhub cluster to be deleted but was in pending creation state, retrying")) + if strings.Contains(err.Error(), "Cluster cannot be deleted until four hours after its creation time") || future.HttpResponse.StatusCode == 429 { + return pluginsdk.RetryableError(fmt.Errorf("expected eventhub cluster to be deleted but was in pending creation state, retrying")) } - return resource.NonRetryableError(fmt.Errorf("issuing delete request for EventHub Cluster %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err)) + return pluginsdk.NonRetryableError(fmt.Errorf("deleting %s: %+v", *id, err)) } - if err := future.WaitForCompletionRef(ctx, client.Client); err != nil { - if future.Response().StatusCode == 404 { + if err := future.Poller.PollUntilDone(); err != nil { + if response.WasNotFound(future.Poller.HttpResponse) { return nil } - return resource.NonRetryableError(fmt.Errorf("deleting EventHub Cluster %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err)) + return pluginsdk.NonRetryableError(fmt.Errorf("deleting %s: %+v", *id, err)) } return nil - }) + }) //lintignore:R006 } -func expandEventHubClusterSkuName(skuName string) *eventhub.ClusterSku { +func expandEventHubClusterSkuName(skuName string) *eventhubsclusters.ClusterSku { if len(skuName) == 0 { return nil } @@ -191,16 +179,16 @@ func expandEventHubClusterSkuName(skuName string) *eventhub.ClusterSku 
{ return nil } - return &eventhub.ClusterSku{ - Name: utils.String(name), - Capacity: utils.Int32(capacity), + return &eventhubsclusters.ClusterSku{ + Name: eventhubsclusters.ClusterSkuName(name), + Capacity: utils.Int64(int64(capacity)), } } -func flattenEventHubClusterSkuName(input *eventhub.ClusterSku) string { - if input == nil || input.Name == nil { +func flattenEventHubClusterSkuName(input *eventhubsclusters.ClusterSku) string { + if input == nil || input.Capacity == nil { return "" } - return fmt.Sprintf("%s_%d", *input.Name, *input.Capacity) + return fmt.Sprintf("%s_%d", string(input.Name), *input.Capacity) } diff --git a/azurerm/internal/services/eventhub/eventhub_cluster_resource_test.go b/azurerm/internal/services/eventhub/eventhub_cluster_resource_test.go index d3bc7846489a..94fabcb0c6c1 100644 --- a/azurerm/internal/services/eventhub/eventhub_cluster_resource_test.go +++ b/azurerm/internal/services/eventhub/eventhub_cluster_resource_test.go @@ -5,12 +5,11 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/sdk/eventhubsclusters" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -21,10 +20,10 @@ func TestAccEventHubCluster_basic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_eventhub_cluster", "test") r := EventHubClusterResource{} - data.ResourceTest(t, r, 
[]resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -36,24 +35,24 @@ func TestAccEventHubCluster_update(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_eventhub_cluster", "test") r := EventHubClusterResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.update(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -61,18 +60,18 @@ func TestAccEventHubCluster_update(t *testing.T) { }) } -func (EventHubClusterResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { - id, err := parse.ClusterID(state.ID) +func (EventHubClusterResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { + id, err := eventhubsclusters.ClusterID(state.ID) if err != nil { return nil, err } - resp, err := clients.Eventhub.ClusterClient.Get(ctx, id.ResourceGroup, id.Name) + resp, err := clients.Eventhub.ClusterClient.ClustersGet(ctx, *id) if err != nil { return nil, fmt.Errorf("retrieving %s: %v", id.String(), err) } - return utils.Bool(resp.ClusterProperties != nil), nil + return utils.Bool(resp.Model != nil), nil } func (EventHubClusterResource) basic(data acceptance.TestData) string { diff --git a/azurerm/internal/services/eventhub/eventhub_consumer_group_data_source.go 
b/azurerm/internal/services/eventhub/eventhub_consumer_group_data_source.go index 713d1b9951bc..c6849849ba46 100644 --- a/azurerm/internal/services/eventhub/eventhub_consumer_group_data_source.go +++ b/azurerm/internal/services/eventhub/eventhub_consumer_group_data_source.go @@ -4,26 +4,27 @@ import ( "fmt" "time" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/hashicorp/go-azure-helpers/response" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/sdk/consumergroups" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func EventHubConsumerGroupDataSource() *schema.Resource { - return &schema.Resource{ +func EventHubConsumerGroupDataSource() *pluginsdk.Resource { + return &pluginsdk.Resource{ Read: EventHubConsumerGroupDataSourceRead, - Timeouts: &schema.ResourceTimeout{ - Read: schema.DefaultTimeout(5 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Read: pluginsdk.DefaultTimeout(5 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.Any( validate.ValidateEventHubConsumerName(), @@ -32,13 +33,13 @@ func EventHubConsumerGroupDataSource() *schema.Resource { }, "namespace_name": { - Type: schema.TypeString, + Type: 
pluginsdk.TypeString, Required: true, ValidateFunc: validate.ValidateEventHubNamespaceName(), }, "eventhub_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validate.ValidateEventHubName(), }, @@ -48,40 +49,40 @@ func EventHubConsumerGroupDataSource() *schema.Resource { "location": azure.SchemaLocationForDataSource(), "user_metadata": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, }, } } -func EventHubConsumerGroupDataSourceRead(d *schema.ResourceData, meta interface{}) error { +func EventHubConsumerGroupDataSourceRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Eventhub.ConsumerGroupClient + subscriptionId := meta.(*clients.Client).Account.SubscriptionId ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() - name := d.Get("name").(string) - resourceGroup := d.Get("resource_group_name").(string) - eventHubName := d.Get("eventhub_name").(string) - namespaceName := d.Get("namespace_name").(string) + id := consumergroups.NewConsumergroupID(subscriptionId, d.Get("resource_group_name").(string), d.Get("namespace_name").(string), d.Get("eventhub_name").(string), d.Get("name").(string)) - resp, err := client.Get(ctx, resourceGroup, namespaceName, eventHubName, name) + resp, err := client.Get(ctx, id) if err != nil { - if utils.ResponseWasNotFound(resp.Response) { - return fmt.Errorf("Error: EventHub Consumer Group %q (Resource Group %q) was not found", name, resourceGroup) + if response.WasNotFound(resp.HttpResponse) { + return fmt.Errorf("%s was not found", id) } - return fmt.Errorf("Error: EventHub Consumer Group %s: %+v", name, err) + return fmt.Errorf("retrieving %s: %+v", id, err) } - d.SetId(*resp.ID) + d.SetId(id.ID()) - d.Set("name", name) - d.Set("eventhub_name", eventHubName) - d.Set("namespace_name", namespaceName) - d.Set("resource_group_name", resourceGroup) + d.Set("name", id.Name) + d.Set("eventhub_name", 
id.EventhubName) + d.Set("namespace_name", id.NamespaceName) + d.Set("resource_group_name", id.ResourceGroup) - if resp.ConsumerGroupProperties != nil { - d.Set("user_metadata", resp.ConsumerGroupProperties.UserMetadata) + if model := resp.Model; model != nil { + if props := model.Properties; props != nil { + d.Set("user_metadata", props.UserMetadata) + } } return nil diff --git a/azurerm/internal/services/eventhub/eventhub_consumer_group_data_source_test.go b/azurerm/internal/services/eventhub/eventhub_consumer_group_data_source_test.go index 83168488b225..27ae847becf7 100644 --- a/azurerm/internal/services/eventhub/eventhub_consumer_group_data_source_test.go +++ b/azurerm/internal/services/eventhub/eventhub_consumer_group_data_source_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -16,10 +15,10 @@ func TestAccEventHubConsumerGroupDataSource_complete(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_eventhub_consumer_group", "test") r := EventHubConsumerGroupDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.complete(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("user_metadata").HasValue("some-meta-data"), ), }, @@ -30,10 +29,10 @@ func TestAccEventHubConsumerGroupDataSource_completeDefault(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_eventhub_consumer_group", "test") r := EventHubConsumerGroupDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.completeDefault(data), - Check: resource.ComposeTestCheckFunc(), + Check: acceptance.ComposeTestCheckFunc(), }, }) } diff --git 
a/azurerm/internal/services/eventhub/eventhub_consumer_group_resource.go b/azurerm/internal/services/eventhub/eventhub_consumer_group_resource.go index 79dd6a09f80f..b7e973bc5a36 100644 --- a/azurerm/internal/services/eventhub/eventhub_consumer_group_resource.go +++ b/azurerm/internal/services/eventhub/eventhub_consumer_group_resource.go @@ -5,15 +5,13 @@ import ( "fmt" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" - - "github.com/Azure/azure-sdk-for-go/services/preview/eventhub/mgmt/2018-01-01-preview/eventhub" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/hashicorp/go-azure-helpers/response" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/sdk" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/sdk/consumergroups" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -35,24 +33,24 @@ func (r ConsumerGroupResource) ResourceType() string { return "azurerm_eventhub_consumer_group" } -func (r ConsumerGroupResource) Arguments() map[string]*schema.Schema { - return map[string]*schema.Schema{ +func (r ConsumerGroupResource) Arguments() map[string]*pluginsdk.Schema { + return map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.ValidateEventHubConsumerName(), }, 
"namespace_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.ValidateEventHubNamespaceName(), }, "eventhub_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.ValidateEventHubName(), @@ -61,15 +59,15 @@ func (r ConsumerGroupResource) Arguments() map[string]*schema.Schema { "resource_group_name": azure.SchemaResourceGroupName(), "user_metadata": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ValidateFunc: validation.StringLenBetween(1, 1024), }, } } -func (r ConsumerGroupResource) Attributes() map[string]*schema.Schema { - return map[string]*schema.Schema{} +func (r ConsumerGroupResource) Attributes() map[string]*pluginsdk.Schema { + return map[string]*pluginsdk.Schema{} } func (r ConsumerGroupResource) Create() sdk.ResourceFunc { @@ -85,24 +83,24 @@ func (r ConsumerGroupResource) Create() sdk.ResourceFunc { client := metadata.Client.Eventhub.ConsumerGroupClient subscriptionId := metadata.Client.Account.SubscriptionId - id := parse.NewEventHubConsumerGroupID(subscriptionId, state.ResourceGroupName, state.NamespaceName, state.EventHubName, state.Name) - existing, err := client.Get(ctx, state.ResourceGroupName, state.NamespaceName, state.EventHubName, state.Name) - if err != nil && !utils.ResponseWasNotFound(existing.Response) { - return fmt.Errorf("checking for the presence of an existing Consumer Group %q: %+v", state.Name, err) + id := consumergroups.NewConsumergroupID(subscriptionId, state.ResourceGroupName, state.NamespaceName, state.EventHubName, state.Name) + existing, err := client.Get(ctx, id) + if err != nil && !response.WasNotFound(existing.HttpResponse) { + return fmt.Errorf("checking for the presence of an existing %s: %+v", id, err) } - if !utils.ResponseWasNotFound(existing.Response) { + if !response.WasNotFound(existing.HttpResponse) { return 
metadata.ResourceRequiresImport(r.ResourceType(), id) } - parameters := eventhub.ConsumerGroup{ + parameters := consumergroups.ConsumerGroup{ Name: utils.String(state.Name), - ConsumerGroupProperties: &eventhub.ConsumerGroupProperties{ + Properties: &consumergroups.ConsumerGroupProperties{ UserMetadata: utils.String(state.UserMetadata), }, } - if _, err := client.CreateOrUpdate(ctx, state.ResourceGroupName, state.NamespaceName, state.EventHubName, state.Name, parameters); err != nil { - return fmt.Errorf("creating Consumer Group %q (EventHub %q / Namespace %q / Resource Group %q): %+v", state.Name, state.EventHubName, state.NamespaceName, state.ResourceGroupName, err) + if _, err := client.CreateOrUpdate(ctx, id, parameters); err != nil { + return fmt.Errorf("creating %s: %+v", id, err) } metadata.SetID(id) @@ -115,7 +113,7 @@ func (r ConsumerGroupResource) Create() sdk.ResourceFunc { func (r ConsumerGroupResource) Update() sdk.ResourceFunc { return sdk.ResourceFunc{ Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error { - id, err := parse.EventHubConsumerGroupID(metadata.ResourceData.Id()) + id, err := consumergroups.ConsumergroupID(metadata.ResourceData.Id()) if err != nil { return err } @@ -129,15 +127,15 @@ func (r ConsumerGroupResource) Update() sdk.ResourceFunc { metadata.Logger.Infof("updating Consumer Group %q..", state.Name) client := metadata.Client.Eventhub.ConsumerGroupClient - parameters := eventhub.ConsumerGroup{ - Name: utils.String(id.ConsumergroupName), - ConsumerGroupProperties: &eventhub.ConsumerGroupProperties{ + parameters := consumergroups.ConsumerGroup{ + Name: utils.String(id.Name), + Properties: &consumergroups.ConsumerGroupProperties{ UserMetadata: utils.String(state.UserMetadata), }, } - if _, err := client.CreateOrUpdate(ctx, id.ResourceGroup, id.NamespaceName, id.EventhubName, id.ConsumergroupName, parameters); err != nil { - return fmt.Errorf("updating Consumer Group %q (EventHub %q / Namespace %q / Resource Group %q): 
%+v", id.ConsumergroupName, id.EventhubName, id.NamespaceName, id.ResourceGroup, err) + if _, err := client.CreateOrUpdate(ctx, *id, parameters); err != nil { + return fmt.Errorf("updating %s: %+v", *id, err) } return nil @@ -150,29 +148,29 @@ func (r ConsumerGroupResource) Read() sdk.ResourceFunc { return sdk.ResourceFunc{ Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error { client := metadata.Client.Eventhub.ConsumerGroupClient - id, err := parse.EventHubConsumerGroupID(metadata.ResourceData.Id()) + id, err := consumergroups.ConsumergroupID(metadata.ResourceData.Id()) if err != nil { return err } - metadata.Logger.Infof("retrieving Consumer Group %q..", id.ConsumergroupName) - resp, err := client.Get(ctx, id.ResourceGroup, id.NamespaceName, id.EventhubName, id.ConsumergroupName) + metadata.Logger.Infof("retrieving Consumer Group %q..", id.Name) + resp, err := client.Get(ctx, *id) if err != nil { - if utils.ResponseWasNotFound(resp.Response) { + if response.WasNotFound(resp.HttpResponse) { return metadata.MarkAsGone(id) } - return fmt.Errorf("reading Consumer Group %q (EventHub %q / Namespace %q / Resource Group %q): %+v", id.ConsumergroupName, id.EventhubName, id.NamespaceName, id.ResourceGroup, err) + return fmt.Errorf("retrieving %s: %+v", id, err) } state := ConsumerGroupObject{ - Name: id.ConsumergroupName, + Name: id.Name, NamespaceName: id.NamespaceName, EventHubName: id.EventhubName, ResourceGroupName: id.ResourceGroup, } - if props := resp.ConsumerGroupProperties; props != nil { - state.UserMetadata = utils.NormalizeNilableString(props.UserMetadata) + if model := resp.Model; model != nil && model.Properties != nil { + state.UserMetadata = utils.NormalizeNilableString(model.Properties.UserMetadata) } return metadata.Encode(&state) @@ -185,15 +183,15 @@ func (r ConsumerGroupResource) Delete() sdk.ResourceFunc { return sdk.ResourceFunc{ Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error { client := 
metadata.Client.Eventhub.ConsumerGroupClient - id, err := parse.EventHubConsumerGroupID(metadata.ResourceData.Id()) + id, err := consumergroups.ConsumergroupID(metadata.ResourceData.Id()) if err != nil { return err } - metadata.Logger.Infof("deleting Consumer Group %q..", id.ConsumergroupName) - if resp, err := client.Delete(ctx, id.ResourceGroup, id.NamespaceName, id.EventhubName, id.ConsumergroupName); err != nil { - if !utils.ResponseWasNotFound(resp) { - return fmt.Errorf("deleting Consumer Group %q (EventHub %q / Namespace %q / Resource Group %q): %+v", id.ConsumergroupName, id.EventhubName, id.NamespaceName, id.ResourceGroup, err) + metadata.Logger.Infof("deleting Consumer Group %q..", id.Name) + if resp, err := client.Delete(ctx, *id); err != nil { + if !response.WasNotFound(resp.HttpResponse) { + return fmt.Errorf("deleting %s: %+v", id, err) } } diff --git a/azurerm/internal/services/eventhub/eventhub_consumer_group_resource_test.go b/azurerm/internal/services/eventhub/eventhub_consumer_group_resource_test.go index 5f2ad1ba2d5c..077dd79aac92 100644 --- a/azurerm/internal/services/eventhub/eventhub_consumer_group_resource_test.go +++ b/azurerm/internal/services/eventhub/eventhub_consumer_group_resource_test.go @@ -5,12 +5,11 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/sdk/consumergroups" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -21,10 +20,10 @@ func TestAccEventHubConsumerGroup_basic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_eventhub_consumer_group", "test") r := EventHubConsumerGroupResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -36,10 +35,10 @@ func TestAccEventHubConsumerGroup_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_eventhub_consumer_group", "test") r := EventHubConsumerGroupResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -54,10 +53,10 @@ func TestAccEventHubConsumerGroup_complete(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_eventhub_consumer_group", "test") r := EventHubConsumerGroupResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.complete(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -69,16 +68,16 @@ func TestAccEventHubConsumerGroup_userMetadataUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_eventhub_consumer_group", "test") r := EventHubConsumerGroupResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, { Config: r.complete(data), - Check: resource.ComposeTestCheckFunc( + Check: 
acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("user_metadata").HasValue("some-meta-data"), ), @@ -87,18 +86,18 @@ func TestAccEventHubConsumerGroup_userMetadataUpdate(t *testing.T) { }) } -func (EventHubConsumerGroupResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { - id, err := parse.EventHubConsumerGroupID(state.ID) +func (EventHubConsumerGroupResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { + id, err := consumergroups.ConsumergroupID(state.ID) if err != nil { return nil, err } - resp, err := clients.Eventhub.ConsumerGroupClient.Get(ctx, id.ResourceGroup, id.NamespaceName, id.EventhubName, id.ConsumergroupName) + resp, err := clients.Eventhub.ConsumerGroupClient.Get(ctx, *id) if err != nil { return nil, fmt.Errorf("retrieving %s: %v", id.String(), err) } - return utils.Bool(resp.ConsumerGroupProperties != nil), nil + return utils.Bool(resp.Model != nil), nil } func (EventHubConsumerGroupResource) basic(data acceptance.TestData) string { diff --git a/azurerm/internal/services/eventhub/eventhub_data_source.go b/azurerm/internal/services/eventhub/eventhub_data_source.go index 69a27e6f8cab..1d136005be80 100644 --- a/azurerm/internal/services/eventhub/eventhub_data_source.go +++ b/azurerm/internal/services/eventhub/eventhub_data_source.go @@ -4,75 +4,74 @@ import ( "fmt" "time" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/go-azure-helpers/response" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/sdk/eventhubs" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func dataSourceEventHub() *schema.Resource { - return &schema.Resource{ +func dataSourceEventHub() *pluginsdk.Resource { + return &pluginsdk.Resource{ Read: dataSourceEventHubRead, - Timeouts: &schema.ResourceTimeout{ - Read: schema.DefaultTimeout(5 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Read: pluginsdk.DefaultTimeout(5 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "namespace_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "resource_group_name": azure.SchemaResourceGroupNameForDataSource(), "partition_count": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Computed: true, }, "partition_ids": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, + Elem: &pluginsdk.Schema{Type: pluginsdk.TypeString}, }, }, } } -func dataSourceEventHubRead(d *schema.ResourceData, meta interface{}) error { +func dataSourceEventHubRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Eventhub.EventHubsClient + subscriptionId := meta.(*clients.Client).Account.SubscriptionId ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() - name := d.Get("name").(string) - resourceGroup := d.Get("resource_group_name").(string) - namespaceName := d.Get("namespace_name").(string) - - resp, err := client.Get(ctx, resourceGroup, namespaceName, name) + id := eventhubs.NewEventhubID(subscriptionId, d.Get("resource_group_name").(string), d.Get("namespace_name").(string), d.Get("name").(string)) + resp, err := client.Get(ctx, id) if err != nil { - if utils.ResponseWasNotFound(resp.Response) { - return fmt.Errorf("Error: 
EventHub %q (Resource Group %q / Namespace Name %q) was not found", name, resourceGroup, namespaceName) + if response.WasNotFound(resp.HttpResponse) { + return fmt.Errorf("%s was not found", id) } - return fmt.Errorf("Error making Read request on EventHub %q (Resource Group %q / Namespace Name %q): %+v", name, resourceGroup, namespaceName, err) + return fmt.Errorf("retrieving %s: %+v", id, err) } - d.SetId(*resp.ID) + d.SetId(id.ID()) - d.Set("name", name) - d.Set("namespace_name", namespaceName) - d.Set("resource_group_name", resourceGroup) + d.Set("name", id.Name) + d.Set("namespace_name", id.NamespaceName) + d.Set("resource_group_name", id.ResourceGroup) - if props := resp.Properties; props != nil { - d.Set("partition_count", props.PartitionCount) - d.Set("partition_ids", props.PartitionIds) + if model := resp.Model; model != nil && model.Properties != nil { + d.Set("partition_count", model.Properties.PartitionCount) + d.Set("partition_ids", model.Properties.PartitionIds) } return nil diff --git a/azurerm/internal/services/eventhub/eventhub_data_source_test.go b/azurerm/internal/services/eventhub/eventhub_data_source_test.go index feba670fb53d..2152c340a8e6 100644 --- a/azurerm/internal/services/eventhub/eventhub_data_source_test.go +++ b/azurerm/internal/services/eventhub/eventhub_data_source_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -16,10 +15,10 @@ func TestAccEventHubDataSource_basic(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_eventhub", "test") r := EventHubDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( 
check.That(data.ResourceName).Key("partition_count").HasValue("2"), check.That(data.ResourceName).Key("partition_ids.#").HasValue("2"), ), diff --git a/azurerm/internal/services/eventhub/eventhub_namespace_authorization_rule_data_source.go b/azurerm/internal/services/eventhub/eventhub_namespace_authorization_rule_data_source.go index 12117cfe8e6c..73e12eba00b8 100644 --- a/azurerm/internal/services/eventhub/eventhub_namespace_authorization_rule_data_source.go +++ b/azurerm/internal/services/eventhub/eventhub_namespace_authorization_rule_data_source.go @@ -4,31 +4,32 @@ import ( "fmt" "time" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/go-azure-helpers/response" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func EventHubNamespaceDataSourceAuthorizationRule() *schema.Resource { - return &schema.Resource{ +func EventHubNamespaceDataSourceAuthorizationRule() *pluginsdk.Resource { + return &pluginsdk.Resource{ Read: EventHubNamespaceDataSourceAuthorizationRuleRead, - Timeouts: &schema.ResourceTimeout{ - Read: schema.DefaultTimeout(5 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Read: pluginsdk.DefaultTimeout(5 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: 
validate.ValidateEventHubAuthorizationRuleName(), }, "namespace_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validate.ValidateEventHubNamespaceName(), }, @@ -36,103 +37,102 @@ func EventHubNamespaceDataSourceAuthorizationRule() *schema.Resource { "resource_group_name": azure.SchemaResourceGroupNameForDataSource(), "listen": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Computed: true, }, "manage": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Computed: true, }, "primary_connection_string": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, }, "primary_connection_string_alias": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, }, "primary_key": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, }, "secondary_connection_string": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, }, "secondary_connection_string_alias": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, }, "secondary_key": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, }, "send": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Computed: true, }, }, } } -func EventHubNamespaceDataSourceAuthorizationRuleRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*clients.Client).Eventhub.NamespacesClient +func EventHubNamespaceDataSourceAuthorizationRuleRead(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).Eventhub.NamespaceAuthorizationRulesClient + subscriptionId := meta.(*clients.Client).Account.SubscriptionId ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() - name := d.Get("name").(string) - resourceGroup := d.Get("resource_group_name").(string) - namespaceName := 
d.Get("namespace_name").(string) - - resp, err := client.GetAuthorizationRule(ctx, resourceGroup, namespaceName, name) + id := authorizationrulesnamespaces.NewAuthorizationRuleID(subscriptionId, d.Get("resource_group_name").(string), d.Get("namespace_name").(string), d.Get("name").(string)) + resp, err := client.NamespacesGetAuthorizationRule(ctx, id) if err != nil { - if utils.ResponseWasNotFound(resp.Response) { - return fmt.Errorf("EventHub Authorization Rule %q (Resource Group %q / Namespace Name %q) was not found", name, resourceGroup, namespaceName) + if response.WasNotFound(resp.HttpResponse) { + return fmt.Errorf("%s was not found", id) } - return fmt.Errorf("retrieving EventHub Authorization Rule %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("retrieving %s: %+v", id, err) } - if resp.ID == nil || *resp.ID == "" { - return fmt.Errorf("retrieving EventHub Authorization Rule %q (Resource Group %q): `id` was nil", name, resourceGroup) - } - d.SetId(*resp.ID) + d.SetId(id.ID()) - d.Set("name", name) - d.Set("namespace_name", namespaceName) - d.Set("resource_group_name", resourceGroup) + d.Set("name", id.Name) + d.Set("namespace_name", id.NamespaceName) + d.Set("resource_group_name", id.ResourceGroup) - if props := resp.AuthorizationRuleProperties; props != nil { - listen, send, manage := flattenEventHubAuthorizationRuleRights(props.Rights) - d.Set("manage", manage) - d.Set("listen", listen) - d.Set("send", send) + if model := resp.Model; model != nil { + if props := model.Properties; props != nil { + listen, send, manage := flattenEventHubAuthorizationRuleRights(props.Rights) + d.Set("manage", manage) + d.Set("listen", listen) + d.Set("send", send) + } } - keysResp, err := client.ListKeys(ctx, resourceGroup, namespaceName, name) + keysResp, err := client.NamespacesListKeys(ctx, id) if err != nil { - return fmt.Errorf("Error making Read request on Azure EventHub Authorization Rule List Keys %s: %+v", name, err) + return 
fmt.Errorf("listing keys for %s: %+v", id, err) } - d.Set("primary_key", keysResp.PrimaryKey) - d.Set("secondary_key", keysResp.SecondaryKey) - d.Set("primary_connection_string", keysResp.PrimaryConnectionString) - d.Set("secondary_connection_string", keysResp.SecondaryConnectionString) - d.Set("primary_connection_string_alias", keysResp.AliasPrimaryConnectionString) - d.Set("secondary_connection_string_alias", keysResp.AliasSecondaryConnectionString) + if model := keysResp.Model; model != nil { + d.Set("primary_key", model.PrimaryKey) + d.Set("secondary_key", model.SecondaryKey) + d.Set("primary_connection_string", model.PrimaryConnectionString) + d.Set("secondary_connection_string", model.SecondaryConnectionString) + d.Set("primary_connection_string_alias", model.AliasPrimaryConnectionString) + d.Set("secondary_connection_string_alias", model.AliasSecondaryConnectionString) + } return nil } diff --git a/azurerm/internal/services/eventhub/eventhub_namespace_authorization_rule_data_source_test.go b/azurerm/internal/services/eventhub/eventhub_namespace_authorization_rule_data_source_test.go index 1ed6e0c9505e..52ee85f66b18 100644 --- a/azurerm/internal/services/eventhub/eventhub_namespace_authorization_rule_data_source_test.go +++ b/azurerm/internal/services/eventhub/eventhub_namespace_authorization_rule_data_source_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -16,10 +15,10 @@ func TestAccEventHubNamespaceAuthorizationRuleDataSource_basic(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_eventhub_namespace_authorization_rule", "test") r := EventHubNamespaceAuthorizationRuleDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.basic(data, 
true, true, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("listen").Exists(), check.That(data.ResourceName).Key("manage").Exists(), check.That(data.ResourceName).Key("send").Exists(), @@ -32,9 +31,9 @@ func TestAccEventHubNamespaceAuthorizationRuleDataSource_withAliasConnectionStri data := acceptance.BuildTestData(t, "data.azurerm_eventhub_namespace_authorization_rule", "test") r := EventHubNamespaceAuthorizationRuleDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { - // `primary_connection_string_alias` and `secondary_connection_string_alias` are still `nil` while `data.azurerm_eventhub_namespace_authorization_rule` is retrieving resource. since `azurerm_eventhub_namespace_disaster_recovery_config` hasn't been created. + // `primary_connection_string_alias` and `secondary_connection_string_alias` are still `nil` while `data.azurerm_eventhub_namespace_authorization_rule` is retrieving the resource, since `azurerm_eventhub_namespace_disaster_recovery_config` hasn't been created. // So these two properties should be checked in the second run. // And `depends_on` cannot be applied to `azurerm_eventhub_namespace_authorization_rule`. // Because it would throw error message `BreakPairing operation is only allowed on primary namespace with valid secondary namespace.` while destroying `azurerm_eventhub_namespace_disaster_recovery_config` if `depends_on` is applied.
@@ -42,7 +41,7 @@ func TestAccEventHubNamespaceAuthorizationRuleDataSource_withAliasConnectionStri }, { Config: r.withAliasConnectionString(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("primary_connection_string_alias").Exists(), check.That(data.ResourceName).Key("secondary_connection_string_alias").Exists(), ), diff --git a/azurerm/internal/services/eventhub/eventhub_namespace_authorization_rule_resource.go b/azurerm/internal/services/eventhub/eventhub_namespace_authorization_rule_resource.go index 6f5a3fb5ad55..f88f7955b5af 100644 --- a/azurerm/internal/services/eventhub/eventhub_namespace_authorization_rule_resource.go +++ b/azurerm/internal/services/eventhub/eventhub_namespace_authorization_rule_resource.go @@ -5,23 +5,20 @@ import ( "log" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" - - "github.com/Azure/azure-sdk-for-go/services/preview/eventhub/mgmt/2018-01-01-preview/eventhub" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/go-azure-helpers/response" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/locks" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/migration" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/validate" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func resourceEventHubNamespaceAuthorizationRule() *schema.Resource { - return &schema.Resource{ +func resourceEventHubNamespaceAuthorizationRule() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceEventHubNamespaceAuthorizationRuleCreateUpdate, Read: resourceEventHubNamespaceAuthorizationRuleRead, Update: resourceEventHubNamespaceAuthorizationRuleCreateUpdate, @@ -36,23 +33,23 @@ func resourceEventHubNamespaceAuthorizationRule() *schema.Resource { 1: migration.NamespaceAuthorizationRuleV1ToV2{}, }), - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(30 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), }, - Schema: eventHubAuthorizationRuleSchemaFrom(map[string]*schema.Schema{ + Schema: eventHubAuthorizationRuleSchemaFrom(map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.ValidateEventHubAuthorizationRuleName(), }, "namespace_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.ValidateEventHubNamespaceName(), @@ -65,109 +62,101 @@ func resourceEventHubNamespaceAuthorizationRule() *schema.Resource { } } -func resourceEventHubNamespaceAuthorizationRuleCreateUpdate(d *schema.ResourceData, meta interface{}) error { - client := 
meta.(*clients.Client).Eventhub.NamespacesClient +func resourceEventHubNamespaceAuthorizationRuleCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).Eventhub.NamespaceAuthorizationRulesClient + subscriptionId := meta.(*clients.Client).Account.SubscriptionId ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) defer cancel() log.Printf("[INFO] preparing arguments for AzureRM EventHub Namespace Authorization Rule creation.") - name := d.Get("name").(string) - namespaceName := d.Get("namespace_name").(string) - resourceGroup := d.Get("resource_group_name").(string) - + id := authorizationrulesnamespaces.NewAuthorizationRuleID(subscriptionId, d.Get("resource_group_name").(string), d.Get("namespace_name").(string), d.Get("name").(string)) if d.IsNewResource() { - existing, err := client.GetAuthorizationRule(ctx, resourceGroup, namespaceName, name) + existing, err := client.NamespacesGetAuthorizationRule(ctx, id) if err != nil { - if !utils.ResponseWasNotFound(existing.Response) { - return fmt.Errorf("Error checking for presence of existing EventHub Namespace Authorization Rule %q (Namespace %q / Resource Group %q): %s", name, namespaceName, resourceGroup, err) + if !response.WasNotFound(existing.HttpResponse) { + return fmt.Errorf("checking for presence of existing %s: %+v", id, err) } } - if existing.ID != nil && *existing.ID != "" { - return tf.ImportAsExistsError("azurerm_eventhub_namespace_authorization_rule", *existing.ID) + if !response.WasNotFound(existing.HttpResponse) { + return tf.ImportAsExistsError("azurerm_eventhub_namespace_authorization_rule", id.ID()) } } - locks.ByName(namespaceName, eventHubNamespaceResourceName) - defer locks.UnlockByName(namespaceName, eventHubNamespaceResourceName) + locks.ByName(id.NamespaceName, eventHubNamespaceResourceName) + defer locks.UnlockByName(id.NamespaceName, eventHubNamespaceResourceName) - parameters := eventhub.AuthorizationRule{ - Name: 
&name, - AuthorizationRuleProperties: &eventhub.AuthorizationRuleProperties{ + parameters := authorizationrulesnamespaces.AuthorizationRule{ + Name: &id.Name, + Properties: &authorizationrulesnamespaces.AuthorizationRuleProperties{ Rights: expandEventHubAuthorizationRuleRights(d), }, } - if _, err := client.CreateOrUpdateAuthorizationRule(ctx, resourceGroup, namespaceName, name, parameters); err != nil { - return fmt.Errorf("Error creating/updating EventHub Namespace Authorization Rule %q (Resource Group %q): %+v", name, resourceGroup, err) + if _, err := client.NamespacesCreateOrUpdateAuthorizationRule(ctx, id, parameters); err != nil { + return fmt.Errorf("creating/updating %s: %+v", id, err) } - read, err := client.GetAuthorizationRule(ctx, resourceGroup, namespaceName, name) - if err != nil { - return err - } - - if read.ID == nil { - return fmt.Errorf("Cannot read EventHub Namespace Authorization Rule %s (resource group %s) ID", name, resourceGroup) - } - - d.SetId(*read.ID) - + d.SetId(id.ID()) return resourceEventHubNamespaceAuthorizationRuleRead(d, meta) } -func resourceEventHubNamespaceAuthorizationRuleRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*clients.Client).Eventhub.NamespacesClient +func resourceEventHubNamespaceAuthorizationRuleRead(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).Eventhub.NamespaceAuthorizationRulesClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := parse.NamespaceAuthorizationRuleID(d.Id()) + id, err := authorizationrulesnamespaces.AuthorizationRuleID(d.Id()) if err != nil { return err } - resp, err := client.GetAuthorizationRule(ctx, id.ResourceGroup, id.NamespaceName, id.AuthorizationRuleName) + resp, err := client.NamespacesGetAuthorizationRule(ctx, *id) if err != nil { - if utils.ResponseWasNotFound(resp.Response) { + if response.WasNotFound(resp.HttpResponse) { d.SetId("") return nil } - return 
fmt.Errorf("retrieving Authorization Rule %q (EventHub Namespace %q / Resource Group %q) : %+v", id.AuthorizationRuleName, id.NamespaceName, id.ResourceGroup, err) + return fmt.Errorf("retrieving %s: %+v", id, err) } - d.Set("name", id.AuthorizationRuleName) + d.Set("name", id.Name) d.Set("namespace_name", id.NamespaceName) d.Set("resource_group_name", id.ResourceGroup) - if properties := resp.AuthorizationRuleProperties; properties != nil { - listen, send, manage := flattenEventHubAuthorizationRuleRights(properties.Rights) - d.Set("manage", manage) - d.Set("listen", listen) - d.Set("send", send) + if model := resp.Model; model != nil { + if props := model.Properties; props != nil { + listen, send, manage := flattenEventHubAuthorizationRuleRights(props.Rights) + d.Set("manage", manage) + d.Set("listen", listen) + d.Set("send", send) + } } - keysResp, err := client.ListKeys(ctx, id.ResourceGroup, id.NamespaceName, id.AuthorizationRuleName) + keysResp, err := client.NamespacesListKeys(ctx, *id) if err != nil { - return fmt.Errorf("retrieving Keys for Authorization Rule %q (EventHub Namespace %q / Resource Group %q): %+v", id.AuthorizationRuleName, id.NamespaceName, id.ResourceGroup, err) + return fmt.Errorf("listing keys for %s: %+v", id, err) } - d.Set("primary_key", keysResp.PrimaryKey) - d.Set("secondary_key", keysResp.SecondaryKey) - d.Set("primary_connection_string", keysResp.PrimaryConnectionString) - d.Set("secondary_connection_string", keysResp.SecondaryConnectionString) - d.Set("primary_connection_string_alias", keysResp.AliasPrimaryConnectionString) - d.Set("secondary_connection_string_alias", keysResp.AliasSecondaryConnectionString) + if model := keysResp.Model; model != nil { + d.Set("primary_key", model.PrimaryKey) + d.Set("secondary_key", model.SecondaryKey) + d.Set("primary_connection_string", model.PrimaryConnectionString) + d.Set("secondary_connection_string", model.SecondaryConnectionString) + d.Set("primary_connection_string_alias", 
model.AliasPrimaryConnectionString) + d.Set("secondary_connection_string_alias", model.AliasSecondaryConnectionString) + } return nil } -func resourceEventHubNamespaceAuthorizationRuleDelete(d *schema.ResourceData, meta interface{}) error { - eventhubClient := meta.(*clients.Client).Eventhub.NamespacesClient +func resourceEventHubNamespaceAuthorizationRuleDelete(d *pluginsdk.ResourceData, meta interface{}) error { + eventhubClient := meta.(*clients.Client).Eventhub.NamespaceAuthorizationRulesClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := parse.NamespaceAuthorizationRuleID(d.Id()) + id, err := authorizationrulesnamespaces.AuthorizationRuleID(d.Id()) if err != nil { return err } @@ -175,8 +164,8 @@ func resourceEventHubNamespaceAuthorizationRuleDelete(d *schema.ResourceData, me locks.ByName(id.NamespaceName, eventHubNamespaceResourceName) defer locks.UnlockByName(id.NamespaceName, eventHubNamespaceResourceName) - if _, err := eventhubClient.DeleteAuthorizationRule(ctx, id.ResourceGroup, id.NamespaceName, id.AuthorizationRuleName); err != nil { - return fmt.Errorf("deleting Authorization Rule %q (EventHub Namespace %q / Resource Group %q): %+v", id.AuthorizationRuleName, id.NamespaceName, id.ResourceGroup, err) + if _, err := eventhubClient.NamespacesDeleteAuthorizationRule(ctx, *id); err != nil { + return fmt.Errorf("deleting %s: %+v", id, err) } return nil diff --git a/azurerm/internal/services/eventhub/eventhub_namespace_authorization_rule_resource_test.go b/azurerm/internal/services/eventhub/eventhub_namespace_authorization_rule_resource_test.go index 3f2fc63f3ef4..0963699ec6d5 100644 --- a/azurerm/internal/services/eventhub/eventhub_namespace_authorization_rule_resource_test.go +++ b/azurerm/internal/services/eventhub/eventhub_namespace_authorization_rule_resource_test.go @@ -6,12 +6,11 @@ import ( "strconv" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - 
"github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -38,10 +37,10 @@ func testAccEventHubNamespaceAuthorizationRule(t *testing.T, listen, send, manag data := acceptance.BuildTestData(t, "azurerm_eventhub_namespace_authorization_rule", "test") r := EventHubNamespaceAuthorizationRuleResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.base(data, listen, send, manage), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("name").Exists(), check.That(data.ResourceName).Key("namespace_name").Exists(), @@ -62,10 +61,10 @@ func TestAccEventHubNamespaceAuthorizationRule_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_eventhub_namespace_authorization_rule", "test") r := EventHubNamespaceAuthorizationRuleResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.base(data, true, true, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -80,10 +79,10 @@ func TestAccEventHubNamespaceAuthorizationRule_rightsUpdate(t *testing.T) { data := acceptance.BuildTestData(t, 
"azurerm_eventhub_namespace_authorization_rule", "test") r := EventHubNamespaceAuthorizationRuleResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.base(data, true, false, false), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("listen").HasValue("true"), check.That(data.ResourceName).Key("send").HasValue("false"), @@ -92,7 +91,7 @@ func TestAccEventHubNamespaceAuthorizationRule_rightsUpdate(t *testing.T) { }, { Config: r.base(data, true, true, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("name").Exists(), check.That(data.ResourceName).Key("namespace_name").Exists(), @@ -113,20 +112,20 @@ func TestAccEventHubNamespaceAuthorizationRule_withAliasConnectionString(t *test data := acceptance.BuildTestData(t, "azurerm_eventhub_namespace_authorization_rule", "test") r := EventHubNamespaceAuthorizationRuleResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // `primary_connection_string_alias` and `secondary_connection_string_alias` are still `nil` in `azurerm_eventhub_namespace_authorization_rule` after created `azurerm_eventhub_namespace` successfully since `azurerm_eventhub_namespace_disaster_recovery_config` hasn't been created. // So these two properties should be checked in the second run. // And `depends_on` cannot be applied to `azurerm_eventhub_namespace_authorization_rule`. // Because it would throw error message `BreakPairing operation is only allowed on primary namespace with valid secondary namespace.` while destroying `azurerm_eventhub_namespace_disaster_recovery_config` if `depends_on` is applied. 
Config: r.withAliasConnectionString(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, { Config: r.withAliasConnectionString(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("primary_connection_string_alias").Exists(), check.That(data.ResourceName).Key("secondary_connection_string_alias").Exists(), ), @@ -141,10 +140,10 @@ func TestAccEventHubNamespaceAuthorizationRule_multi(t *testing.T) { resourceTwoName := "azurerm_eventhub_namespace_authorization_rule.test2" resourceThreeName := "azurerm_eventhub_namespace_authorization_rule.test3" - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.multi(data, true, true, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("manage").HasValue("false"), check.That(data.ResourceName).Key("send").HasValue("true"), @@ -152,17 +151,17 @@ func TestAccEventHubNamespaceAuthorizationRule_multi(t *testing.T) { check.That(data.ResourceName).Key("primary_connection_string").Exists(), check.That(data.ResourceName).Key("secondary_connection_string").Exists(), check.That(resourceTwoName).ExistsInAzure(r), - resource.TestCheckResourceAttr(resourceTwoName, "manage", "false"), - resource.TestCheckResourceAttr(resourceTwoName, "send", "true"), - resource.TestCheckResourceAttr(resourceTwoName, "listen", "true"), - resource.TestCheckResourceAttrSet(resourceTwoName, "primary_connection_string"), - resource.TestCheckResourceAttrSet(resourceTwoName, "secondary_connection_string"), + acceptance.TestCheckResourceAttr(resourceTwoName, "manage", "false"), + acceptance.TestCheckResourceAttr(resourceTwoName, "send", "true"), + acceptance.TestCheckResourceAttr(resourceTwoName, "listen", "true"), + 
acceptance.TestCheckResourceAttrSet(resourceTwoName, "primary_connection_string"), + acceptance.TestCheckResourceAttrSet(resourceTwoName, "secondary_connection_string"), check.That(resourceThreeName).ExistsInAzure(r), - resource.TestCheckResourceAttr(resourceThreeName, "manage", "false"), - resource.TestCheckResourceAttr(resourceThreeName, "send", "true"), - resource.TestCheckResourceAttr(resourceThreeName, "listen", "true"), - resource.TestCheckResourceAttrSet(resourceThreeName, "primary_connection_string"), - resource.TestCheckResourceAttrSet(resourceThreeName, "secondary_connection_string"), + acceptance.TestCheckResourceAttr(resourceThreeName, "manage", "false"), + acceptance.TestCheckResourceAttr(resourceThreeName, "send", "true"), + acceptance.TestCheckResourceAttr(resourceThreeName, "listen", "true"), + acceptance.TestCheckResourceAttrSet(resourceThreeName, "primary_connection_string"), + acceptance.TestCheckResourceAttrSet(resourceThreeName, "secondary_connection_string"), ), }, data.ImportStep(), @@ -171,18 +170,18 @@ func TestAccEventHubNamespaceAuthorizationRule_multi(t *testing.T) { }) } -func (EventHubNamespaceAuthorizationRuleResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { - id, err := parse.NamespaceAuthorizationRuleID(state.ID) +func (EventHubNamespaceAuthorizationRuleResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { + id, err := authorizationrulesnamespaces.AuthorizationRuleID(state.ID) if err != nil { return nil, err } - resp, err := clients.Eventhub.NamespacesClient.GetAuthorizationRule(ctx, id.ResourceGroup, id.NamespaceName, id.AuthorizationRuleName) + resp, err := clients.Eventhub.NamespaceAuthorizationRulesClient.NamespacesGetAuthorizationRule(ctx, *id) if err != nil { return nil, fmt.Errorf("retrieving %s: %v", id.String(), err) } - return utils.Bool(resp.AuthorizationRuleProperties != nil), nil + return 
utils.Bool(resp.Model != nil), nil } func (EventHubNamespaceAuthorizationRuleResource) base(data acceptance.TestData, listen, send, manage bool) string { diff --git a/azurerm/internal/services/eventhub/eventhub_namespace_customer_managed_key_resource.go b/azurerm/internal/services/eventhub/eventhub_namespace_customer_managed_key_resource.go new file mode 100644 index 000000000000..87eb88c31bc6 --- /dev/null +++ b/azurerm/internal/services/eventhub/eventhub_namespace_customer_managed_key_resource.go @@ -0,0 +1,245 @@ +package eventhub + +import ( + "fmt" + "time" + + "github.com/hashicorp/go-azure-helpers/response" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/locks" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/sdk/namespaces" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/validate" + keyVaultParse "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/keyvault/parse" + keyVaultValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/keyvault/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func resourceEventHubNamespaceCustomerManagedKey() *pluginsdk.Resource { + return &pluginsdk.Resource{ + Create: resourceEventHubNamespaceCustomerManagedKeyCreateUpdate, + Read: resourceEventHubNamespaceCustomerManagedKeyRead, + Update: resourceEventHubNamespaceCustomerManagedKeyCreateUpdate, + Delete: resourceEventHubNamespaceCustomerManagedKeyDelete, + + Timeouts: &pluginsdk.ResourceTimeout{ + 
Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), + }, + + Importer: pluginsdk.DefaultImporter(), + + Schema: map[string]*pluginsdk.Schema{ + "eventhub_namespace_id": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.NamespaceID, + }, + + "key_vault_key_ids": { + Type: pluginsdk.TypeSet, + Required: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + ValidateFunc: keyVaultValidate.NestedItemIdWithOptionalVersion, + }, + }, + }, + } +} + +func resourceEventHubNamespaceCustomerManagedKeyCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).Eventhub.NamespacesClient + ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := namespaces.ParseNamespaceID(d.Get("eventhub_namespace_id").(string)) + if err != nil { + return err + } + + locks.ByName(id.Name, "azurerm_eventhub_namespace") + defer locks.UnlockByName(id.Name, "azurerm_eventhub_namespace") + + resp, err := client.Get(ctx, *id) + if err != nil { + return fmt.Errorf("retrieving %s: %+v", *id, err) + } + if resp.Model == nil { + return fmt.Errorf("retrieving %s: `model` was nil", *id) + } + + if d.IsNewResource() { + if resp.Model.Properties != nil && resp.Model.Properties.Encryption != nil { + return tf.ImportAsExistsError("azurerm_eventhub_namespace_customer_managed_key", id.ID()) + } + } + + namespace := resp.Model + + keySource := namespaces.KeySourceMicrosoftKeyVault + namespace.Properties.Encryption = &namespaces.Encryption{ + KeySource: &keySource, + } + + keyVaultProps, err := expandEventHubNamespaceKeyVaultKeyIds(d.Get("key_vault_key_ids").(*pluginsdk.Set).List()) + if err != nil { + return err + } + namespace.Properties.Encryption.KeyVaultProperties = keyVaultProps + + if err := 
client.CreateOrUpdateThenPoll(ctx, *id, *namespace); err != nil { + return fmt.Errorf("creating/updating %s: %+v", *id, err) + } + + d.SetId(id.ID()) + + return resourceEventHubNamespaceCustomerManagedKeyRead(d, meta) +} + +func resourceEventHubNamespaceCustomerManagedKeyRead(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).Eventhub.NamespacesClient + ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := namespaces.ParseNamespaceID(d.Id()) + if err != nil { + return err + } + + resp, err := client.Get(ctx, *id) + if err != nil { + if response.WasNotFound(resp.HttpResponse) { + d.SetId("") + return nil + } + return fmt.Errorf("retrieving %s: %+v", *id, err) + } + if resp.Model == nil { + return fmt.Errorf("retrieving %s: `model` was nil", *id) + } + if resp.Model.Properties == nil && resp.Model.Properties.Encryption == nil { + d.SetId("") + return nil + } + + d.Set("eventhub_namespace_id", id.ID()) + + if props := resp.Model.Properties; props != nil { + keyVaultKeyIds, err := flattenEventHubNamespaceKeyVaultKeyIds(props.Encryption) + if err != nil { + return err + } + + d.Set("key_vault_key_ids", keyVaultKeyIds) + } + + return nil +} + +func resourceEventHubNamespaceCustomerManagedKeyDelete(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).Eventhub.NamespacesClient + ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := namespaces.ParseNamespaceID(d.Id()) + if err != nil { + return err + } + + locks.ByName(id.Name, "azurerm_eventhub_namespace") + defer locks.UnlockByName(id.Name, "azurerm_eventhub_namespace") + + resp, err := client.Get(ctx, *id) + if err != nil { + if response.WasNotFound(resp.HttpResponse) { + return nil + } + return fmt.Errorf("retrieving %s: %+v", *id, err) + } + + // Since this isn't a real object and it cannot be disabled once Customer Managed Key at rest has been 
enabled + // And it must keep at least one key once Customer Managed Key is enabled + // So for the delete operation, it has to recreate the EventHub Namespace with disabled Customer Managed Key + future, err := client.Delete(ctx, *id) + if err != nil { + if response.WasNotFound(future.HttpResponse) { + return nil + } + return fmt.Errorf("deleting %s: %+v", *id, err) + } + + if err := waitForEventHubNamespaceToBeDeleted(ctx, client, *id); err != nil { + return err + } + + namespace := resp.Model + namespace.Properties.Encryption = nil + + if err = client.CreateOrUpdateThenPoll(ctx, *id, *namespace); err != nil { + return fmt.Errorf("removing %s: %+v", *id, err) + } + + return nil +} + +func expandEventHubNamespaceKeyVaultKeyIds(input []interface{}) (*[]namespaces.KeyVaultProperties, error) { + if len(input) == 0 { + return nil, nil + } + + results := make([]namespaces.KeyVaultProperties, 0) + + for _, item := range input { + keyId, err := keyVaultParse.ParseOptionallyVersionedNestedItemID(item.(string)) + if err != nil { + return nil, err + } + + results = append(results, namespaces.KeyVaultProperties{ + KeyName: utils.String(keyId.Name), + KeyVaultUri: utils.String(keyId.KeyVaultBaseUrl), + KeyVersion: utils.String(keyId.Version), + }) + } + + return &results, nil +} + +func flattenEventHubNamespaceKeyVaultKeyIds(input *namespaces.Encryption) ([]interface{}, error) { + results := make([]interface{}, 0) + if input == nil || input.KeyVaultProperties == nil { + return results, nil + } + + for _, item := range *input.KeyVaultProperties { + var keyName string + if item.KeyName != nil { + keyName = *item.KeyName + } + + var keyVaultUri string + if item.KeyVaultUri != nil { + keyVaultUri = *item.KeyVaultUri + } + + var keyVersion string + if item.KeyVersion != nil { + keyVersion = *item.KeyVersion + } + + keyVaultKeyId, err := keyVaultParse.NewNestedItemID(keyVaultUri, "keys", keyName, keyVersion) + if err != nil { + return nil, err + } + + results = append(results, 
keyVaultKeyId.ID()) + } + + return results, nil +} diff --git a/azurerm/internal/services/eventhub/eventhub_namespace_customer_managed_key_resource_test.go b/azurerm/internal/services/eventhub/eventhub_namespace_customer_managed_key_resource_test.go new file mode 100644 index 000000000000..d45604eba18e --- /dev/null +++ b/azurerm/internal/services/eventhub/eventhub_namespace_customer_managed_key_resource_test.go @@ -0,0 +1,260 @@ +package eventhub_test + +import ( + "context" + "fmt" + "testing" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/sdk/namespaces" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +type EventHubNamespaceCustomerManagedKeyResource struct { +} + +func TestAccEventHubNamespaceCustomerManagedKey_basic(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_eventhub_namespace_customer_managed_key", "test") + r := EventHubNamespaceCustomerManagedKeyResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccEventHubNamespaceCustomerManagedKey_requiresImport(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_eventhub_namespace_customer_managed_key", "test") + r := EventHubNamespaceCustomerManagedKeyResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + 
data.RequiresImportErrorStep(r.requiresImport), + }) +} + +func TestAccEventHubNamespaceCustomerManagedKey_complete(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_eventhub_namespace_customer_managed_key", "test") + r := EventHubNamespaceCustomerManagedKeyResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.complete(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccEventHubNamespaceCustomerManagedKey_update(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_eventhub_namespace_customer_managed_key", "test") + r := EventHubNamespaceCustomerManagedKeyResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.update(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func (r EventHubNamespaceCustomerManagedKeyResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { + id, err := namespaces.ParseNamespaceID(state.ID) + if err != nil { + return nil, err + } + + resp, err := clients.Eventhub.NamespacesClient.Get(ctx, *id) + if err != nil { + return nil, fmt.Errorf("retrieving %s: %v", id.String(), err) + } + if resp.Model == nil { + return nil, fmt.Errorf("retrieving %s: `model` was nil", *id) + } + + if resp.Model.Properties == nil || resp.Model.Properties.Encryption == nil { + return utils.Bool(false), nil + } + + return utils.Bool(true), nil +} + +func (r EventHubNamespaceCustomerManagedKeyResource) requiresImport(data acceptance.TestData) string { + template := EventHubNamespaceCustomerManagedKeyResource{}.basic(data) + return fmt.Sprintf(` +%s + +resource "azurerm_eventhub_namespace_customer_managed_key" 
"import" { + eventhub_namespace_id = azurerm_eventhub_namespace_customer_managed_key.test.eventhub_namespace_id + key_vault_key_ids = azurerm_eventhub_namespace_customer_managed_key.test.key_vault_key_ids +} +`, template) +} + +func (r EventHubNamespaceCustomerManagedKeyResource) basic(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_eventhub_namespace_customer_managed_key" "test" { + eventhub_namespace_id = azurerm_eventhub_namespace.test.id + key_vault_key_ids = [azurerm_key_vault_key.test.id] +} +`, r.template(data)) +} + +func (r EventHubNamespaceCustomerManagedKeyResource) update(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_key_vault_key" "test2" { + name = "acctestkvkey2%s" + key_vault_id = azurerm_key_vault.test.id + key_type = "RSA" + key_size = 2048 + key_opts = ["decrypt", "encrypt", "sign", "unwrapKey", "verify", "wrapKey"] + + depends_on = [ + azurerm_key_vault_access_policy.test, + azurerm_key_vault_access_policy.test2, + ] +} + +resource "azurerm_eventhub_namespace_customer_managed_key" "test" { + eventhub_namespace_id = azurerm_eventhub_namespace.test.id + key_vault_key_ids = [azurerm_key_vault_key.test2.id] +} +`, r.template(data), data.RandomString) +} + +func (r EventHubNamespaceCustomerManagedKeyResource) complete(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_key_vault_key" "test2" { + name = "acctestkvkey2%s" + key_vault_id = azurerm_key_vault.test.id + key_type = "RSA" + key_size = 2048 + key_opts = ["decrypt", "encrypt", "sign", "unwrapKey", "verify", "wrapKey"] + + depends_on = [ + azurerm_key_vault_access_policy.test, + azurerm_key_vault_access_policy.test2, + ] +} + +resource "azurerm_eventhub_namespace_customer_managed_key" "test" { + eventhub_namespace_id = azurerm_eventhub_namespace.test.id + key_vault_key_ids = [azurerm_key_vault_key.test.id, azurerm_key_vault_key.test2.id] +} +`, r.template(data), data.RandomString) +} + +func 
(r EventHubNamespaceCustomerManagedKeyResource) template(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features { + key_vault { + purge_soft_delete_on_destroy = false + } + } +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-namespacecmk-%d" + location = "%s" +} + +resource "azurerm_eventhub_cluster" "test" { + name = "acctest-cluster-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + sku_name = "Dedicated_1" +} + +resource "azurerm_eventhub_namespace" "test" { + name = "acctest-namespace-%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + sku = "Standard" + dedicated_cluster_id = azurerm_eventhub_cluster.test.id + + identity { + type = "SystemAssigned" + } +} + +data "azurerm_client_config" "current" {} + +resource "azurerm_key_vault" "test" { + name = "acctestkv%s" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + tenant_id = data.azurerm_client_config.current.tenant_id + sku_name = "standard" + soft_delete_enabled = true + purge_protection_enabled = true +} + +resource "azurerm_key_vault_access_policy" "test" { + key_vault_id = azurerm_key_vault.test.id + tenant_id = azurerm_eventhub_namespace.test.identity.0.tenant_id + object_id = azurerm_eventhub_namespace.test.identity.0.principal_id + + key_permissions = ["get", "unwrapkey", "wrapkey"] +} + +resource "azurerm_key_vault_access_policy" "test2" { + key_vault_id = azurerm_key_vault.test.id + tenant_id = data.azurerm_client_config.current.tenant_id + object_id = data.azurerm_client_config.current.object_id + + key_permissions = [ + "create", + "delete", + "get", + "list", + "purge", + "recover", + ] +} + +resource "azurerm_key_vault_key" "test" { + name = "acctestkvkey%s" + key_vault_id = azurerm_key_vault.test.id + key_type = "RSA" + key_size = 2048 + key_opts = ["decrypt", 
"encrypt", "sign", "unwrapKey", "verify", "wrapKey"] + + depends_on = [ + azurerm_key_vault_access_policy.test, + azurerm_key_vault_access_policy.test2, + ] +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomString, data.RandomString) +} diff --git a/azurerm/internal/services/eventhub/eventhub_namespace_data_source.go b/azurerm/internal/services/eventhub/eventhub_namespace_data_source.go index 8dee650bc277..701424c6647d 100644 --- a/azurerm/internal/services/eventhub/eventhub_namespace_data_source.go +++ b/azurerm/internal/services/eventhub/eventhub_namespace_data_source.go @@ -5,25 +5,28 @@ import ( "log" "time" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/go-azure-helpers/response" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/location" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/sdk/namespaces" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func EventHubNamespaceDataSource() *schema.Resource { - return &schema.Resource{ +func EventHubNamespaceDataSource() *pluginsdk.Resource { + return &pluginsdk.Resource{ Read: EventHubNamespaceDataSourceRead, - Timeouts: &schema.ResourceTimeout{ - Read: schema.DefaultTimeout(5 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Read: pluginsdk.DefaultTimeout(5 * 
time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, @@ -32,73 +35,73 @@ func EventHubNamespaceDataSource() *schema.Resource { "location": azure.SchemaLocationForDataSource(), "default_primary_connection_string_alias": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, }, "default_secondary_connection_string_alias": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, }, "auto_inflate_enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Computed: true, }, "zone_redundant": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Computed: true, }, "dedicated_cluster_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "capacity": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Computed: true, }, "kafka_enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Computed: true, }, "maximum_throughput_units": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Computed: true, }, "default_primary_connection_string": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, }, "default_primary_key": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, }, "default_secondary_connection_string": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, }, "default_secondary_key": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, }, "sku": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, @@ -107,53 +110,61 @@ func EventHubNamespaceDataSource() *schema.Resource { } } -func EventHubNamespaceDataSourceRead(d *schema.ResourceData, meta interface{}) error { +func EventHubNamespaceDataSourceRead(d *pluginsdk.ResourceData, meta interface{}) error { 
client := meta.(*clients.Client).Eventhub.NamespacesClient + authorizationRulesClient := meta.(*clients.Client).Eventhub.NamespaceAuthorizationRulesClient + subscriptionId := meta.(*clients.Client).Account.SubscriptionId ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() - resourceGroup := d.Get("resource_group_name").(string) - name := d.Get("name").(string) - - resp, err := client.Get(ctx, resourceGroup, name) + id := namespaces.NewNamespaceID(subscriptionId, d.Get("resource_group_name").(string), d.Get("name").(string)) + resp, err := client.Get(ctx, id) if err != nil { - if utils.ResponseWasNotFound(resp.Response) { - return fmt.Errorf("Error: EventHub Namespace %q (Resource Group %q) was not found", name, resourceGroup) + if response.WasNotFound(resp.HttpResponse) { + return fmt.Errorf("%s was not found", id) } - return fmt.Errorf("Error making Read request on EventHub Namespace %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("retrieving %s: %+v", id, err) } - d.SetId(*resp.ID) + d.SetId(id.ID()) - d.Set("name", resp.Name) - d.Set("resource_group_name", resourceGroup) - if location := resp.Location; location != nil { - d.Set("location", azure.NormalizeLocation(*location)) - } + d.Set("name", id.Name) + d.Set("resource_group_name", id.ResourceGroup) - d.Set("sku", string(resp.Sku.Name)) - d.Set("capacity", resp.Sku.Capacity) + if model := resp.Model; model != nil { + d.Set("location", location.NormalizeNilable(model.Location)) + if sku := model.Sku; sku != nil { + d.Set("sku", string(sku.Name)) + d.Set("capacity", sku.Capacity) + } - keys, err := client.ListKeys(ctx, resourceGroup, name, eventHubNamespaceDefaultAuthorizationRule) - if err != nil { - log.Printf("[WARN] Unable to List default keys for EventHub Namespace %q (Resource Group %q): %+v", name, resourceGroup, err) - } else { - d.Set("default_primary_connection_string_alias", keys.AliasPrimaryConnectionString) - 
d.Set("default_secondary_connection_string_alias", keys.AliasSecondaryConnectionString) - d.Set("default_primary_connection_string", keys.PrimaryConnectionString) - d.Set("default_secondary_connection_string", keys.SecondaryConnectionString) - d.Set("default_primary_key", keys.PrimaryKey) - d.Set("default_secondary_key", keys.SecondaryKey) + if props := model.Properties; props != nil { + d.Set("auto_inflate_enabled", props.IsAutoInflateEnabled) + d.Set("kafka_enabled", props.KafkaEnabled) + d.Set("maximum_throughput_units", int(*props.MaximumThroughputUnits)) + d.Set("zone_redundant", props.ZoneRedundant) + d.Set("dedicated_cluster_id", props.ClusterArmId) + } + + if err := tags.FlattenAndSet(d, flattenTags(model.Tags)); err != nil { + return fmt.Errorf("setting `tags`: %+v", err) + } } - if props := resp.EHNamespaceProperties; props != nil { - d.Set("auto_inflate_enabled", props.IsAutoInflateEnabled) - d.Set("kafka_enabled", props.KafkaEnabled) - d.Set("maximum_throughput_units", int(*props.MaximumThroughputUnits)) - d.Set("zone_redundant", props.ZoneRedundant) - d.Set("dedicated_cluster_id", props.ClusterArmID) + defaultRuleId := authorizationrulesnamespaces.NewAuthorizationRuleID(id.SubscriptionId, id.ResourceGroup, id.Name, eventHubNamespaceDefaultAuthorizationRule) + keys, err := authorizationRulesClient.NamespacesListKeys(ctx, defaultRuleId) + if err != nil { + log.Printf("[WARN] Unable to List default keys for %s: %+v", id, err) + } + if model := keys.Model; model != nil { + d.Set("default_primary_connection_string_alias", model.AliasPrimaryConnectionString) + d.Set("default_secondary_connection_string_alias", model.AliasSecondaryConnectionString) + d.Set("default_primary_connection_string", model.PrimaryConnectionString) + d.Set("default_secondary_connection_string", model.SecondaryConnectionString) + d.Set("default_primary_key", model.PrimaryKey) + d.Set("default_secondary_key", model.SecondaryKey) } - return tags.FlattenAndSet(d, resp.Tags) + return nil } 
diff --git a/azurerm/internal/services/eventhub/eventhub_namespace_data_source_test.go b/azurerm/internal/services/eventhub/eventhub_namespace_data_source_test.go index efd8cb0918f4..3509f2c5cba3 100644 --- a/azurerm/internal/services/eventhub/eventhub_namespace_data_source_test.go +++ b/azurerm/internal/services/eventhub/eventhub_namespace_data_source_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -16,10 +15,10 @@ func TestAccEventHubNamespaceDataSource_basic(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_eventhub_namespace", "test") r := EventHubNamespaceDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("sku").HasValue("Basic"), ), }, @@ -30,10 +29,10 @@ func TestAccEventHubNamespaceDataSource_complete(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_eventhub_namespace", "test") r := EventHubNamespaceDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.complete(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("sku").HasValue("Standard"), check.That(data.ResourceName).Key("capacity").HasValue("2"), check.That(data.ResourceName).Key("auto_inflate_enabled").HasValue("true"), @@ -47,15 +46,15 @@ func TestAccEventHubNamespaceDataSource_withAliasConnectionString(t *testing.T) data := acceptance.BuildTestData(t, "data.azurerm_eventhub_namespace", "test") r := EventHubNamespaceDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + 
data.DataSourceTest(t, []acceptance.TestStep{ { - // `default_primary_connection_string_alias` and `default_secondary_connection_string_alias` are still `nil` while `data.azurerm_eventhub_namespace` is retrieving resource. since `azurerm_eventhub_namespace_disaster_recovery_config` hasn't been created. + // `default_primary_connection_string_alias` and `default_secondary_connection_string_alias` are still `nil` while `data.azurerm_eventhub_namespace` is retrieving acceptance. since `azurerm_eventhub_namespace_disaster_recovery_config` hasn't been created. // So these two properties should be checked in the second run. Config: r.withAliasConnectionString(data), }, { Config: r.withAliasConnectionString(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("default_primary_connection_string_alias").Exists(), check.That(data.ResourceName).Key("default_secondary_connection_string_alias").Exists(), ), diff --git a/azurerm/internal/services/eventhub/eventhub_namespace_disaster_recovery_config_resource.go b/azurerm/internal/services/eventhub/eventhub_namespace_disaster_recovery_config_resource.go index d64e88296c12..8033de476b0c 100644 --- a/azurerm/internal/services/eventhub/eventhub_namespace_disaster_recovery_config_resource.go +++ b/azurerm/internal/services/eventhub/eventhub_namespace_disaster_recovery_config_resource.go @@ -4,25 +4,24 @@ import ( "context" "fmt" "log" - "net/http" "strconv" "time" - "github.com/Azure/azure-sdk-for-go/services/preview/eventhub/mgmt/2018-01-01-preview/eventhub" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/go-azure-helpers/response" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/locks" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/sdk/checknameavailabilitydisasterrecoveryconfigs" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/sdk/disasterrecoveryconfigs" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func resourceEventHubNamespaceDisasterRecoveryConfig() *schema.Resource { - return &schema.Resource{ +func resourceEventHubNamespaceDisasterRecoveryConfig() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceEventHubNamespaceDisasterRecoveryConfigCreate, Read: resourceEventHubNamespaceDisasterRecoveryConfigRead, Update: resourceEventHubNamespaceDisasterRecoveryConfigUpdate, @@ -31,23 +30,23 @@ func resourceEventHubNamespaceDisasterRecoveryConfig() *schema.Resource { // TODO: replace this with an importer which validates the ID during import Importer: pluginsdk.DefaultImporter(), - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(30 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: 
schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.ValidateEventHubAuthorizationRuleName(), }, "namespace_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.ValidateEventHubNamespaceName(), @@ -56,14 +55,14 @@ func resourceEventHubNamespaceDisasterRecoveryConfig() *schema.Resource { "resource_group_name": azure.SchemaResourceGroupName(), "partner_namespace_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: azure.ValidateResourceIDOrEmpty, }, // this property is broken and should not be reimplemented after 3.0 until this is addressed: https://github.com/Azure/azure-sdk-for-go/issues/5893 "alternate_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ValidateFunc: validate.ValidateEventHubNamespaceName(), Deprecated: "This property has been deprecated and will be removed in v3.0 of the provider as any DRC created with an alternate name cannot be deleted and the service is not going to change this. 
Please see: https://github.com/Azure/azure-sdk-for-go/issues/5893", @@ -72,253 +71,245 @@ func resourceEventHubNamespaceDisasterRecoveryConfig() *schema.Resource { } } -func resourceEventHubNamespaceDisasterRecoveryConfigCreate(d *schema.ResourceData, meta interface{}) error { +func resourceEventHubNamespaceDisasterRecoveryConfigCreate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Eventhub.DisasterRecoveryConfigsClient + subscriptionId := meta.(*clients.Client).Account.SubscriptionId ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() log.Printf("[INFO] preparing arguments for AzureRM EventHub Namespace Disaster Recovery Configs creation.") - name := d.Get("name").(string) - namespaceName := d.Get("namespace_name").(string) - resourceGroup := d.Get("resource_group_name").(string) + id := disasterrecoveryconfigs.NewDisasterRecoveryConfigID(subscriptionId, d.Get("resource_group_name").(string), d.Get("namespace_name").(string), d.Get("name").(string)) if d.IsNewResource() { - existing, err := client.Get(ctx, resourceGroup, namespaceName, name) + existing, err := client.Get(ctx, id) if err != nil { - if !utils.ResponseWasNotFound(existing.Response) { - return fmt.Errorf("Error checking for presence of existing EventHub Namespace Disaster Recovery Configs %q (Namespace %q / Resource Group %q): %s", name, namespaceName, resourceGroup, err) + if !response.WasNotFound(existing.HttpResponse) { + return fmt.Errorf("checking for presence of existing %s: %+v", id, err) } } - if existing.ID != nil && *existing.ID != "" { - return tf.ImportAsExistsError("azurerm_eventhub_namespace_disaster_recovery_config", *existing.ID) + if !response.WasNotFound(existing.HttpResponse) { + return tf.ImportAsExistsError("azurerm_eventhub_namespace_disaster_recovery_config", id.ID()) } } - locks.ByName(namespaceName, eventHubNamespaceResourceName) - defer locks.UnlockByName(namespaceName, eventHubNamespaceResourceName) + 
locks.ByName(id.NamespaceName, eventHubNamespaceResourceName) + defer locks.UnlockByName(id.NamespaceName, eventHubNamespaceResourceName) - parameters := eventhub.ArmDisasterRecovery{ - ArmDisasterRecoveryProperties: &eventhub.ArmDisasterRecoveryProperties{ + parameters := disasterrecoveryconfigs.ArmDisasterRecovery{ + Properties: &disasterrecoveryconfigs.ArmDisasterRecoveryProperties{ PartnerNamespace: utils.String(d.Get("partner_namespace_id").(string)), }, } if v, ok := d.GetOk("alternate_name"); ok { - parameters.ArmDisasterRecoveryProperties.AlternateName = utils.String(v.(string)) + parameters.Properties.AlternateName = utils.String(v.(string)) } - if _, err := client.CreateOrUpdate(ctx, resourceGroup, namespaceName, name, parameters); err != nil { - return fmt.Errorf("Error creating/updating EventHub Namespace Disaster Recovery Configs %q (Namespace %q / Resource Group %q): %s", name, namespaceName, resourceGroup, err) + if _, err := client.CreateOrUpdate(ctx, id, parameters); err != nil { + return fmt.Errorf("creating %s: %+v", id, err) } - if err := resourceEventHubNamespaceDisasterRecoveryConfigWaitForState(ctx, client, resourceGroup, namespaceName, name, d.Timeout(schema.TimeoutCreate)); err != nil { - return fmt.Errorf("Error waiting for replication to complete for EventHub Namespace Disaster Recovery Configs %q (Namespace %q / Resource Group %q): %s", name, namespaceName, resourceGroup, err) + if err := resourceEventHubNamespaceDisasterRecoveryConfigWaitForState(ctx, client, id); err != nil { + return fmt.Errorf("waiting for replication of %s: %+v", id, err) } - read, err := client.Get(ctx, resourceGroup, namespaceName, name) - if err != nil { - return fmt.Errorf("Error reading EventHub Namespace Disaster Recovery Configs %q (Namespace %q / Resource Group %q): %v", name, namespaceName, resourceGroup, err) - } - - if read.ID == nil { - return fmt.Errorf("Got nil ID for EventHub Namespace Disaster Recovery Configs %q (Namespace %q / Resource Group %q)", 
name, namespaceName, resourceGroup) - } - - d.SetId(*read.ID) - + d.SetId(id.ID()) return resourceEventHubNamespaceDisasterRecoveryConfigRead(d, meta) } -func resourceEventHubNamespaceDisasterRecoveryConfigUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceEventHubNamespaceDisasterRecoveryConfigUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Eventhub.DisasterRecoveryConfigsClient ctx, cancel := timeouts.ForUpdate(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := azure.ParseAzureResourceID(d.Id()) + id, err := disasterrecoveryconfigs.DisasterRecoveryConfigID(d.Id()) if err != nil { return err } - name := id.Path["disasterRecoveryConfigs"] - resourceGroup := id.ResourceGroup - namespaceName := id.Path["namespaces"] - - locks.ByName(namespaceName, eventHubNamespaceResourceName) - defer locks.UnlockByName(namespaceName, eventHubNamespaceResourceName) + locks.ByName(id.NamespaceName, eventHubNamespaceResourceName) + defer locks.UnlockByName(id.NamespaceName, eventHubNamespaceResourceName) if d.HasChange("partner_namespace_id") { // break pairing - breakPair, err := client.BreakPairing(ctx, resourceGroup, namespaceName, name) - if breakPair.StatusCode != http.StatusOK { - return fmt.Errorf("Error issuing break pairing request for EventHub Namespace Disaster Recovery Configs %q (Namespace %q / Resource Group %q): %s", name, namespaceName, resourceGroup, err) + if _, err := client.BreakPairing(ctx, *id); err != nil { + return fmt.Errorf("breaking the pairing for %s: %+v", *id, err) } - if err := resourceEventHubNamespaceDisasterRecoveryConfigWaitForState(ctx, client, resourceGroup, namespaceName, name, d.Timeout(schema.TimeoutUpdate)); err != nil { - return fmt.Errorf("Error waiting for break pairing request to complete for EventHub Namespace Disaster Recovery Configs %q (Namespace %q / Resource Group %q): %s", name, namespaceName, resourceGroup, err) + if err := 
resourceEventHubNamespaceDisasterRecoveryConfigWaitForState(ctx, client, *id); err != nil { + return fmt.Errorf("waiting for the pairing to be broken for %s: %+v", *id, err) } } - parameters := eventhub.ArmDisasterRecovery{ - ArmDisasterRecoveryProperties: &eventhub.ArmDisasterRecoveryProperties{ + parameters := disasterrecoveryconfigs.ArmDisasterRecovery{ + Properties: &disasterrecoveryconfigs.ArmDisasterRecoveryProperties{ PartnerNamespace: utils.String(d.Get("partner_namespace_id").(string)), }, } if v, ok := d.GetOk("alternate_name"); ok { - parameters.ArmDisasterRecoveryProperties.AlternateName = utils.String(v.(string)) + parameters.Properties.AlternateName = utils.String(v.(string)) } - if _, err := client.CreateOrUpdate(ctx, resourceGroup, namespaceName, name, parameters); err != nil { - return fmt.Errorf("Error creating/updating EventHub Namespace Disaster Recovery Configs %q (Namespace %q / Resource Group %q): %s", name, namespaceName, resourceGroup, err) + if _, err := client.CreateOrUpdate(ctx, *id, parameters); err != nil { + return fmt.Errorf("updating %s: %+v", *id, err) } - if err := resourceEventHubNamespaceDisasterRecoveryConfigWaitForState(ctx, client, resourceGroup, namespaceName, name, d.Timeout(schema.TimeoutUpdate)); err != nil { - return fmt.Errorf("Error waiting for replication to complete for EventHub Namespace Disaster Recovery Configs %q (Namespace %q / Resource Group %q): %s", name, namespaceName, resourceGroup, err) + if err := resourceEventHubNamespaceDisasterRecoveryConfigWaitForState(ctx, client, *id); err != nil { + return fmt.Errorf("waiting for replication after update of %s: %+v", *id, err) } return resourceEventHubNamespaceDisasterRecoveryConfigRead(d, meta) } -func resourceEventHubNamespaceDisasterRecoveryConfigRead(d *schema.ResourceData, meta interface{}) error { +func resourceEventHubNamespaceDisasterRecoveryConfigRead(d *pluginsdk.ResourceData, meta interface{}) error { client := 
meta.(*clients.Client).Eventhub.DisasterRecoveryConfigsClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := azure.ParseAzureResourceID(d.Id()) + id, err := disasterrecoveryconfigs.DisasterRecoveryConfigID(d.Id()) if err != nil { return err } - name := id.Path["disasterRecoveryConfigs"] - resourceGroup := id.ResourceGroup - namespaceName := id.Path["namespaces"] - - resp, err := client.Get(ctx, resourceGroup, namespaceName, name) + resp, err := client.Get(ctx, *id) if err != nil { - if utils.ResponseWasNotFound(resp.Response) { + if response.WasNotFound(resp.HttpResponse) { d.SetId("") return nil } - return fmt.Errorf("Error making Read request on Azure EventHub Namespace Disaster Recovery Configs %q (Namespace %q / Resource Group %q): %s", name, namespaceName, resourceGroup, err) + return fmt.Errorf("retrieving %s: %+v", *id, err) } - d.Set("name", name) - d.Set("namespace_name", namespaceName) - d.Set("resource_group_name", resourceGroup) + d.Set("name", id.Name) + d.Set("namespace_name", id.NamespaceName) + d.Set("resource_group_name", id.ResourceGroup) - if properties := resp.ArmDisasterRecoveryProperties; properties != nil { - d.Set("partner_namespace_id", properties.PartnerNamespace) - d.Set("alternate_name", properties.AlternateName) + if model := resp.Model; model != nil && model.Properties != nil { + d.Set("partner_namespace_id", model.Properties.PartnerNamespace) + d.Set("alternate_name", model.Properties.AlternateName) } return nil } -func resourceEventHubNamespaceDisasterRecoveryConfigDelete(d *schema.ResourceData, meta interface{}) error { +func resourceEventHubNamespaceDisasterRecoveryConfigDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Eventhub.DisasterRecoveryConfigsClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := azure.ParseAzureResourceID(d.Id()) + id, err := 
disasterrecoveryconfigs.DisasterRecoveryConfigID(d.Id()) if err != nil { return err } - name := id.Path["disasterRecoveryConfigs"] - resourceGroup := id.ResourceGroup - namespaceName := id.Path["namespaces"] + locks.ByName(id.NamespaceName, eventHubNamespaceResourceName) + defer locks.UnlockByName(id.NamespaceName, eventHubNamespaceResourceName) - locks.ByName(namespaceName, eventHubNamespaceResourceName) - defer locks.UnlockByName(namespaceName, eventHubNamespaceResourceName) - - breakPair, err := client.BreakPairing(ctx, resourceGroup, namespaceName, name) - if err != nil { - return fmt.Errorf("Error issuing break pairing request for EventHub Namespace Disaster Recovery Configs %q (Namespace %q / Resource Group %q): %s", name, namespaceName, resourceGroup, err) + if _, err := client.BreakPairing(ctx, *id); err != nil { + return fmt.Errorf("breaking pairing of %s: %+v", *id, err) } - if breakPair.StatusCode != http.StatusOK { - return fmt.Errorf("Error breaking pairing for EventHub Namespace Disaster Recovery Configs %q (Namespace %q / Resource Group %q): %s", name, namespaceName, resourceGroup, err) + + if err := resourceEventHubNamespaceDisasterRecoveryConfigWaitForState(ctx, client, *id); err != nil { + return fmt.Errorf("waiting for pairing to break for %s: %+v", *id, err) } - if err := resourceEventHubNamespaceDisasterRecoveryConfigWaitForState(ctx, client, resourceGroup, namespaceName, name, d.Timeout(schema.TimeoutDelete)); err != nil { - return fmt.Errorf("Error waiting for break pairing request to complete for EventHub Namespace Disaster Recovery Configs %q (Namespace %q / Resource Group %q): %s", name, namespaceName, resourceGroup, err) + if _, err := client.Delete(ctx, *id); err != nil { + return fmt.Errorf("deleting %s: %+v", *id, err) } - if _, err := client.Delete(ctx, resourceGroup, namespaceName, name); err != nil { - return fmt.Errorf("Error issuing delete request for EventHub Namespace Disaster Recovery Configs %q (Namespace %q / Resource Group 
%q): %s", name, namespaceName, resourceGroup, err) + deadline, ok := ctx.Deadline() + if !ok { + return fmt.Errorf("context has no deadline") } // no future for deletion so wait for it to vanish - deleteWait := &resource.StateChangeConf{ + deleteWait := &pluginsdk.StateChangeConf{ Pending: []string{"200"}, Target: []string{"404"}, MinTimeout: 30 * time.Second, - Timeout: d.Timeout(schema.TimeoutDelete), + Timeout: time.Until(deadline), Refresh: func() (interface{}, string, error) { - resp, err := client.Get(ctx, resourceGroup, namespaceName, name) + resp, err := client.Get(ctx, *id) if err != nil { - if utils.ResponseWasNotFound(resp.Response) { - return resp, strconv.Itoa(resp.StatusCode), nil + if response.WasNotFound(resp.HttpResponse) { + return resp, strconv.Itoa(resp.HttpResponse.StatusCode), nil } - return nil, "nil", fmt.Errorf("Error polling for the status of the EventHub Namespace Disaster Recovery Configs %q deletion (Namespace %q / Resource Group %q): %v", name, namespaceName, resourceGroup, err) + return nil, "nil", fmt.Errorf("polling to check the deletion state for %s: %+v", *id, err) } - return resp, strconv.Itoa(resp.StatusCode), nil + return resp, strconv.Itoa(resp.HttpResponse.StatusCode), nil }, } - if _, err := deleteWait.WaitForState(); err != nil { - return fmt.Errorf("Error waiting the deletion of EventHub Namespace Disaster Recovery Configs %q deletion (Namespace %q / Resource Group %q): %v", name, namespaceName, resourceGroup, err) + if _, err := deleteWait.WaitForStateContext(ctx); err != nil { + return fmt.Errorf("waiting the deletion of %s: %+v", *id, err) } // it can take some time for the name to become available again - // this is mainly here to enable updating the resource in place - nameFreeWait := &resource.StateChangeConf{ + // this is mainly here to enable updating the resource in place + parentNamespaceId := checknameavailabilitydisasterrecoveryconfigs.NewNamespaceID(id.SubscriptionId, id.ResourceGroup, id.NamespaceName) + 
availabilityClient := meta.(*clients.Client).Eventhub.DisasterRecoveryNameAvailabilityClient + nameFreeWait := &pluginsdk.StateChangeConf{ Pending: []string{"NameInUse"}, Target: []string{"None"}, MinTimeout: 30 * time.Second, - Timeout: d.Timeout(schema.TimeoutDelete), + Timeout: d.Timeout(pluginsdk.TimeoutDelete), Refresh: func() (interface{}, string, error) { - resp, err := client.CheckNameAvailability(ctx, resourceGroup, namespaceName, eventhub.CheckNameAvailabilityParameter{Name: utils.String(name)}) + input := checknameavailabilitydisasterrecoveryconfigs.CheckNameAvailabilityParameter{ + Name: id.Name, + } + resp, err := availabilityClient.DisasterRecoveryConfigsCheckNameAvailability(ctx, parentNamespaceId, input) if err != nil { - return resp, "Error", fmt.Errorf("Error checking if the EventHub Namespace Disaster Recovery Configs %q name has been freed (Namespace %q / Resource Group %q): %v", name, namespaceName, resourceGroup, err) + return resp, "Error", fmt.Errorf("waiting for the name of %s to become free: %v", *id, err) } - - return resp, string(resp.Reason), nil + // TODO: new crash to handle here + return resp, string(*resp.Model.Reason), nil }, } - if _, err := nameFreeWait.WaitForState(); err != nil { - return fmt.Errorf("Error waiting the the EventHub Namespace Disaster Recovery Configs %q name to be available (Namespace %q / Resource Group %q): %v", name, namespaceName, resourceGroup, err) + if _, err := nameFreeWait.WaitForStateContext(ctx); err != nil { + return err } return nil } -func resourceEventHubNamespaceDisasterRecoveryConfigWaitForState(ctx context.Context, client *eventhub.DisasterRecoveryConfigsClient, resourceGroup, namespaceName, name string, timeout time.Duration) error { - stateConf := &resource.StateChangeConf{ - Pending: []string{string(eventhub.ProvisioningStateDRAccepted)}, - Target: []string{string(eventhub.ProvisioningStateDRSucceeded)}, +func resourceEventHubNamespaceDisasterRecoveryConfigWaitForState(ctx context.Context, 
client *disasterrecoveryconfigs.DisasterRecoveryConfigsClient, id disasterrecoveryconfigs.DisasterRecoveryConfigId) error { + deadline, ok := ctx.Deadline() + if !ok { + return fmt.Errorf("context had no deadline") + } + stateConf := &pluginsdk.StateChangeConf{ + Pending: []string{string(disasterrecoveryconfigs.ProvisioningStateDRAccepted)}, + Target: []string{string(disasterrecoveryconfigs.ProvisioningStateDRSucceeded)}, MinTimeout: 30 * time.Second, - Timeout: timeout, + Timeout: time.Until(deadline), Refresh: func() (interface{}, string, error) { - read, err := client.Get(ctx, resourceGroup, namespaceName, name) + read, err := client.Get(ctx, id) if err != nil { - return nil, "error", fmt.Errorf("Wait read EventHub Namespace Disaster Recovery Configs %q (Namespace %q / Resource Group %q): %v", name, namespaceName, resourceGroup, err) + return nil, "error", fmt.Errorf("retrieving %s: %+v", id, err) } - if props := read.ArmDisasterRecoveryProperties; props != nil { - if props.ProvisioningState == eventhub.ProvisioningStateDRFailed { - return read, "failed", fmt.Errorf("Replication for EventHub Namespace Disaster Recovery Configs %q (Namespace %q / Resource Group %q) failed!", name, namespaceName, resourceGroup) + if model := read.Model; model != nil { + if props := model.Properties; props != nil { + if props.ProvisioningState == nil { + return read, "failed", fmt.Errorf("provisioningState was empty") + } + + if *props.ProvisioningState == disasterrecoveryconfigs.ProvisioningStateDRFailed { + return read, "failed", fmt.Errorf("replication failed for %s: %+v", id, err) + } + return read, string(*props.ProvisioningState), nil } - return read, string(props.ProvisioningState), nil } - return read, "nil", fmt.Errorf("Waiting for replication error EventHub Namespace Disaster Recovery Configs %q (Namespace %q / Resource Group %q): provisioning state is nil", name, namespaceName, resourceGroup) + return read, "nil", fmt.Errorf("waiting for replication of %s: %+v", id, err) 
}, } - _, err := stateConf.WaitForState() + _, err := stateConf.WaitForStateContext(ctx) return err } diff --git a/azurerm/internal/services/eventhub/eventhub_namespace_disaster_recovery_config_resource_test.go b/azurerm/internal/services/eventhub/eventhub_namespace_disaster_recovery_config_resource_test.go index 501c861304e8..27959fc2b187 100644 --- a/azurerm/internal/services/eventhub/eventhub_namespace_disaster_recovery_config_resource_test.go +++ b/azurerm/internal/services/eventhub/eventhub_namespace_disaster_recovery_config_resource_test.go @@ -5,12 +5,11 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/sdk/disasterrecoveryconfigs" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -21,10 +20,10 @@ func TestAccEventHubNamespaceDisasterRecoveryConfig_basic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_eventhub_namespace_disaster_recovery_config", "test") r := EventHubNamespaceDisasterRecoveryConfigResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -36,16 +35,16 @@ func TestAccEventHubNamespaceDisasterRecoveryConfig_update(t *testing.T) { data := acceptance.BuildTestData(t, 
"azurerm_eventhub_namespace_disaster_recovery_config", "test") r := EventHubNamespaceDisasterRecoveryConfigResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, { Config: r.updated(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -56,22 +55,18 @@ func TestAccEventHubNamespaceDisasterRecoveryConfig_update(t *testing.T) { }) } -func (EventHubNamespaceDisasterRecoveryConfigResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { - id, err := azure.ParseAzureResourceID(state.ID) +func (EventHubNamespaceDisasterRecoveryConfigResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { + id, err := disasterrecoveryconfigs.DisasterRecoveryConfigID(state.ID) if err != nil { return nil, err } - name := id.Path["disasterRecoveryConfigs"] - resourceGroup := id.ResourceGroup - namespaceName := id.Path["namespaces"] - - resp, err := clients.Eventhub.DisasterRecoveryConfigsClient.Get(ctx, resourceGroup, namespaceName, name) + resp, err := clients.Eventhub.DisasterRecoveryConfigsClient.Get(ctx, *id) if err != nil { - return nil, fmt.Errorf("retrieving EventHub Namespace Disaster Recovery Configs %q (namespace %q / resource group: %q): %v", name, namespaceName, id.ResourceGroup, err) + return nil, fmt.Errorf("retrieving %s: %+v", *id, err) } - return utils.Bool(resp.ArmDisasterRecoveryProperties != nil), nil + return utils.Bool(resp.Model != nil), nil } func (EventHubNamespaceDisasterRecoveryConfigResource) basic(data acceptance.TestData) string { diff --git a/azurerm/internal/services/eventhub/eventhub_namespace_resource.go 
b/azurerm/internal/services/eventhub/eventhub_namespace_resource.go index 5b5b36d4a377..08a5f702542a 100644 --- a/azurerm/internal/services/eventhub/eventhub_namespace_resource.go +++ b/azurerm/internal/services/eventhub/eventhub_namespace_resource.go @@ -5,21 +5,23 @@ import ( "fmt" "log" "strconv" + "strings" "time" - "github.com/Azure/azure-sdk-for-go/services/preview/eventhub/mgmt/2018-01-01-preview/eventhub" "github.com/hashicorp/go-azure-helpers/response" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/location" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/sdk/namespaces" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/sdk/networkrulesets" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/suppress" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -31,8 +33,8 @@ var ( eventHubNamespaceResourceName = "azurerm_eventhub_namespace" ) -func resourceEventHubNamespace() *schema.Resource { - return &schema.Resource{ +func resourceEventHubNamespace() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceEventHubNamespaceCreateUpdate, Read: resourceEventHubNamespaceRead, Update: resourceEventHubNamespaceCreateUpdate, @@ -43,16 +45,16 @@ func resourceEventHubNamespace() *schema.Resource { return err }), - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(30 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.ValidateEventHubNamespaceName(), @@ -63,62 +65,63 @@ func resourceEventHubNamespace() *schema.Resource { "resource_group_name": azure.SchemaResourceGroupName(), "sku": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, DiffSuppressFunc: suppress.CaseDifference, ValidateFunc: validation.StringInSlice([]string{ - string(eventhub.Basic), - string(eventhub.Standard), + string(namespaces.SkuNameBasic), + string(namespaces.SkuNameStandard), + string(namespaces.SkuNamePremium), }, true), }, "capacity": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Default: 1, }, "auto_inflate_enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: false, }, "zone_redundant": { - Type: 
schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: false, ForceNew: true, }, "dedicated_cluster_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, ValidateFunc: validate.ClusterID, }, "identity": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ - string(eventhub.SystemAssigned), + string(namespaces.ManagedServiceIdentityTypeSystemAssigned), }, false), }, "principal_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "tenant_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, }, @@ -126,55 +129,54 @@ func resourceEventHubNamespace() *schema.Resource { }, "maximum_throughput_units": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Computed: true, ValidateFunc: validation.IntBetween(0, 20), }, "network_rulesets": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, MaxItems: 1, Computed: true, - ConfigMode: schema.SchemaConfigModeAttr, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - + ConfigMode: pluginsdk.SchemaConfigModeAttr, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "default_action": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ - string(eventhub.Allow), - string(eventhub.Deny), + string(networkrulesets.DefaultActionAllow), + string(networkrulesets.DefaultActionDeny), }, false), }, "trusted_service_access_enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, }, // 128 limit per https://docs.microsoft.com/azure/event-hubs/event-hubs-quotas "virtual_network_rule": { 
- Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, MaxItems: 128, - ConfigMode: schema.SchemaConfigModeAttr, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + ConfigMode: pluginsdk.SchemaConfigModeAttr, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ // the API returns the subnet ID's resource group name in lowercase // https://github.com/Azure/azure-sdk-for-go/issues/5855 "subnet_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: azure.ValidateResourceID, DiffSuppressFunc: suppress.CaseDifference, }, "ignore_missing_virtual_network_service_endpoint": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, }, }, @@ -183,23 +185,23 @@ func resourceEventHubNamespace() *schema.Resource { // 128 limit per https://docs.microsoft.com/azure/event-hubs/event-hubs-quotas "ip_rule": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, MaxItems: 128, - ConfigMode: schema.SchemaConfigModeAttr, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + ConfigMode: pluginsdk.SchemaConfigModeAttr, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "ip_mask": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "action": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, - Default: string(eventhub.NetworkRuleIPActionAllow), + Default: string(networkrulesets.NetworkRuleIPActionAllow), ValidateFunc: validation.StringInSlice([]string{ - string(eventhub.NetworkRuleIPActionAllow), + string(networkrulesets.NetworkRuleIPActionAllow), }, false), }, }, @@ -210,65 +212,80 @@ func resourceEventHubNamespace() *schema.Resource { }, "default_primary_connection_string_alias": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, }, "default_secondary_connection_string_alias": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, 
Sensitive: true, }, "default_primary_connection_string": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, }, "default_primary_key": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, }, "default_secondary_connection_string": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, }, "default_secondary_key": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, }, "tags": tags.Schema(), }, + CustomizeDiff: pluginsdk.CustomizeDiffShim(func(ctx context.Context, d *pluginsdk.ResourceDiff, v interface{}) error { + oldSku, newSku := d.GetChange("sku") + if d.HasChange("sku") { + if strings.EqualFold(newSku.(string), string(namespaces.SkuNamePremium)) || strings.EqualFold(oldSku.(string), string(namespaces.SkuTierPremium)) { + log.Printf("[DEBUG] cannot migrate a namespace from or to Premium SKU") + d.ForceNew("sku") + } + if strings.EqualFold(newSku.(string), string(namespaces.SkuTierPremium)) { + zoneRedundant := d.Get("zone_redundant").(bool) + if !zoneRedundant { + return fmt.Errorf("zone_redundant needs to be set to true when using premium SKU") + } + } + } + return nil + }), } } -func resourceEventHubNamespaceCreateUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceEventHubNamespaceCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Eventhub.NamespacesClient + subscriptionId := meta.(*clients.Client).Account.SubscriptionId ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) defer cancel() log.Printf("[INFO] preparing arguments for AzureRM EventHub Namespace creation.") - name := d.Get("name").(string) - resGroup := d.Get("resource_group_name").(string) - + id := namespaces.NewNamespaceID(subscriptionId, d.Get("resource_group_name").(string), d.Get("name").(string)) if d.IsNewResource() { - existing, err := 
client.Get(ctx, resGroup, name) + existing, err := client.Get(ctx, id) if err != nil { - if !utils.ResponseWasNotFound(existing.Response) { - return fmt.Errorf("Error checking for presence of existing EventHub Namespace %q (Resource Group %q): %s", name, resGroup, err) + if !response.WasNotFound(existing.HttpResponse) { + return fmt.Errorf("checking for presence of existing %s: %+v", id, err) } } - if existing.ID != nil && *existing.ID != "" { - return tf.ImportAsExistsError("azurerm_eventhub_namespace", *existing.ID) + if existing.Model != nil { + return tf.ImportAsExistsError("azurerm_eventhub_namespace", id.ID()) } } @@ -279,27 +296,30 @@ func resourceEventHubNamespaceCreateUpdate(d *schema.ResourceData, meta interfac autoInflateEnabled := d.Get("auto_inflate_enabled").(bool) zoneRedundant := d.Get("zone_redundant").(bool) - parameters := eventhub.EHNamespace{ + parameters := namespaces.EHNamespace{ Location: &location, - Sku: &eventhub.Sku{ - Name: eventhub.SkuName(sku), - Tier: eventhub.SkuTier(sku), - Capacity: &capacity, + Sku: &namespaces.Sku{ + Name: namespaces.SkuName(sku), + Tier: func() *namespaces.SkuTier { + v := namespaces.SkuTier(sku) + return &v + }(), + Capacity: utils.Int64(int64(capacity)), }, Identity: expandEventHubIdentity(d.Get("identity").([]interface{})), - EHNamespaceProperties: &eventhub.EHNamespaceProperties{ + Properties: &namespaces.EHNamespaceProperties{ IsAutoInflateEnabled: utils.Bool(autoInflateEnabled), ZoneRedundant: utils.Bool(zoneRedundant), }, - Tags: tags.Expand(t), + Tags: expandTags(t), } if v := d.Get("dedicated_cluster_id").(string); v != "" { - parameters.EHNamespaceProperties.ClusterArmID = utils.String(v) + parameters.Properties.ClusterArmId = utils.String(v) } if v, ok := d.GetOk("maximum_throughput_units"); ok { - parameters.EHNamespaceProperties.MaximumThroughputUnits = utils.Int32(int32(v.(int))) + parameters.Properties.MaximumThroughputUnits = utils.Int64(int64(v.(int))) } // @favoretti: if we are downgrading 
from Standard to Basic SKU and namespace had both autoInflate enabled and @@ -307,46 +327,35 @@ func resourceEventHubNamespaceCreateUpdate(d *schema.ResourceData, meta interfac // // See: https://github.com/terraform-providers/terraform-provider-azurerm/issues/10244 // - if parameters.Sku.Tier == eventhub.SkuTierBasic && !autoInflateEnabled { - parameters.EHNamespaceProperties.MaximumThroughputUnits = utils.Int32(0) + if *parameters.Sku.Tier == namespaces.SkuTierBasic && !autoInflateEnabled { + parameters.Properties.MaximumThroughputUnits = utils.Int64(0) } - future, err := client.CreateOrUpdate(ctx, resGroup, name, parameters) - if err != nil { - return err + if err := client.CreateOrUpdateThenPoll(ctx, id, parameters); err != nil { + return fmt.Errorf("creating %s: %+v", id, err) } - if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { - return fmt.Errorf("Error creating eventhub namespace: %+v", err) - } - - read, err := client.Get(ctx, resGroup, name) - if err != nil { - return err - } - - if read.ID == nil { - return fmt.Errorf("Cannot read EventHub Namespace %q (resource group %q) ID", name, resGroup) - } - - d.SetId(*read.ID) + d.SetId(id.ID()) ruleSets, hasRuleSets := d.GetOk("network_rulesets") if hasRuleSets { - rulesets := eventhub.NetworkRuleSet{ - NetworkRuleSetProperties: expandEventHubNamespaceNetworkRuleset(ruleSets.([]interface{})), + rulesets := networkrulesets.NetworkRuleSet{ + Properties: expandEventHubNamespaceNetworkRuleset(ruleSets.([]interface{})), } // cannot use network rulesets with the basic SKU - if parameters.Sku.Name != eventhub.Basic { - if _, err := client.CreateOrUpdateNetworkRuleSet(ctx, resGroup, name, rulesets); err != nil { - return fmt.Errorf("Error setting network ruleset properties for EventHub Namespace %q (resource group %q): %v", name, resGroup, err) + if parameters.Sku.Name != namespaces.SkuNameBasic { + ruleSetsClient := meta.(*clients.Client).Eventhub.NetworkRuleSetsClient + namespaceId := 
networkrulesets.NewNamespaceID(id.SubscriptionId, id.ResourceGroup, id.Name) + if _, err := ruleSetsClient.NamespacesCreateOrUpdateNetworkRuleSet(ctx, namespaceId, rulesets); err != nil { + return fmt.Errorf("setting network ruleset properties for %s: %+v", id, err) } - } else { + } else if rulesets.Properties != nil { + props := rulesets.Properties // so if the user has specified the non default rule sets throw a validation error - if rulesets.DefaultAction != eventhub.Deny || - (rulesets.IPRules != nil && len(*rulesets.IPRules) > 0) || - (rulesets.VirtualNetworkRules != nil && len(*rulesets.VirtualNetworkRules) > 0) { + if *props.DefaultAction != networkrulesets.DefaultActionDeny || + (props.IpRules != nil && len(*props.IpRules) > 0) || + (props.VirtualNetworkRules != nil && len(*props.VirtualNetworkRules) > 0) { return fmt.Errorf("network_rulesets cannot be used when the SKU is basic") } } @@ -355,135 +364,155 @@ func resourceEventHubNamespaceCreateUpdate(d *schema.ResourceData, meta interfac return resourceEventHubNamespaceRead(d, meta) } -func resourceEventHubNamespaceRead(d *schema.ResourceData, meta interface{}) error { +func resourceEventHubNamespaceRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Eventhub.NamespacesClient + authorizationKeysClient := meta.(*clients.Client).Eventhub.NamespaceAuthorizationRulesClient + ruleSetsClient := meta.(*clients.Client).Eventhub.NetworkRuleSetsClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := parse.NamespaceID(d.Id()) + id, err := namespaces.ParseNamespaceID(d.Id()) if err != nil { return err } - resp, err := client.Get(ctx, id.ResourceGroup, id.Name) + resp, err := client.Get(ctx, *id) if err != nil { - if utils.ResponseWasNotFound(resp.Response) { + if response.WasNotFound(resp.HttpResponse) { d.SetId("") return nil } - return fmt.Errorf("Error making Read request on EventHub Namespace %q: %+v", id.Name, err) + return 
fmt.Errorf("retrieving %s: %+v", *id, err) } - d.Set("name", resp.Name) + d.Set("name", id.Name) d.Set("resource_group_name", id.ResourceGroup) - if location := resp.Location; location != nil { - d.Set("location", azure.NormalizeLocation(*location)) - } - if sku := resp.Sku; sku != nil { - d.Set("sku", string(sku.Name)) - d.Set("capacity", sku.Capacity) - } + if model := resp.Model; model != nil { + d.Set("location", location.NormalizeNilable(model.Location)) - if err := d.Set("identity", flattenEventHubIdentity(resp.Identity)); err != nil { - return fmt.Errorf("Error setting `identity`: %+v", err) - } + if sku := model.Sku; sku != nil { + d.Set("sku", string(sku.Name)) + d.Set("capacity", sku.Capacity) + } + + if err := d.Set("identity", flattenEventHubIdentity(model.Identity)); err != nil { + return fmt.Errorf("setting `identity`: %+v", err) + } + + if props := model.Properties; props != nil { + d.Set("auto_inflate_enabled", props.IsAutoInflateEnabled) + d.Set("maximum_throughput_units", int(*props.MaximumThroughputUnits)) + d.Set("zone_redundant", props.ZoneRedundant) + d.Set("dedicated_cluster_id", props.ClusterArmId) + } - if props := resp.EHNamespaceProperties; props != nil { - d.Set("auto_inflate_enabled", props.IsAutoInflateEnabled) - d.Set("maximum_throughput_units", int(*props.MaximumThroughputUnits)) - d.Set("zone_redundant", props.ZoneRedundant) - d.Set("dedicated_cluster_id", props.ClusterArmID) + if err := tags.FlattenAndSet(d, flattenTags(model.Tags)); err != nil { + return fmt.Errorf("setting `tags`: %+v", err) + } } - ruleset, err := client.GetNetworkRuleSet(ctx, id.ResourceGroup, id.Name) + namespaceId := networkrulesets.NewNamespaceID(id.SubscriptionId, id.ResourceGroup, id.Name) + ruleset, err := ruleSetsClient.NamespacesGetNetworkRuleSet(ctx, namespaceId) if err != nil { - return fmt.Errorf("Error making Read request on EventHub Namespace %q Network Ruleset: %+v", id.Name, err) + return fmt.Errorf("retrieving Network Rule Sets for %s: %+v", 
*id, err) } if err := d.Set("network_rulesets", flattenEventHubNamespaceNetworkRuleset(ruleset)); err != nil { return fmt.Errorf("Error setting `network_ruleset` for Evenhub Namespace %s: %v", id.Name, err) } - keys, err := client.ListKeys(ctx, id.ResourceGroup, id.Name, eventHubNamespaceDefaultAuthorizationRule) + authorizationRuleId := authorizationrulesnamespaces.NewAuthorizationRuleID(id.SubscriptionId, id.ResourceGroup, id.Name, eventHubNamespaceDefaultAuthorizationRule) + keys, err := authorizationKeysClient.NamespacesListKeys(ctx, authorizationRuleId) if err != nil { log.Printf("[WARN] Unable to List default keys for EventHub Namespace %q: %+v", id.Name, err) - } else { - d.Set("default_primary_connection_string_alias", keys.AliasPrimaryConnectionString) - d.Set("default_secondary_connection_string_alias", keys.AliasSecondaryConnectionString) - d.Set("default_primary_connection_string", keys.PrimaryConnectionString) - d.Set("default_secondary_connection_string", keys.SecondaryConnectionString) - d.Set("default_primary_key", keys.PrimaryKey) - d.Set("default_secondary_key", keys.SecondaryKey) } - return tags.FlattenAndSet(d, resp.Tags) + if model := keys.Model; model != nil { + d.Set("default_primary_connection_string_alias", model.AliasPrimaryConnectionString) + d.Set("default_secondary_connection_string_alias", model.AliasSecondaryConnectionString) + d.Set("default_primary_connection_string", model.PrimaryConnectionString) + d.Set("default_secondary_connection_string", model.SecondaryConnectionString) + d.Set("default_primary_key", model.PrimaryKey) + d.Set("default_secondary_key", model.SecondaryKey) + } + + return nil } -func resourceEventHubNamespaceDelete(d *schema.ResourceData, meta interface{}) error { +func resourceEventHubNamespaceDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Eventhub.NamespacesClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := 
parse.NamespaceID(d.Id()) + id, err := namespaces.ParseNamespaceID(d.Id()) if err != nil { return err } - future, err := client.Delete(ctx, id.ResourceGroup, id.Name) + future, err := client.Delete(ctx, *id) if err != nil { - if response.WasNotFound(future.Response()) { + if response.WasNotFound(future.HttpResponse) { return nil } - return fmt.Errorf("Error issuing delete request of EventHub Namespace %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err) + return fmt.Errorf("deleting %s: %+v", *id, err) } - return waitForEventHubNamespaceToBeDeleted(ctx, client, id.ResourceGroup, id.Name, d) + return waitForEventHubNamespaceToBeDeleted(ctx, client, *id) } -func waitForEventHubNamespaceToBeDeleted(ctx context.Context, client *eventhub.NamespacesClient, resourceGroup, name string, d *schema.ResourceData) error { +func waitForEventHubNamespaceToBeDeleted(ctx context.Context, client *namespaces.NamespacesClient, id namespaces.NamespaceId) error { + deadline, ok := ctx.Deadline() + if !ok { + return fmt.Errorf("context has no deadline") + } + // we can't use the Waiter here since the API returns a 200 once it's deleted which is considered a polling status code.. 
- log.Printf("[DEBUG] Waiting for EventHub Namespace (%q in Resource Group %q) to be deleted", name, resourceGroup) - stateConf := &resource.StateChangeConf{ + log.Printf("[DEBUG] Waiting for %s to be deleted..", id) + stateConf := &pluginsdk.StateChangeConf{ Pending: []string{"200"}, Target: []string{"404"}, - Refresh: eventHubNamespaceStateStatusCodeRefreshFunc(ctx, client, resourceGroup, name), - Timeout: d.Timeout(schema.TimeoutDelete), + Refresh: eventHubNamespaceStateStatusCodeRefreshFunc(ctx, client, id), + Timeout: time.Until(deadline), } - if _, err := stateConf.WaitForState(); err != nil { - return fmt.Errorf("Error waiting for EventHub NameSpace (%q in Resource Group %q) to be deleted: %+v", name, resourceGroup, err) + if _, err := stateConf.WaitForStateContext(ctx); err != nil { + return fmt.Errorf("waiting for %s to be deleted: %+v", id, err) } return nil } -func eventHubNamespaceStateStatusCodeRefreshFunc(ctx context.Context, client *eventhub.NamespacesClient, resourceGroup, name string) resource.StateRefreshFunc { +func eventHubNamespaceStateStatusCodeRefreshFunc(ctx context.Context, client *namespaces.NamespacesClient, id namespaces.NamespaceId) pluginsdk.StateRefreshFunc { return func() (interface{}, string, error) { - res, err := client.Get(ctx, resourceGroup, name) - - log.Printf("Retrieving EventHub Namespace %q (Resource Group %q) returned Status %d", resourceGroup, name, res.StatusCode) + res, err := client.Get(ctx, id) + if res.HttpResponse != nil { + log.Printf("Retrieving %s returned Status %d", id, res.HttpResponse.StatusCode) + } if err != nil { - if utils.ResponseWasNotFound(res.Response) { - return res, strconv.Itoa(res.StatusCode), nil + if response.WasNotFound(res.HttpResponse) { + return res, strconv.Itoa(res.HttpResponse.StatusCode), nil } - return nil, "", fmt.Errorf("Error polling for the status of the EventHub Namespace %q (RG: %q): %+v", name, resourceGroup, err) + return nil, "", fmt.Errorf("polling for the status of %s: %+v", 
id, err) } - return res, strconv.Itoa(res.StatusCode), nil + return res, strconv.Itoa(res.HttpResponse.StatusCode), nil } } -func expandEventHubNamespaceNetworkRuleset(input []interface{}) *eventhub.NetworkRuleSetProperties { +func expandEventHubNamespaceNetworkRuleset(input []interface{}) *networkrulesets.NetworkRuleSetProperties { if len(input) == 0 { return nil } block := input[0].(map[string]interface{}) - ruleset := eventhub.NetworkRuleSetProperties{ - DefaultAction: eventhub.DefaultAction(block["default_action"].(string)), + ruleset := networkrulesets.NetworkRuleSetProperties{ + DefaultAction: func() *networkrulesets.DefaultAction { + v := networkrulesets.DefaultAction(block["default_action"].(string)) + return &v + }(), } if v, ok := block["trusted_service_access_enabled"]; ok { @@ -492,12 +521,12 @@ func expandEventHubNamespaceNetworkRuleset(input []interface{}) *eventhub.Networ if v, ok := block["virtual_network_rule"].([]interface{}); ok { if len(v) > 0 { - var rules []eventhub.NWRuleSetVirtualNetworkRules + var rules []networkrulesets.NWRuleSetVirtualNetworkRules for _, r := range v { rblock := r.(map[string]interface{}) - rules = append(rules, eventhub.NWRuleSetVirtualNetworkRules{ - Subnet: &eventhub.Subnet{ - ID: utils.String(rblock["subnet_id"].(string)), + rules = append(rules, networkrulesets.NWRuleSetVirtualNetworkRules{ + Subnet: &networkrulesets.Subnet{ + Id: utils.String(rblock["subnet_id"].(string)), }, IgnoreMissingVnetServiceEndpoint: utils.Bool(rblock["ignore_missing_virtual_network_service_endpoint"].(bool)), }) @@ -509,34 +538,37 @@ func expandEventHubNamespaceNetworkRuleset(input []interface{}) *eventhub.Networ if v, ok := block["ip_rule"].([]interface{}); ok { if len(v) > 0 { - var rules []eventhub.NWRuleSetIPRules + var rules []networkrulesets.NWRuleSetIpRules for _, r := range v { rblock := r.(map[string]interface{}) - rules = append(rules, eventhub.NWRuleSetIPRules{ - IPMask: utils.String(rblock["ip_mask"].(string)), - Action: 
eventhub.NetworkRuleIPAction(rblock["action"].(string)), + rules = append(rules, networkrulesets.NWRuleSetIpRules{ + IpMask: utils.String(rblock["ip_mask"].(string)), + Action: func() *networkrulesets.NetworkRuleIPAction { + v := networkrulesets.NetworkRuleIPAction(rblock["action"].(string)) + return &v + }(), }) } - ruleset.IPRules = &rules + ruleset.IpRules = &rules } } return &ruleset } -func flattenEventHubNamespaceNetworkRuleset(ruleset eventhub.NetworkRuleSet) []interface{} { - if ruleset.NetworkRuleSetProperties == nil { +func flattenEventHubNamespaceNetworkRuleset(ruleset networkrulesets.NamespacesGetNetworkRuleSetResponse) []interface{} { + if ruleset.Model == nil || ruleset.Model.Properties == nil { return nil } vnetBlocks := make([]interface{}, 0) - if vnetRules := ruleset.NetworkRuleSetProperties.VirtualNetworkRules; vnetRules != nil { + if vnetRules := ruleset.Model.Properties.VirtualNetworkRules; vnetRules != nil { for _, vnetRule := range *vnetRules { block := make(map[string]interface{}) if s := vnetRule.Subnet; s != nil { - if v := s.ID; v != nil { + if v := s.Id; v != nil { block["subnet_id"] = *v } } @@ -549,13 +581,18 @@ func flattenEventHubNamespaceNetworkRuleset(ruleset eventhub.NetworkRuleSet) []i } } ipBlocks := make([]interface{}, 0) - if ipRules := ruleset.NetworkRuleSetProperties.IPRules; ipRules != nil { + if ipRules := ruleset.Model.Properties.IpRules; ipRules != nil { for _, ipRule := range *ipRules { block := make(map[string]interface{}) - block["action"] = string(ipRule.Action) + action := "" + if ipRule.Action != nil { + action = string(*ipRule.Action) + } - if v := ipRule.IPMask; v != nil { + block["action"] = action + + if v := ipRule.IpMask; v != nil { block["ip_mask"] = *v } @@ -563,43 +600,53 @@ func flattenEventHubNamespaceNetworkRuleset(ruleset eventhub.NetworkRuleSet) []i } } + // TODO: fix this + return []interface{}{map[string]interface{}{ - "default_action": string(ruleset.DefaultAction), + "default_action": 
string(*ruleset.Model.Properties.DefaultAction), "virtual_network_rule": vnetBlocks, "ip_rule": ipBlocks, - "trusted_service_access_enabled": ruleset.TrustedServiceAccessEnabled, + "trusted_service_access_enabled": ruleset.Model.Properties.TrustedServiceAccessEnabled, }} } -func expandEventHubIdentity(input []interface{}) *eventhub.Identity { +func expandEventHubIdentity(input []interface{}) *namespaces.Identity { if len(input) == 0 { return nil } v := input[0].(map[string]interface{}) - return &eventhub.Identity{ - Type: eventhub.IdentityType(v["type"].(string)), + return &namespaces.Identity{ + Type: func() *namespaces.ManagedServiceIdentityType { + v := namespaces.ManagedServiceIdentityType(v["type"].(string)) + return &v + }(), } } -func flattenEventHubIdentity(input *eventhub.Identity) []interface{} { +func flattenEventHubIdentity(input *namespaces.Identity) []interface{} { if input == nil { return []interface{}{} } + identityType := "" + if input.Type != nil { + identityType = string(*input.Type) + } + principalID := "" - if input.PrincipalID != nil { - principalID = *input.PrincipalID + if input.PrincipalId != nil { + principalID = *input.PrincipalId } tenantID := "" - if input.TenantID != nil { - tenantID = *input.TenantID + if input.TenantId != nil { + tenantID = *input.TenantId } return []interface{}{ map[string]interface{}{ - "type": string(input.Type), + "type": identityType, "principal_id": principalID, "tenant_id": tenantID, }, diff --git a/azurerm/internal/services/eventhub/eventhub_namespace_resource_test.go b/azurerm/internal/services/eventhub/eventhub_namespace_resource_test.go index 2b2b6ee90d84..693e1264c7cc 100644 --- a/azurerm/internal/services/eventhub/eventhub_namespace_resource_test.go +++ b/azurerm/internal/services/eventhub/eventhub_namespace_resource_test.go @@ -6,12 +6,11 @@ import ( "regexp" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/sdk/namespaces" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -22,10 +21,10 @@ func TestAccEventHubNamespace_basic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_eventhub_namespace", "test") r := EventHubNamespaceResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -37,10 +36,10 @@ func TestAccEventHubNamespace_basicWithIdentity(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_eventhub_namespace", "test") r := EventHubNamespaceResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicWithIdentity(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -52,17 +51,17 @@ func TestAccEventHubNamespace_basicUpdateIdentity(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_eventhub_namespace", "test") r := EventHubNamespaceResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, 
data.ImportStep(), { Config: r.basicWithIdentity(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -74,10 +73,10 @@ func TestAccEventHubNamespace_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_eventhub_namespace", "test") r := EventHubNamespaceResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -92,10 +91,10 @@ func TestAccEventHubNamespace_standard(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_eventhub_namespace", "test") r := EventHubNamespaceResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.standard(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -107,10 +106,10 @@ func TestAccEventHubNamespace_standardWithIdentity(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_eventhub_namespace", "test") r := EventHubNamespaceResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.standardWithIdentity(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -122,17 +121,17 @@ func TestAccEventHubNamespace_standardUpdateIdentity(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_eventhub_namespace", "test") r := EventHubNamespaceResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.standard(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), 
), }, data.ImportStep(), { Config: r.standardWithIdentity(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -144,10 +143,10 @@ func TestAccEventHubNamespace_networkrule_iprule_trusted_services(t *testing.T) data := acceptance.BuildTestData(t, "azurerm_eventhub_namespace", "test") r := EventHubNamespaceResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkrule_iprule_trusted_services(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -159,10 +158,10 @@ func TestAccEventHubNamespace_networkrule_iprule(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_eventhub_namespace", "test") r := EventHubNamespaceResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkrule_iprule(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -174,10 +173,10 @@ func TestAccEventHubNamespace_networkrule_vnet(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_eventhub_namespace", "test") r := EventHubNamespaceResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkrule_vnet(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -189,10 +188,10 @@ func TestAccEventHubNamespace_networkruleVnetIpRule(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_eventhub_namespace", "test") r := EventHubNamespaceResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.networkruleVnetIpRule(data), - Check: resource.ComposeTestCheckFunc( + 
Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("network_rulesets.0.virtual_network_rule.#").HasValue("2"), check.That(data.ResourceName).Key("network_rulesets.0.ip_rule.#").HasValue("2"), @@ -206,15 +205,15 @@ func TestAccEventHubNamespace_readDefaultKeys(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_eventhub_namespace", "test") r := EventHubNamespaceResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), - resource.TestMatchResourceAttr(data.ResourceName, "default_primary_connection_string", regexp.MustCompile("Endpoint=.+")), - resource.TestMatchResourceAttr(data.ResourceName, "default_secondary_connection_string", regexp.MustCompile("Endpoint=.+")), - resource.TestMatchResourceAttr(data.ResourceName, "default_primary_key", regexp.MustCompile(".+")), - resource.TestMatchResourceAttr(data.ResourceName, "default_secondary_key", regexp.MustCompile(".+")), + acceptance.TestMatchResourceAttr(data.ResourceName, "default_primary_connection_string", regexp.MustCompile("Endpoint=.+")), + acceptance.TestMatchResourceAttr(data.ResourceName, "default_secondary_connection_string", regexp.MustCompile("Endpoint=.+")), + acceptance.TestMatchResourceAttr(data.ResourceName, "default_primary_key", regexp.MustCompile(".+")), + acceptance.TestMatchResourceAttr(data.ResourceName, "default_secondary_key", regexp.MustCompile(".+")), ), }, }) @@ -224,20 +223,20 @@ func TestAccEventHubNamespace_withAliasConnectionString(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_eventhub_namespace", "test") r := EventHubNamespaceResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { // `default_primary_connection_string_alias` and 
`default_secondary_connection_string_alias` are still `nil` in `azurerm_eventhub_namespace` after created `azurerm_eventhub_namespace` successfully since `azurerm_eventhub_namespace_disaster_recovery_config` hasn't been created. // So these two properties should be checked in the second run. Config: r.withAliasConnectionString(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, { Config: r.withAliasConnectionString(data), - Check: resource.ComposeTestCheckFunc( - resource.TestMatchResourceAttr(data.ResourceName, "default_primary_connection_string_alias", regexp.MustCompile("Endpoint=.+")), - resource.TestMatchResourceAttr(data.ResourceName, "default_secondary_connection_string_alias", regexp.MustCompile("Endpoint=.+")), + Check: acceptance.ComposeTestCheckFunc( + acceptance.TestMatchResourceAttr(data.ResourceName, "default_primary_connection_string_alias", regexp.MustCompile("Endpoint=.+")), + acceptance.TestMatchResourceAttr(data.ResourceName, "default_secondary_connection_string_alias", regexp.MustCompile("Endpoint=.+")), ), }, data.ImportStep(), @@ -248,10 +247,10 @@ func TestAccEventHubNamespace_maximumThroughputUnits(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_eventhub_namespace", "test") r := EventHubNamespaceResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.maximumThroughputUnits(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -263,10 +262,10 @@ func TestAccEventHubNamespace_zoneRedundant(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_eventhub_namespace", "test") r := EventHubNamespaceResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.zoneRedundant(data), - Check: resource.ComposeTestCheckFunc( + Check: 
acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -278,10 +277,10 @@ func TestAccEventHubNamespace_dedicatedClusterID(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_eventhub_namespace", "test") r := EventHubNamespaceResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.dedicatedClusterID(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -293,10 +292,10 @@ func TestAccEventHubNamespace_NonStandardCasing(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_eventhub_namespace", "test") r := EventHubNamespaceResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.nonStandardCasing(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -312,18 +311,21 @@ func TestAccEventHubNamespace_BasicWithTagsUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_eventhub_namespace", "test") r := EventHubNamespaceResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, { Config: r.basicWithTagsUpdate(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), - check.That(data.ResourceName).Key("tags.%").HasValue("1"), + check.That(data.ResourceName).Key("tags.%").HasValue("3"), + check.That(data.ResourceName).Key("tags.environment").HasValue("Production"), + check.That(data.ResourceName).Key("tags.sauce").HasValue("Hot"), + check.That(data.ResourceName).Key("tags.terraform").HasValue("true"), ), }, }) @@ -333,10 
+335,10 @@ func TestAccEventHubNamespace_BasicWithCapacity(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_eventhub_namespace", "test") r := EventHubNamespaceResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.capacity(data, 20), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("capacity").HasValue("20"), ), @@ -348,17 +350,17 @@ func TestAccEventHubNamespace_BasicWithCapacityUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_eventhub_namespace", "test") r := EventHubNamespaceResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.capacity(data, 20), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("capacity").HasValue("20"), ), }, { Config: r.capacity(data, 2), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("capacity").HasValue("2"), ), @@ -370,22 +372,30 @@ func TestAccEventHubNamespace_BasicWithSkuUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_eventhub_namespace", "test") r := EventHubNamespaceResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("sku").HasValue("Basic"), ), }, { Config: r.standard(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("sku").HasValue("Standard"), 
check.That(data.ResourceName).Key("capacity").HasValue("2"), ), }, + { + Config: r.premium(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("sku").HasValue("Premium"), + check.That(data.ResourceName).Key("capacity").HasValue("1"), + ), + }, }) } @@ -393,10 +403,10 @@ func TestAccEventHubNamespace_SkuDowngradeFromAutoInflateWithMaxThroughput(t *te data := acceptance.BuildTestData(t, "azurerm_eventhub_namespace", "test") r := EventHubNamespaceResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.maximumThroughputUnits(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("sku").HasValue("Standard"), check.That(data.ResourceName).Key("capacity").HasValue("2"), @@ -404,7 +414,7 @@ func TestAccEventHubNamespace_SkuDowngradeFromAutoInflateWithMaxThroughput(t *te }, { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("sku").HasValue("Basic"), ), @@ -416,10 +426,10 @@ func TestAccEventHubNamespace_maximumThroughputUnitsUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_eventhub_namespace", "test") r := EventHubNamespaceResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.maximumThroughputUnits(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("sku").HasValue("Standard"), check.That(data.ResourceName).Key("capacity").HasValue("2"), @@ -428,7 +438,7 @@ func TestAccEventHubNamespace_maximumThroughputUnitsUpdate(t *testing.T) { }, { Config: r.maximumThroughputUnitsUpdate(data), - 
Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("sku").HasValue("Standard"), check.That(data.ResourceName).Key("capacity").HasValue("1"), @@ -442,28 +452,28 @@ func TestAccEventHubNamespace_autoInfalteDisabledWithAutoInflateUnits(t *testing data := acceptance.BuildTestData(t, "azurerm_eventhub_namespace", "test") r := EventHubNamespaceResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.autoInfalteDisabledWithAutoInflateUnits(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, }) } -func (EventHubNamespaceResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { - id, err := parse.NamespaceID(state.ID) +func (EventHubNamespaceResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { + id, err := namespaces.ParseNamespaceID(state.ID) if err != nil { return nil, err } - resp, err := clients.Eventhub.NamespacesClient.Get(ctx, id.ResourceGroup, id.Name) + resp, err := clients.Eventhub.NamespacesClient.Get(ctx, *id) if err != nil { return nil, fmt.Errorf("retrieving %s: %v", id.String(), err) } - return utils.Bool(resp.EHNamespaceProperties != nil), nil + return utils.Bool(resp.Model != nil), nil } func (EventHubNamespaceResource) basic(data acceptance.TestData) string { @@ -586,6 +596,28 @@ resource "azurerm_eventhub_namespace" "test" { `, data.RandomInteger, data.Locations.Primary, data.RandomInteger) } +func (EventHubNamespaceResource) premium(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-eh-%d" + location = "%s" +} + +resource "azurerm_eventhub_namespace" "test" { + name = 
"acctesteventhubnamespace-%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + sku = "Premium" + capacity = "1" + zone_redundant = true +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger) +} + func (EventHubNamespaceResource) standardWithIdentity(data acceptance.TestData) string { return fmt.Sprintf(` provider "azurerm" { @@ -896,6 +928,8 @@ resource "azurerm_eventhub_namespace" "test" { tags = { environment = "Production" + sauce = "Hot" + terraform = "true" } } `, data.RandomInteger, data.Locations.Primary, data.RandomInteger) diff --git a/azurerm/internal/services/eventhub/eventhub_resource.go b/azurerm/internal/services/eventhub/eventhub_resource.go index 53b210fbfa39..c5692a3e7303 100644 --- a/azurerm/internal/services/eventhub/eventhub_resource.go +++ b/azurerm/internal/services/eventhub/eventhub_resource.go @@ -5,24 +5,24 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/preview/eventhub/mgmt/2018-01-01-preview/eventhub" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/hashicorp/go-azure-helpers/response" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/sdk/eventhubs" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/suppress" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) var eventHubResourceName = "azurerm_eventhub" -func resourceEventHub() *schema.Resource { - return &schema.Resource{ +func resourceEventHub() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceEventHubCreateUpdate, Read: resourceEventHubRead, Update: resourceEventHubCreateUpdate, @@ -33,23 +33,23 @@ func resourceEventHub() *schema.Resource { return err }), - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(30 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.ValidateEventHubName(), }, "namespace_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.ValidateEventHubNamespaceName(), @@ -58,62 +58,62 @@ func resourceEventHub() *schema.Resource { "resource_group_name": azure.SchemaResourceGroupName(), "partition_count": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Required: true, ForceNew: true, ValidateFunc: validate.ValidateEventHubPartitionCount, }, "message_retention": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Required: true, ValidateFunc: validate.ValidateEventHubMessageRetentionCount, }, "capture_description": { - Type: schema.TypeList, 
+ Type: pluginsdk.TypeList, Optional: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Required: true, }, "skip_empty_archives": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: false, }, "encoding": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, DiffSuppressFunc: suppress.CaseDifference, ValidateFunc: validation.StringInSlice([]string{ - string(eventhub.Avro), - string(eventhub.AvroDeflate), + string(eventhubs.EncodingCaptureDescriptionAvro), + string(eventhubs.EncodingCaptureDescriptionAvroDeflate), }, true), }, "interval_in_seconds": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Default: 300, ValidateFunc: validation.IntBetween(60, 900), }, "size_limit_in_bytes": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, Default: 314572800, ValidateFunc: validation.IntBetween(10485760, 524288000), }, "destination": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Required: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ "EventHubArchive.AzureBlockBlob", @@ -123,16 +123,16 @@ func resourceEventHub() *schema.Resource { }, false), }, "archive_name_format": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validate.ValidateEventHubArchiveNameFormat, }, "blob_container_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "storage_account_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: azure.ValidateResourceID, }, @@ -143,17 +143,28 @@ func resourceEventHub() 
*schema.Resource { }, }, + "status": { + Type: pluginsdk.TypeString, + Optional: true, + Default: string(eventhubs.EntityStatusActive), + ValidateFunc: validation.StringInSlice([]string{ + string(eventhubs.EntityStatusActive), + string(eventhubs.EntityStatusDisabled), + string(eventhubs.EntityStatusSendDisabled), + }, false), + }, + "partition_ids": { - Type: schema.TypeSet, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, + Type: pluginsdk.TypeSet, + Elem: &pluginsdk.Schema{Type: pluginsdk.TypeString}, + Set: pluginsdk.HashString, Computed: true, }, }, } } -func resourceEventHubCreateUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceEventHubCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Eventhub.EventHubsClient subscriptionId := meta.(*clients.Client).Account.SubscriptionId ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) @@ -161,28 +172,30 @@ func resourceEventHubCreateUpdate(d *schema.ResourceData, meta interface{}) erro log.Printf("[INFO] preparing arguments for Azure ARM EventHub creation.") - id := parse.NewEventHubID(subscriptionId, d.Get("resource_group_name").(string), d.Get("namespace_name").(string), d.Get("name").(string)) + id := eventhubs.NewEventhubID(subscriptionId, d.Get("resource_group_name").(string), d.Get("namespace_name").(string), d.Get("name").(string)) if d.IsNewResource() { - existing, err := client.Get(ctx, id.ResourceGroup, id.NamespaceName, id.Name) + existing, err := client.Get(ctx, id) if err != nil { - if !utils.ResponseWasNotFound(existing.Response) { + if !response.WasNotFound(existing.HttpResponse) { return fmt.Errorf("checking for presence of existing %s: %s", id, err) } } - if !utils.ResponseWasNotFound(existing.Response) { + if !response.WasNotFound(existing.HttpResponse) { return tf.ImportAsExistsError("azurerm_eventhub", id.ID()) } } partitionCount := int64(d.Get("partition_count").(int)) 
messageRetention := int64(d.Get("message_retention").(int)) + eventhubStatus := eventhubs.EntityStatus(d.Get("status").(string)) - parameters := eventhub.Model{ - Properties: &eventhub.Properties{ + parameters := eventhubs.Eventhub{ + Properties: &eventhubs.EventhubProperties{ PartitionCount: &partitionCount, MessageRetentionInDays: &messageRetention, + Status: &eventhubStatus, }, } @@ -190,7 +203,7 @@ func resourceEventHubCreateUpdate(d *schema.ResourceData, meta interface{}) erro parameters.Properties.CaptureDescription = expandEventHubCaptureDescription(d) } - if _, err := client.CreateOrUpdate(ctx, id.ResourceGroup, id.NamespaceName, id.Name, parameters); err != nil { + if _, err := client.CreateOrUpdate(ctx, id, parameters); err != nil { return err } @@ -199,19 +212,19 @@ func resourceEventHubCreateUpdate(d *schema.ResourceData, meta interface{}) erro return resourceEventHubRead(d, meta) } -func resourceEventHubRead(d *schema.ResourceData, meta interface{}) error { +func resourceEventHubRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Eventhub.EventHubsClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := parse.EventHubID(d.Id()) + id, err := eventhubs.EventhubID(d.Id()) if err != nil { return err } - resp, err := client.Get(ctx, id.ResourceGroup, id.NamespaceName, id.Name) + resp, err := client.Get(ctx, *id) if err != nil { - if utils.ResponseWasNotFound(resp.Response) { + if response.WasNotFound(resp.HttpResponse) { d.SetId("") return nil } @@ -222,33 +235,36 @@ func resourceEventHubRead(d *schema.ResourceData, meta interface{}) error { d.Set("namespace_name", id.NamespaceName) d.Set("resource_group_name", id.ResourceGroup) - if props := resp.Properties; props != nil { - d.Set("partition_count", props.PartitionCount) - d.Set("message_retention", props.MessageRetentionInDays) - d.Set("partition_ids", props.PartitionIds) + if model := resp.Model; model != nil { + if 
props := model.Properties; props != nil { + d.Set("partition_count", props.PartitionCount) + d.Set("message_retention", props.MessageRetentionInDays) + d.Set("partition_ids", props.PartitionIds) + d.Set("status", string(*props.Status)) - captureDescription := flattenEventHubCaptureDescription(props.CaptureDescription) - if err := d.Set("capture_description", captureDescription); err != nil { - return err + captureDescription := flattenEventHubCaptureDescription(props.CaptureDescription) + if err := d.Set("capture_description", captureDescription); err != nil { + return err + } } } return nil } -func resourceEventHubDelete(d *schema.ResourceData, meta interface{}) error { +func resourceEventHubDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Eventhub.EventHubsClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := parse.EventHubID(d.Id()) + id, err := eventhubs.EventhubID(d.Id()) if err != nil { return err } - resp, err := client.Delete(ctx, id.ResourceGroup, id.NamespaceName, id.Name) + resp, err := client.Delete(ctx, *id) if err != nil { - if utils.ResponseWasNotFound(resp) { + if response.WasNotFound(resp.HttpResponse) { return nil } @@ -258,7 +274,7 @@ func resourceEventHubDelete(d *schema.ResourceData, meta interface{}) error { return nil } -func expandEventHubCaptureDescription(d *schema.ResourceData) *eventhub.CaptureDescription { +func expandEventHubCaptureDescription(d *pluginsdk.ResourceData) *eventhubs.CaptureDescription { inputs := d.Get("capture_description").([]interface{}) input := inputs[0].(map[string]interface{}) @@ -268,11 +284,14 @@ func expandEventHubCaptureDescription(d *schema.ResourceData) *eventhub.CaptureD sizeLimitInBytes := input["size_limit_in_bytes"].(int) skipEmptyArchives := input["skip_empty_archives"].(bool) - captureDescription := eventhub.CaptureDescription{ - Enabled: utils.Bool(enabled), - Encoding: 
eventhub.EncodingCaptureDescription(encoding), - IntervalInSeconds: utils.Int32(int32(intervalInSeconds)), - SizeLimitInBytes: utils.Int32(int32(sizeLimitInBytes)), + captureDescription := eventhubs.CaptureDescription{ + Enabled: utils.Bool(enabled), + Encoding: func() *eventhubs.EncodingCaptureDescription { + v := eventhubs.EncodingCaptureDescription(encoding) + return &v + }(), + IntervalInSeconds: utils.Int64(int64(intervalInSeconds)), + SizeLimitInBytes: utils.Int64(int64(sizeLimitInBytes)), SkipEmptyArchives: utils.Bool(skipEmptyArchives), } @@ -286,12 +305,12 @@ func expandEventHubCaptureDescription(d *schema.ResourceData) *eventhub.CaptureD blobContainerName := destination["blob_container_name"].(string) storageAccountId := destination["storage_account_id"].(string) - captureDescription.Destination = &eventhub.Destination{ + captureDescription.Destination = &eventhubs.Destination{ Name: utils.String(destinationName), - DestinationProperties: &eventhub.DestinationProperties{ + Properties: &eventhubs.DestinationProperties{ ArchiveNameFormat: utils.String(archiveNameFormat), BlobContainer: utils.String(blobContainerName), - StorageAccountResourceID: utils.String(storageAccountId), + StorageAccountResourceId: utils.String(storageAccountId), }, } } @@ -300,7 +319,7 @@ func expandEventHubCaptureDescription(d *schema.ResourceData) *eventhub.CaptureD return &captureDescription } -func flattenEventHubCaptureDescription(description *eventhub.CaptureDescription) []interface{} { +func flattenEventHubCaptureDescription(description *eventhubs.CaptureDescription) []interface{} { results := make([]interface{}, 0) if description != nil { @@ -314,7 +333,11 @@ func flattenEventHubCaptureDescription(description *eventhub.CaptureDescription) output["skip_empty_archives"] = *skipEmptyArchives } - output["encoding"] = string(description.Encoding) + encoding := "" + if description.Encoding != nil { + encoding = string(*description.Encoding) + } + output["encoding"] = encoding if 
interval := description.IntervalInSeconds; interval != nil { output["interval_in_seconds"] = *interval @@ -331,14 +354,14 @@ func flattenEventHubCaptureDescription(description *eventhub.CaptureDescription) destinationOutput["name"] = *name } - if props := destination.DestinationProperties; props != nil { + if props := destination.Properties; props != nil { if archiveNameFormat := props.ArchiveNameFormat; archiveNameFormat != nil { destinationOutput["archive_name_format"] = *archiveNameFormat } if blobContainerName := props.BlobContainer; blobContainerName != nil { destinationOutput["blob_container_name"] = *blobContainerName } - if storageAccountId := props.StorageAccountResourceID; storageAccountId != nil { + if storageAccountId := props.StorageAccountResourceId; storageAccountId != nil { destinationOutput["storage_account_id"] = *storageAccountId } } diff --git a/azurerm/internal/services/eventhub/eventhub_resource_test.go b/azurerm/internal/services/eventhub/eventhub_resource_test.go index 587d2ca0dda6..0ec7732d9f3d 100644 --- a/azurerm/internal/services/eventhub/eventhub_resource_test.go +++ b/azurerm/internal/services/eventhub/eventhub_resource_test.go @@ -6,16 +6,13 @@ import ( "strconv" "testing" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/parse" - - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/validate" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" - - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/sdk/eventhubs" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/eventhub/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) type EventHubResource struct { @@ -181,10 +178,10 @@ func TestAccEventHub_basic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_eventhub", "test") r := EventHubResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data, 2), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -196,10 +193,10 @@ func TestAccEventHub_basicOnePartition(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_eventhub", "test") r := EventHubResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data, 1), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("partition_count").HasValue("1"), ), @@ -212,10 +209,10 @@ func TestAccEventHub_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_eventhub", "test") r := EventHubResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data, 2), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -230,17 +227,17 @@ func TestAccEventHub_partitionCountUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_eventhub", "test") r := EventHubResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, 
[]acceptance.TestStep{ { Config: r.basic(data, 2), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("partition_count").HasValue("2"), ), }, { Config: r.partitionCountUpdate(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("partition_count").HasValue("10"), ), @@ -252,10 +249,10 @@ func TestAccEventHub_standard(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_eventhub", "test") r := EventHubResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.standard(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -267,10 +264,10 @@ func TestAccEventHub_captureDescription(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_eventhub", "test") r := EventHubResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.captureDescription(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("capture_description.0.enabled").HasValue("true"), check.That(data.ResourceName).Key("capture_description.0.skip_empty_archives").HasValue("true"), @@ -284,17 +281,17 @@ func TestAccEventHub_captureDescriptionDisabled(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_eventhub", "test") r := EventHubResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.captureDescription(data, true), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), 
check.That(data.ResourceName).Key("capture_description.0.enabled").HasValue("true"), ), }, { Config: r.captureDescription(data, false), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("capture_description.0.enabled").HasValue("false"), ), @@ -306,17 +303,17 @@ func TestAccEventHub_messageRetentionUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_eventhub", "test") r := EventHubResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.standard(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("message_retention").HasValue("7"), ), }, { Config: r.messageRetentionUpdate(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("message_retention").HasValue("5"), ), @@ -324,18 +321,40 @@ func TestAccEventHub_messageRetentionUpdate(t *testing.T) { }) } -func (EventHubResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { - id, err := parse.EventHubID(state.ID) +func TestAccEventHub_eventhubStatus(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_eventhub", "test") + r := EventHubResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.eventhubStatus(data, "Active"), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.eventhubStatus(data, "Disabled"), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func (EventHubResource) Exists(ctx context.Context, clients *clients.Client, state 
*pluginsdk.InstanceState) (*bool, error) { + id, err := eventhubs.EventhubID(state.ID) if err != nil { return nil, err } - resp, err := clients.Eventhub.EventHubsClient.Get(ctx, id.ResourceGroup, id.NamespaceName, id.Name) + resp, err := clients.Eventhub.EventHubsClient.Get(ctx, *id) if err != nil { - return nil, fmt.Errorf("retrieving %s: %v", id, err) + return nil, fmt.Errorf("retrieving %s: %v", *id, err) } - return utils.Bool(resp.Properties != nil), nil + return utils.Bool(resp.Model != nil), nil } func (EventHubResource) basic(data acceptance.TestData, partitionCount int) string { @@ -522,3 +541,32 @@ resource "azurerm_eventhub" "test" { } `, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger) } + +func (EventHubResource) eventhubStatus(data acceptance.TestData, status string) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-eventhub-%d" + location = "%s" +} + +resource "azurerm_eventhub_namespace" "test" { + name = "acctesteventhubnamespace-%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + sku = "Basic" +} + +resource "azurerm_eventhub" "test" { + name = "acctesteventhub-%d" + namespace_name = azurerm_eventhub_namespace.test.name + resource_group_name = azurerm_resource_group.test.name + partition_count = 5 + message_retention = 1 + status = "%s" +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, status) +} diff --git a/azurerm/internal/services/eventhub/helpers.go b/azurerm/internal/services/eventhub/helpers.go index c1329d042e18..1ec6857a4cec 100644 --- a/azurerm/internal/services/eventhub/helpers.go +++ b/azurerm/internal/services/eventhub/helpers.go @@ -5,103 +5,100 @@ import ( "fmt" "log" - "github.com/Azure/azure-sdk-for-go/services/preview/eventhub/mgmt/2018-01-01-preview/eventhub" - 
"github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" ) // schema -func expandEventHubAuthorizationRuleRights(d *schema.ResourceData) *[]eventhub.AccessRights { - rights := make([]eventhub.AccessRights, 0) +func expandEventHubAuthorizationRuleRights(d *pluginsdk.ResourceData) []string { + rights := make([]string, 0) if d.Get("listen").(bool) { - rights = append(rights, eventhub.Listen) + rights = append(rights, "Listen") } if d.Get("send").(bool) { - rights = append(rights, eventhub.SendEnumValue) + rights = append(rights, "Send") } if d.Get("manage").(bool) { - rights = append(rights, eventhub.Manage) + rights = append(rights, "Manage") } - return &rights + return rights } -func flattenEventHubAuthorizationRuleRights(rights *[]eventhub.AccessRights) (listen, send, manage bool) { +func flattenEventHubAuthorizationRuleRights(rights []string) (listen, send, manage bool) { // zero (initial) value for a bool in go is false - if rights != nil { - for _, right := range *rights { - switch right { - case eventhub.Listen: - listen = true - case eventhub.SendEnumValue: - send = true - case eventhub.Manage: - manage = true - default: - log.Printf("[DEBUG] Unknown Authorization Rule Right '%s'", right) - } + for _, right := range rights { + switch right { + case "Listen": + listen = true + case "Send": + send = true + case "Manage": + manage = true + default: + log.Printf("[DEBUG] Unknown Authorization Rule Right '%s'", right) } } return listen, send, manage } -func eventHubAuthorizationRuleSchemaFrom(s map[string]*schema.Schema) map[string]*schema.Schema { - authSchema := map[string]*schema.Schema{ +func eventHubAuthorizationRuleSchemaFrom(s map[string]*pluginsdk.Schema) map[string]*pluginsdk.Schema { + authSchema := map[string]*pluginsdk.Schema{ "listen": { - Type: schema.TypeBool, + Type: 
pluginsdk.TypeBool, Optional: true, Default: false, }, "manage": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: false, }, "primary_connection_string": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, }, "primary_connection_string_alias": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, }, "primary_key": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, }, "secondary_connection_string": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, }, "secondary_connection_string_alias": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, }, "secondary_key": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, Sensitive: true, }, "send": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: false, }, @@ -109,7 +106,7 @@ func eventHubAuthorizationRuleSchemaFrom(s map[string]*schema.Schema) map[string return azure.MergeSchema(s, authSchema) } -func eventHubAuthorizationRuleCustomizeDiff(ctx context.Context, d *schema.ResourceDiff, _ interface{}) error { +func eventHubAuthorizationRuleCustomizeDiff(ctx context.Context, d *pluginsdk.ResourceDiff, _ interface{}) error { listen, hasListen := d.GetOk("listen") send, hasSend := d.GetOk("send") manage, hasManage := d.GetOk("manage") diff --git a/azurerm/internal/services/eventhub/migration/namespace_authorization_rule.go b/azurerm/internal/services/eventhub/migration/namespace_authorization_rule.go index 4b5199b4907b..e37fae6d8033 100644 --- a/azurerm/internal/services/eventhub/migration/namespace_authorization_rule.go +++ b/azurerm/internal/services/eventhub/migration/namespace_authorization_rule.go @@ -5,7 +5,6 @@ import ( "log" "strings" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" ) @@ -54,21 +53,21 @@ func (NamespaceAuthorizationRuleV1ToV2) UpgradeFunc() pluginsdk.StateUpgraderFun } func authorizationRuleSchemaForV0AndV1() map[string]*pluginsdk.Schema { - return map[string]*schema.Schema{ + return map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, }, "namespace_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, }, "resource_group_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, }, diff --git a/azurerm/internal/services/eventhub/registration.go b/azurerm/internal/services/eventhub/registration.go index b66af015efd9..c3b6b53f5ce6 100644 --- a/azurerm/internal/services/eventhub/registration.go +++ b/azurerm/internal/services/eventhub/registration.go @@ -1,8 +1,8 @@ package eventhub import ( - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/sdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" ) type Registration struct{} @@ -20,8 +20,8 @@ func (r Registration) WebsiteCategories() []string { } // SupportedDataSources returns the supported Data Sources supported by this Service -func (r Registration) SupportedDataSources() map[string]*schema.Resource { - return map[string]*schema.Resource{ +func (r Registration) SupportedDataSources() map[string]*pluginsdk.Resource { + return map[string]*pluginsdk.Resource{ "azurerm_eventhub": dataSourceEventHub(), "azurerm_eventhub_cluster": dataSourceEventHubCluster(), "azurerm_eventhub_authorization_rule": EventHubAuthorizationRuleDataSource(), @@ -32,22 +32,18 @@ func (r Registration) SupportedDataSources() map[string]*schema.Resource { } // SupportedResources returns the supported Resources supported by this Service -func 
(r Registration) SupportedResources() map[string]*schema.Resource { - return map[string]*schema.Resource{ +func (r Registration) SupportedResources() map[string]*pluginsdk.Resource { + return map[string]*pluginsdk.Resource{ "azurerm_eventhub_authorization_rule": resourceEventHubAuthorizationRule(), "azurerm_eventhub_cluster": resourceEventHubCluster(), "azurerm_eventhub_namespace_authorization_rule": resourceEventHubNamespaceAuthorizationRule(), + "azurerm_eventhub_namespace_customer_managed_key": resourceEventHubNamespaceCustomerManagedKey(), "azurerm_eventhub_namespace_disaster_recovery_config": resourceEventHubNamespaceDisasterRecoveryConfig(), "azurerm_eventhub_namespace": resourceEventHubNamespace(), "azurerm_eventhub": resourceEventHub(), } } -// PackagePath is the relative path to this package -func (r Registration) PackagePath() string { - return "TODO" -} - // DataSources returns a list of Data Sources supported by this Service func (r Registration) DataSources() []sdk.DataSource { return []sdk.DataSource{} diff --git a/azurerm/internal/services/eventhub/sdk/authorizationruleseventhubs/client.go b/azurerm/internal/services/eventhub/sdk/authorizationruleseventhubs/client.go new file mode 100644 index 000000000000..971a6130cdaa --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/authorizationruleseventhubs/client.go @@ -0,0 +1,15 @@ +package authorizationruleseventhubs + +import "github.com/Azure/go-autorest/autorest" + +type AuthorizationRulesEventHubsClient struct { + Client autorest.Client + baseUri string +} + +func NewAuthorizationRulesEventHubsClientWithBaseURI(endpoint string) AuthorizationRulesEventHubsClient { + return AuthorizationRulesEventHubsClient{ + Client: autorest.NewClientWithUserAgent(userAgent()), + baseUri: endpoint, + } +} diff --git a/azurerm/internal/services/eventhub/sdk/authorizationruleseventhubs/constants.go b/azurerm/internal/services/eventhub/sdk/authorizationruleseventhubs/constants.go new file mode 100644 index 
000000000000..f5bb89cbb642 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/authorizationruleseventhubs/constants.go @@ -0,0 +1,8 @@ +package authorizationruleseventhubs + +type KeyType string + +const ( + KeyTypePrimaryKey KeyType = "PrimaryKey" + KeyTypeSecondaryKey KeyType = "SecondaryKey" +) diff --git a/azurerm/internal/services/eventhub/sdk/authorizationruleseventhubs/id_authorizationrule.go b/azurerm/internal/services/eventhub/sdk/authorizationruleseventhubs/id_authorizationrule.go new file mode 100644 index 000000000000..30978c862ff0 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/authorizationruleseventhubs/id_authorizationrule.go @@ -0,0 +1,144 @@ +package authorizationruleseventhubs + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +type AuthorizationRuleId struct { + SubscriptionId string + ResourceGroup string + NamespaceName string + EventhubName string + Name string +} + +func NewAuthorizationRuleID(subscriptionId, resourceGroup, namespaceName, eventhubName, name string) AuthorizationRuleId { + return AuthorizationRuleId{ + SubscriptionId: subscriptionId, + ResourceGroup: resourceGroup, + NamespaceName: namespaceName, + EventhubName: eventhubName, + Name: name, + } +} + +func (id AuthorizationRuleId) String() string { + segments := []string{ + fmt.Sprintf("Name %q", id.Name), + fmt.Sprintf("Eventhub Name %q", id.EventhubName), + fmt.Sprintf("Namespace Name %q", id.NamespaceName), + fmt.Sprintf("Resource Group %q", id.ResourceGroup), + } + segmentsStr := strings.Join(segments, " / ") + return fmt.Sprintf("%s: (%s)", "Authorization Rule", segmentsStr) +} + +func (id AuthorizationRuleId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.EventHub/namespaces/%s/eventhubs/%s/authorizationRules/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.NamespaceName, id.EventhubName, id.Name) +} + +// AuthorizationRuleID 
parses a AuthorizationRule ID into an AuthorizationRuleId struct +func AuthorizationRuleID(input string) (*AuthorizationRuleId, error) { + id, err := resourceids.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := AuthorizationRuleId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + if resourceId.NamespaceName, err = id.PopSegment("namespaces"); err != nil { + return nil, err + } + if resourceId.EventhubName, err = id.PopSegment("eventhubs"); err != nil { + return nil, err + } + if resourceId.Name, err = id.PopSegment("authorizationRules"); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} + +// AuthorizationRuleIDInsensitively parses an AuthorizationRule ID into an AuthorizationRuleId struct, insensitively +// This should only be used to parse an ID for rewriting to a consistent casing, +// the AuthorizationRuleID method should be used instead for validation etc. 
+func AuthorizationRuleIDInsensitively(input string) (*AuthorizationRuleId, error) { + id, err := resourceids.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := AuthorizationRuleId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + // find the correct casing for the 'namespaces' segment + namespacesKey := "namespaces" + for key := range id.Path { + if strings.EqualFold(key, namespacesKey) { + namespacesKey = key + break + } + } + if resourceId.NamespaceName, err = id.PopSegment(namespacesKey); err != nil { + return nil, err + } + + // find the correct casing for the 'eventhubs' segment + eventhubsKey := "eventhubs" + for key := range id.Path { + if strings.EqualFold(key, eventhubsKey) { + eventhubsKey = key + break + } + } + if resourceId.EventhubName, err = id.PopSegment(eventhubsKey); err != nil { + return nil, err + } + + // find the correct casing for the 'authorizationRules' segment + authorizationRulesKey := "authorizationRules" + for key := range id.Path { + if strings.EqualFold(key, authorizationRulesKey) { + authorizationRulesKey = key + break + } + } + if resourceId.Name, err = id.PopSegment(authorizationRulesKey); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} diff --git a/azurerm/internal/services/eventhub/sdk/authorizationruleseventhubs/id_authorizationrule_test.go b/azurerm/internal/services/eventhub/sdk/authorizationruleseventhubs/id_authorizationrule_test.go new file mode 100644 index 000000000000..e50f8db8f5b6 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/authorizationruleseventhubs/id_authorizationrule_test.go @@ -0,0 +1,297 @@ +package 
authorizationruleseventhubs + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.Id = AuthorizationRuleId{} + +func TestAuthorizationRuleIDFormatter(t *testing.T) { + actual := NewAuthorizationRuleID("{subscriptionId}", "{resourceGroupName}", "{namespaceName}", "{eventHubName}", "{authorizationRuleName}").ID() + expected := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/authorizationRules/{authorizationRuleName}" + if actual != expected { + t.Fatalf("Expected %q but got %q", expected, actual) + } +} + +func TestAuthorizationRuleID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *AuthorizationRuleId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/{subscriptionId}/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/{subscriptionId}/resourceGroups/", + Error: true, + }, + + { + // missing NamespaceName + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/", + Error: true, + }, + + { + // missing value for NamespaceName + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/", + Error: true, + }, + + { + // missing EventhubName + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/", + Error: true, + }, + + { + // missing value for EventhubName + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/", + Error: true, + }, + + { + // 
missing Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/", + Error: true, + }, + + { + // missing value for Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/authorizationRules/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/authorizationRules/{authorizationRuleName}", + Expected: &AuthorizationRuleId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + NamespaceName: "{namespaceName}", + EventhubName: "{eventHubName}", + Name: "{authorizationRuleName}", + }, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/{SUBSCRIPTIONID}/RESOURCEGROUPS/{RESOURCEGROUPNAME}/PROVIDERS/MICROSOFT.EVENTHUB/NAMESPACES/{NAMESPACENAME}/EVENTHUBS/{EVENTHUBNAME}/AUTHORIZATIONRULES/{AUTHORIZATIONRULENAME}", + Error: true, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := AuthorizationRuleID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.NamespaceName != v.Expected.NamespaceName { + t.Fatalf("Expected %q but got %q for NamespaceName", v.Expected.NamespaceName, actual.NamespaceName) + } + if actual.EventhubName != v.Expected.EventhubName { + t.Fatalf("Expected %q but 
got %q for EventhubName", v.Expected.EventhubName, actual.EventhubName) + } + if actual.Name != v.Expected.Name { + t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + } + } +} + +func TestAuthorizationRuleIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *AuthorizationRuleId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/{subscriptionId}/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/{subscriptionId}/resourceGroups/", + Error: true, + }, + + { + // missing NamespaceName + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/", + Error: true, + }, + + { + // missing value for NamespaceName + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/", + Error: true, + }, + + { + // missing EventhubName + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/", + Error: true, + }, + + { + // missing value for EventhubName + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/", + Error: true, + }, + + { + // missing Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/", + Error: true, + }, + + { + // missing value for Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/authorizationRules/", + Error: true, + }, + + { + // valid + Input: 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/authorizationRules/{authorizationRuleName}", + Expected: &AuthorizationRuleId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + NamespaceName: "{namespaceName}", + EventhubName: "{eventHubName}", + Name: "{authorizationRuleName}", + }, + }, + + { + // lower-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/authorizationrules/{authorizationRuleName}", + Expected: &AuthorizationRuleId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + NamespaceName: "{namespaceName}", + EventhubName: "{eventHubName}", + Name: "{authorizationRuleName}", + }, + }, + + { + // upper-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/NAMESPACES/{namespaceName}/EVENTHUBS/{eventHubName}/AUTHORIZATIONRULES/{authorizationRuleName}", + Expected: &AuthorizationRuleId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + NamespaceName: "{namespaceName}", + EventhubName: "{eventHubName}", + Name: "{authorizationRuleName}", + }, + }, + + { + // mixed-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/NaMeSpAcEs/{namespaceName}/EvEnThUbS/{eventHubName}/AuThOrIzAtIoNrUlEs/{authorizationRuleName}", + Expected: &AuthorizationRuleId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + NamespaceName: "{namespaceName}", + EventhubName: "{eventHubName}", + Name: "{authorizationRuleName}", + }, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := AuthorizationRuleIDInsensitively(v.Input) + if err != nil { + if v.Error { + 
continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.NamespaceName != v.Expected.NamespaceName { + t.Fatalf("Expected %q but got %q for NamespaceName", v.Expected.NamespaceName, actual.NamespaceName) + } + if actual.EventhubName != v.Expected.EventhubName { + t.Fatalf("Expected %q but got %q for EventhubName", v.Expected.EventhubName, actual.EventhubName) + } + if actual.Name != v.Expected.Name { + t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + } + } +} diff --git a/azurerm/internal/services/eventhub/sdk/authorizationruleseventhubs/id_eventhub.go b/azurerm/internal/services/eventhub/sdk/authorizationruleseventhubs/id_eventhub.go new file mode 100644 index 000000000000..6d9ae68f3064 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/authorizationruleseventhubs/id_eventhub.go @@ -0,0 +1,126 @@ +package authorizationruleseventhubs + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +type EventhubId struct { + SubscriptionId string + ResourceGroup string + NamespaceName string + Name string +} + +func NewEventhubID(subscriptionId, resourceGroup, namespaceName, name string) EventhubId { + return EventhubId{ + SubscriptionId: subscriptionId, + ResourceGroup: resourceGroup, + NamespaceName: namespaceName, + Name: name, + } +} + +func (id EventhubId) String() string { + segments := []string{ + fmt.Sprintf("Name %q", id.Name), + fmt.Sprintf("Namespace Name %q", id.NamespaceName), + fmt.Sprintf("Resource Group %q", id.ResourceGroup), + } + segmentsStr := 
strings.Join(segments, " / ") + return fmt.Sprintf("%s: (%s)", "Eventhub", segmentsStr) +} + +func (id EventhubId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.EventHub/namespaces/%s/eventhubs/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.NamespaceName, id.Name) +} + +// EventhubID parses a Eventhub ID into an EventhubId struct +func EventhubID(input string) (*EventhubId, error) { + id, err := resourceids.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := EventhubId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + if resourceId.NamespaceName, err = id.PopSegment("namespaces"); err != nil { + return nil, err + } + if resourceId.Name, err = id.PopSegment("eventhubs"); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} + +// EventhubIDInsensitively parses an Eventhub ID into an EventhubId struct, insensitively +// This should only be used to parse an ID for rewriting to a consistent casing, +// the EventhubID method should be used instead for validation etc. 
+func EventhubIDInsensitively(input string) (*EventhubId, error) { + id, err := resourceids.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := EventhubId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + // find the correct casing for the 'namespaces' segment + namespacesKey := "namespaces" + for key := range id.Path { + if strings.EqualFold(key, namespacesKey) { + namespacesKey = key + break + } + } + if resourceId.NamespaceName, err = id.PopSegment(namespacesKey); err != nil { + return nil, err + } + + // find the correct casing for the 'eventhubs' segment + eventhubsKey := "eventhubs" + for key := range id.Path { + if strings.EqualFold(key, eventhubsKey) { + eventhubsKey = key + break + } + } + if resourceId.Name, err = id.PopSegment(eventhubsKey); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} diff --git a/azurerm/internal/services/eventhub/sdk/authorizationruleseventhubs/id_eventhub_test.go b/azurerm/internal/services/eventhub/sdk/authorizationruleseventhubs/id_eventhub_test.go new file mode 100644 index 000000000000..7c18b27b03b5 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/authorizationruleseventhubs/id_eventhub_test.go @@ -0,0 +1,262 @@ +package authorizationruleseventhubs + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.Id = EventhubId{} + +func TestEventhubIDFormatter(t *testing.T) { + actual := NewEventhubID("{subscriptionId}", "{resourceGroupName}", "{namespaceName}", "{eventHubName}").ID() + expected := 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}" + if actual != expected { + t.Fatalf("Expected %q but got %q", expected, actual) + } +} + +func TestEventhubID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *EventhubId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/{subscriptionId}/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/{subscriptionId}/resourceGroups/", + Error: true, + }, + + { + // missing NamespaceName + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/", + Error: true, + }, + + { + // missing value for NamespaceName + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/", + Error: true, + }, + + { + // missing Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/", + Error: true, + }, + + { + // missing value for Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}", + Expected: &EventhubId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + NamespaceName: "{namespaceName}", + Name: "{eventHubName}", + }, + }, + + { + // upper-cased + Input: 
"/SUBSCRIPTIONS/{SUBSCRIPTIONID}/RESOURCEGROUPS/{RESOURCEGROUPNAME}/PROVIDERS/MICROSOFT.EVENTHUB/NAMESPACES/{NAMESPACENAME}/EVENTHUBS/{EVENTHUBNAME}", + Error: true, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := EventhubID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.NamespaceName != v.Expected.NamespaceName { + t.Fatalf("Expected %q but got %q for NamespaceName", v.Expected.NamespaceName, actual.NamespaceName) + } + if actual.Name != v.Expected.Name { + t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + } + } +} + +func TestEventhubIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *EventhubId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/{subscriptionId}/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/{subscriptionId}/resourceGroups/", + Error: true, + }, + + { + // missing NamespaceName + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/", + Error: true, + }, + + { + // missing value for NamespaceName + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/", + Error: true, + }, + + { 
+ // missing Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/", + Error: true, + }, + + { + // missing value for Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}", + Expected: &EventhubId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + NamespaceName: "{namespaceName}", + Name: "{eventHubName}", + }, + }, + + { + // lower-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}", + Expected: &EventhubId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + NamespaceName: "{namespaceName}", + Name: "{eventHubName}", + }, + }, + + { + // upper-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/NAMESPACES/{namespaceName}/EVENTHUBS/{eventHubName}", + Expected: &EventhubId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + NamespaceName: "{namespaceName}", + Name: "{eventHubName}", + }, + }, + + { + // mixed-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/NaMeSpAcEs/{namespaceName}/EvEnThUbS/{eventHubName}", + Expected: &EventhubId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + NamespaceName: "{namespaceName}", + Name: "{eventHubName}", + }, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := EventhubIDInsensitively(v.Input) + if err != nil { + if 
v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.NamespaceName != v.Expected.NamespaceName { + t.Fatalf("Expected %q but got %q for NamespaceName", v.Expected.NamespaceName, actual.NamespaceName) + } + if actual.Name != v.Expected.Name { + t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + } + } +} diff --git a/azurerm/internal/services/eventhub/sdk/authorizationruleseventhubs/method_eventhubscreateorupdateauthorizationrule_autorest.go b/azurerm/internal/services/eventhub/sdk/authorizationruleseventhubs/method_eventhubscreateorupdateauthorizationrule_autorest.go new file mode 100644 index 000000000000..0fb8c86df9c8 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/authorizationruleseventhubs/method_eventhubscreateorupdateauthorizationrule_autorest.go @@ -0,0 +1,65 @@ +package authorizationruleseventhubs + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type EventHubsCreateOrUpdateAuthorizationRuleResponse struct { + HttpResponse *http.Response + Model *AuthorizationRule +} + +// EventHubsCreateOrUpdateAuthorizationRule ... 
+func (c AuthorizationRulesEventHubsClient) EventHubsCreateOrUpdateAuthorizationRule(ctx context.Context, id AuthorizationRuleId, input AuthorizationRule) (result EventHubsCreateOrUpdateAuthorizationRuleResponse, err error) { + req, err := c.preparerForEventHubsCreateOrUpdateAuthorizationRule(ctx, id, input) + if err != nil { + err = autorest.NewErrorWithError(err, "authorizationruleseventhubs.AuthorizationRulesEventHubsClient", "EventHubsCreateOrUpdateAuthorizationRule", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "authorizationruleseventhubs.AuthorizationRulesEventHubsClient", "EventHubsCreateOrUpdateAuthorizationRule", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForEventHubsCreateOrUpdateAuthorizationRule(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "authorizationruleseventhubs.AuthorizationRulesEventHubsClient", "EventHubsCreateOrUpdateAuthorizationRule", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForEventHubsCreateOrUpdateAuthorizationRule prepares the EventHubsCreateOrUpdateAuthorizationRule request. 
+func (c AuthorizationRulesEventHubsClient) preparerForEventHubsCreateOrUpdateAuthorizationRule(ctx context.Context, id AuthorizationRuleId, input AuthorizationRule) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithJSON(input), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForEventHubsCreateOrUpdateAuthorizationRule handles the response to the EventHubsCreateOrUpdateAuthorizationRule request. The method always +// closes the http.Response Body. +func (c AuthorizationRulesEventHubsClient) responderForEventHubsCreateOrUpdateAuthorizationRule(resp *http.Response) (result EventHubsCreateOrUpdateAuthorizationRuleResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/azurerm/internal/services/eventhub/sdk/authorizationruleseventhubs/method_eventhubslistauthorizationrules_autorest.go b/azurerm/internal/services/eventhub/sdk/authorizationruleseventhubs/method_eventhubslistauthorizationrules_autorest.go new file mode 100644 index 000000000000..56e22e8cbc02 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/authorizationruleseventhubs/method_eventhubslistauthorizationrules_autorest.go @@ -0,0 +1,196 @@ +package authorizationruleseventhubs + +import ( + "context" + "fmt" + "net/http" + "net/url" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type EventHubsListAuthorizationRulesResponse struct { + HttpResponse *http.Response + Model *[]AuthorizationRule + + nextLink *string + nextPageFunc func(ctx 
context.Context, nextLink string) (EventHubsListAuthorizationRulesResponse, error) +} + +type EventHubsListAuthorizationRulesCompleteResult struct { + Items []AuthorizationRule +} + +func (r EventHubsListAuthorizationRulesResponse) HasMore() bool { + return r.nextLink != nil +} + +func (r EventHubsListAuthorizationRulesResponse) LoadMore(ctx context.Context) (resp EventHubsListAuthorizationRulesResponse, err error) { + if !r.HasMore() { + err = fmt.Errorf("no more pages returned") + return + } + return r.nextPageFunc(ctx, *r.nextLink) +} + +type AuthorizationRulePredicate struct { + // TODO: implement me +} + +func (p AuthorizationRulePredicate) Matches(input AuthorizationRule) bool { + // TODO: implement me + // if p.Name != nil && input.Name != *p.Name { + // return false + // } + + return true +} + +// EventHubsListAuthorizationRules ... +func (c AuthorizationRulesEventHubsClient) EventHubsListAuthorizationRules(ctx context.Context, id EventhubId) (resp EventHubsListAuthorizationRulesResponse, err error) { + req, err := c.preparerForEventHubsListAuthorizationRules(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "authorizationruleseventhubs.AuthorizationRulesEventHubsClient", "EventHubsListAuthorizationRules", nil, "Failure preparing request") + return + } + + resp.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "authorizationruleseventhubs.AuthorizationRulesEventHubsClient", "EventHubsListAuthorizationRules", resp.HttpResponse, "Failure sending request") + return + } + + resp, err = c.responderForEventHubsListAuthorizationRules(resp.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "authorizationruleseventhubs.AuthorizationRulesEventHubsClient", "EventHubsListAuthorizationRules", resp.HttpResponse, "Failure responding to request") + return + } + return +} + +// EventHubsListAuthorizationRulesCompleteMatchingPredicate retrieves all of 
the results into a single object +func (c AuthorizationRulesEventHubsClient) EventHubsListAuthorizationRulesComplete(ctx context.Context, id EventhubId) (EventHubsListAuthorizationRulesCompleteResult, error) { + return c.EventHubsListAuthorizationRulesCompleteMatchingPredicate(ctx, id, AuthorizationRulePredicate{}) +} + +// EventHubsListAuthorizationRulesCompleteMatchingPredicate retrieves all of the results and then applied the predicate +func (c AuthorizationRulesEventHubsClient) EventHubsListAuthorizationRulesCompleteMatchingPredicate(ctx context.Context, id EventhubId, predicate AuthorizationRulePredicate) (resp EventHubsListAuthorizationRulesCompleteResult, err error) { + items := make([]AuthorizationRule, 0) + + page, err := c.EventHubsListAuthorizationRules(ctx, id) + if err != nil { + err = fmt.Errorf("loading the initial page: %+v", err) + return + } + if page.Model != nil { + for _, v := range *page.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + for page.HasMore() { + page, err = page.LoadMore(ctx) + if err != nil { + err = fmt.Errorf("loading the next page: %+v", err) + return + } + + if page.Model != nil { + for _, v := range *page.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + } + + out := EventHubsListAuthorizationRulesCompleteResult{ + Items: items, + } + return out, nil +} + +// preparerForEventHubsListAuthorizationRules prepares the EventHubsListAuthorizationRules request. 
+func (c AuthorizationRulesEventHubsClient) preparerForEventHubsListAuthorizationRules(ctx context.Context, id EventhubId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/authorizationRules", id.ID())), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// preparerForEventHubsListAuthorizationRulesWithNextLink prepares the EventHubsListAuthorizationRules request with the given nextLink token. +func (c AuthorizationRulesEventHubsClient) preparerForEventHubsListAuthorizationRulesWithNextLink(ctx context.Context, nextLink string) (*http.Request, error) { + uri, err := url.Parse(nextLink) + if err != nil { + return nil, fmt.Errorf("parsing nextLink %q: %+v", nextLink, err) + } + queryParameters := map[string]interface{}{} + for k, v := range uri.Query() { + if len(v) == 0 { + continue + } + val := v[0] + val = autorest.Encode("query", val) + queryParameters[k] = val + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(uri.Path), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForEventHubsListAuthorizationRules handles the response to the EventHubsListAuthorizationRules request. The method always +// closes the http.Response Body. 
+func (c AuthorizationRulesEventHubsClient) responderForEventHubsListAuthorizationRules(resp *http.Response) (result EventHubsListAuthorizationRulesResponse, err error) { + type page struct { + Values []AuthorizationRule `json:"value"` + NextLink *string `json:"nextLink"` + } + var respObj page + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&respObj), + autorest.ByClosing()) + result.HttpResponse = resp + result.Model = &respObj.Values + result.nextLink = respObj.NextLink + if respObj.NextLink != nil { + result.nextPageFunc = func(ctx context.Context, nextLink string) (result EventHubsListAuthorizationRulesResponse, err error) { + req, err := c.preparerForEventHubsListAuthorizationRulesWithNextLink(ctx, nextLink) + if err != nil { + err = autorest.NewErrorWithError(err, "authorizationruleseventhubs.AuthorizationRulesEventHubsClient", "EventHubsListAuthorizationRules", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "authorizationruleseventhubs.AuthorizationRulesEventHubsClient", "EventHubsListAuthorizationRules", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForEventHubsListAuthorizationRules(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "authorizationruleseventhubs.AuthorizationRulesEventHubsClient", "EventHubsListAuthorizationRules", result.HttpResponse, "Failure responding to request") + return + } + + return + } + } + return +} diff --git a/azurerm/internal/services/eventhub/sdk/authorizationruleseventhubs/method_eventhubslistkeys_autorest.go b/azurerm/internal/services/eventhub/sdk/authorizationruleseventhubs/method_eventhubslistkeys_autorest.go new file mode 100644 index 000000000000..dbd8311325d0 --- /dev/null +++ 
b/azurerm/internal/services/eventhub/sdk/authorizationruleseventhubs/method_eventhubslistkeys_autorest.go @@ -0,0 +1,65 @@ +package authorizationruleseventhubs + +import ( + "context" + "fmt" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type EventHubsListKeysResponse struct { + HttpResponse *http.Response + Model *AccessKeys +} + +// EventHubsListKeys ... +func (c AuthorizationRulesEventHubsClient) EventHubsListKeys(ctx context.Context, id AuthorizationRuleId) (result EventHubsListKeysResponse, err error) { + req, err := c.preparerForEventHubsListKeys(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "authorizationruleseventhubs.AuthorizationRulesEventHubsClient", "EventHubsListKeys", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "authorizationruleseventhubs.AuthorizationRulesEventHubsClient", "EventHubsListKeys", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForEventHubsListKeys(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "authorizationruleseventhubs.AuthorizationRulesEventHubsClient", "EventHubsListKeys", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForEventHubsListKeys prepares the EventHubsListKeys request. 
+func (c AuthorizationRulesEventHubsClient) preparerForEventHubsListKeys(ctx context.Context, id AuthorizationRuleId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/listKeys", id.ID())), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForEventHubsListKeys handles the response to the EventHubsListKeys request. The method always +// closes the http.Response Body. +func (c AuthorizationRulesEventHubsClient) responderForEventHubsListKeys(resp *http.Response) (result EventHubsListKeysResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/azurerm/internal/services/eventhub/sdk/authorizationruleseventhubs/method_eventhubsregeneratekeys_autorest.go b/azurerm/internal/services/eventhub/sdk/authorizationruleseventhubs/method_eventhubsregeneratekeys_autorest.go new file mode 100644 index 000000000000..437b2f2d293f --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/authorizationruleseventhubs/method_eventhubsregeneratekeys_autorest.go @@ -0,0 +1,66 @@ +package authorizationruleseventhubs + +import ( + "context" + "fmt" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type EventHubsRegenerateKeysResponse struct { + HttpResponse *http.Response + Model *AccessKeys +} + +// EventHubsRegenerateKeys ... 
+func (c AuthorizationRulesEventHubsClient) EventHubsRegenerateKeys(ctx context.Context, id AuthorizationRuleId, input RegenerateAccessKeyParameters) (result EventHubsRegenerateKeysResponse, err error) { + req, err := c.preparerForEventHubsRegenerateKeys(ctx, id, input) + if err != nil { + err = autorest.NewErrorWithError(err, "authorizationruleseventhubs.AuthorizationRulesEventHubsClient", "EventHubsRegenerateKeys", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "authorizationruleseventhubs.AuthorizationRulesEventHubsClient", "EventHubsRegenerateKeys", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForEventHubsRegenerateKeys(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "authorizationruleseventhubs.AuthorizationRulesEventHubsClient", "EventHubsRegenerateKeys", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForEventHubsRegenerateKeys prepares the EventHubsRegenerateKeys request. +func (c AuthorizationRulesEventHubsClient) preparerForEventHubsRegenerateKeys(ctx context.Context, id AuthorizationRuleId, input RegenerateAccessKeyParameters) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/regenerateKeys", id.ID())), + autorest.WithJSON(input), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForEventHubsRegenerateKeys handles the response to the EventHubsRegenerateKeys request. The method always +// closes the http.Response Body. 
+func (c AuthorizationRulesEventHubsClient) responderForEventHubsRegenerateKeys(resp *http.Response) (result EventHubsRegenerateKeysResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/azurerm/internal/services/eventhub/sdk/authorizationruleseventhubs/model_accesskeys.go b/azurerm/internal/services/eventhub/sdk/authorizationruleseventhubs/model_accesskeys.go new file mode 100644 index 000000000000..d0b2fc753c3a --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/authorizationruleseventhubs/model_accesskeys.go @@ -0,0 +1,11 @@ +package authorizationruleseventhubs + +type AccessKeys struct { + AliasPrimaryConnectionString *string `json:"aliasPrimaryConnectionString,omitempty"` + AliasSecondaryConnectionString *string `json:"aliasSecondaryConnectionString,omitempty"` + KeyName *string `json:"keyName,omitempty"` + PrimaryConnectionString *string `json:"primaryConnectionString,omitempty"` + PrimaryKey *string `json:"primaryKey,omitempty"` + SecondaryConnectionString *string `json:"secondaryConnectionString,omitempty"` + SecondaryKey *string `json:"secondaryKey,omitempty"` +} diff --git a/azurerm/internal/services/eventhub/sdk/authorizationruleseventhubs/model_authorizationrule.go b/azurerm/internal/services/eventhub/sdk/authorizationruleseventhubs/model_authorizationrule.go new file mode 100644 index 000000000000..a747d5013f25 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/authorizationruleseventhubs/model_authorizationrule.go @@ -0,0 +1,8 @@ +package authorizationruleseventhubs + +type AuthorizationRule struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties *AuthorizationRuleProperties `json:"properties,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git 
a/azurerm/internal/services/eventhub/sdk/authorizationruleseventhubs/model_authorizationruleproperties.go b/azurerm/internal/services/eventhub/sdk/authorizationruleseventhubs/model_authorizationruleproperties.go new file mode 100644 index 000000000000..9ed0fe419572 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/authorizationruleseventhubs/model_authorizationruleproperties.go @@ -0,0 +1,5 @@ +package authorizationruleseventhubs + +type AuthorizationRuleProperties struct { + Rights []string `json:"rights"` +} diff --git a/azurerm/internal/services/eventhub/sdk/authorizationruleseventhubs/model_regenerateaccesskeyparameters.go b/azurerm/internal/services/eventhub/sdk/authorizationruleseventhubs/model_regenerateaccesskeyparameters.go new file mode 100644 index 000000000000..f2734bdb613d --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/authorizationruleseventhubs/model_regenerateaccesskeyparameters.go @@ -0,0 +1,6 @@ +package authorizationruleseventhubs + +type RegenerateAccessKeyParameters struct { + Key *string `json:"key,omitempty"` + KeyType KeyType `json:"keyType"` +} diff --git a/azurerm/internal/services/eventhub/sdk/authorizationruleseventhubs/version.go b/azurerm/internal/services/eventhub/sdk/authorizationruleseventhubs/version.go new file mode 100644 index 000000000000..7bbf960fde4c --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/authorizationruleseventhubs/version.go @@ -0,0 +1,9 @@ +package authorizationruleseventhubs + +import "fmt" + +const defaultApiVersion = "2017-04-01" + +func userAgent() string { + return fmt.Sprintf("pandora/authorizationruleseventhubs/%s", defaultApiVersion) +} diff --git a/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces/client.go b/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces/client.go new file mode 100644 index 000000000000..0d9867a6c6ae --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces/client.go @@ -0,0 +1,15 @@ 
+package authorizationrulesnamespaces + +import "github.com/Azure/go-autorest/autorest" + +type AuthorizationRulesNamespacesClient struct { + Client autorest.Client + baseUri string +} + +func NewAuthorizationRulesNamespacesClientWithBaseURI(endpoint string) AuthorizationRulesNamespacesClient { + return AuthorizationRulesNamespacesClient{ + Client: autorest.NewClientWithUserAgent(userAgent()), + baseUri: endpoint, + } +} diff --git a/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces/constants.go b/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces/constants.go new file mode 100644 index 000000000000..cd787fdd5797 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces/constants.go @@ -0,0 +1,8 @@ +package authorizationrulesnamespaces + +type KeyType string + +const ( + KeyTypePrimaryKey KeyType = "PrimaryKey" + KeyTypeSecondaryKey KeyType = "SecondaryKey" +) diff --git a/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces/id_authorizationrule.go b/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces/id_authorizationrule.go new file mode 100644 index 000000000000..f6c1161e46ac --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces/id_authorizationrule.go @@ -0,0 +1,126 @@ +package authorizationrulesnamespaces + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +type AuthorizationRuleId struct { + SubscriptionId string + ResourceGroup string + NamespaceName string + Name string +} + +func NewAuthorizationRuleID(subscriptionId, resourceGroup, namespaceName, name string) AuthorizationRuleId { + return AuthorizationRuleId{ + SubscriptionId: subscriptionId, + ResourceGroup: resourceGroup, + NamespaceName: namespaceName, + Name: name, + } +} + +func (id AuthorizationRuleId) String() string { + segments := []string{ + fmt.Sprintf("Name %q", id.Name), + fmt.Sprintf("Namespace Name %q", 
id.NamespaceName), + fmt.Sprintf("Resource Group %q", id.ResourceGroup), + } + segmentsStr := strings.Join(segments, " / ") + return fmt.Sprintf("%s: (%s)", "Authorization Rule", segmentsStr) +} + +func (id AuthorizationRuleId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.EventHub/namespaces/%s/authorizationRules/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.NamespaceName, id.Name) +} + +// AuthorizationRuleID parses a AuthorizationRule ID into an AuthorizationRuleId struct +func AuthorizationRuleID(input string) (*AuthorizationRuleId, error) { + id, err := resourceids.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := AuthorizationRuleId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + if resourceId.NamespaceName, err = id.PopSegment("namespaces"); err != nil { + return nil, err + } + if resourceId.Name, err = id.PopSegment("authorizationRules"); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} + +// AuthorizationRuleIDInsensitively parses an AuthorizationRule ID into an AuthorizationRuleId struct, insensitively +// This should only be used to parse an ID for rewriting to a consistent casing, +// the AuthorizationRuleID method should be used instead for validation etc. 
+func AuthorizationRuleIDInsensitively(input string) (*AuthorizationRuleId, error) { + id, err := resourceids.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := AuthorizationRuleId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + // find the correct casing for the 'namespaces' segment + namespacesKey := "namespaces" + for key := range id.Path { + if strings.EqualFold(key, namespacesKey) { + namespacesKey = key + break + } + } + if resourceId.NamespaceName, err = id.PopSegment(namespacesKey); err != nil { + return nil, err + } + + // find the correct casing for the 'authorizationRules' segment + authorizationRulesKey := "authorizationRules" + for key := range id.Path { + if strings.EqualFold(key, authorizationRulesKey) { + authorizationRulesKey = key + break + } + } + if resourceId.Name, err = id.PopSegment(authorizationRulesKey); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} diff --git a/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces/id_authorizationrule_test.go b/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces/id_authorizationrule_test.go new file mode 100644 index 000000000000..101285bccc47 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces/id_authorizationrule_test.go @@ -0,0 +1,262 @@ +package authorizationrulesnamespaces + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.Id = AuthorizationRuleId{} + +func TestAuthorizationRuleIDFormatter(t *testing.T) { + actual := NewAuthorizationRuleID("{subscriptionId}", 
"{resourceGroupName}", "{namespaceName}", "{authorizationRuleName}").ID() + expected := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/authorizationRules/{authorizationRuleName}" + if actual != expected { + t.Fatalf("Expected %q but got %q", expected, actual) + } +} + +func TestAuthorizationRuleID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *AuthorizationRuleId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/{subscriptionId}/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/{subscriptionId}/resourceGroups/", + Error: true, + }, + + { + // missing NamespaceName + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/", + Error: true, + }, + + { + // missing value for NamespaceName + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/", + Error: true, + }, + + { + // missing Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/", + Error: true, + }, + + { + // missing value for Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/authorizationRules/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/authorizationRules/{authorizationRuleName}", + Expected: &AuthorizationRuleId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + NamespaceName: "{namespaceName}", + Name: 
"{authorizationRuleName}", + }, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/{SUBSCRIPTIONID}/RESOURCEGROUPS/{RESOURCEGROUPNAME}/PROVIDERS/MICROSOFT.EVENTHUB/NAMESPACES/{NAMESPACENAME}/AUTHORIZATIONRULES/{AUTHORIZATIONRULENAME}", + Error: true, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := AuthorizationRuleID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.NamespaceName != v.Expected.NamespaceName { + t.Fatalf("Expected %q but got %q for NamespaceName", v.Expected.NamespaceName, actual.NamespaceName) + } + if actual.Name != v.Expected.Name { + t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + } + } +} + +func TestAuthorizationRuleIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *AuthorizationRuleId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/{subscriptionId}/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/{subscriptionId}/resourceGroups/", + Error: true, + }, + + { + // missing NamespaceName + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/", + Error: true, + }, + + { + // missing value for NamespaceName + Input: 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/", + Error: true, + }, + + { + // missing Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/", + Error: true, + }, + + { + // missing value for Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/authorizationRules/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/authorizationRules/{authorizationRuleName}", + Expected: &AuthorizationRuleId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + NamespaceName: "{namespaceName}", + Name: "{authorizationRuleName}", + }, + }, + + { + // lower-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/authorizationrules/{authorizationRuleName}", + Expected: &AuthorizationRuleId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + NamespaceName: "{namespaceName}", + Name: "{authorizationRuleName}", + }, + }, + + { + // upper-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/NAMESPACES/{namespaceName}/AUTHORIZATIONRULES/{authorizationRuleName}", + Expected: &AuthorizationRuleId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + NamespaceName: "{namespaceName}", + Name: "{authorizationRuleName}", + }, + }, + + { + // mixed-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/NaMeSpAcEs/{namespaceName}/AuThOrIzAtIoNrUlEs/{authorizationRuleName}", + Expected: &AuthorizationRuleId{ + SubscriptionId: 
"{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + NamespaceName: "{namespaceName}", + Name: "{authorizationRuleName}", + }, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := AuthorizationRuleIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.NamespaceName != v.Expected.NamespaceName { + t.Fatalf("Expected %q but got %q for NamespaceName", v.Expected.NamespaceName, actual.NamespaceName) + } + if actual.Name != v.Expected.Name { + t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + } + } +} diff --git a/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces/id_namespace.go b/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces/id_namespace.go new file mode 100644 index 000000000000..bd78c1943b8f --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces/id_namespace.go @@ -0,0 +1,108 @@ +package authorizationrulesnamespaces + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +type NamespaceId struct { + SubscriptionId string + ResourceGroup string + Name string +} + +func NewNamespaceID(subscriptionId, resourceGroup, name string) NamespaceId { + return NamespaceId{ + SubscriptionId: subscriptionId, + ResourceGroup: resourceGroup, + Name: name, + } +} + +func (id NamespaceId) String() string { + segments := []string{ + fmt.Sprintf("Name %q", id.Name), + fmt.Sprintf("Resource Group %q", 
id.ResourceGroup), + } + segmentsStr := strings.Join(segments, " / ") + return fmt.Sprintf("%s: (%s)", "Namespace", segmentsStr) +} + +func (id NamespaceId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.EventHub/namespaces/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.Name) +} + +// NamespaceID parses a Namespace ID into an NamespaceId struct +func NamespaceID(input string) (*NamespaceId, error) { + id, err := resourceids.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := NamespaceId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + if resourceId.Name, err = id.PopSegment("namespaces"); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} + +// NamespaceIDInsensitively parses an Namespace ID into an NamespaceId struct, insensitively +// This should only be used to parse an ID for rewriting to a consistent casing, +// the NamespaceID method should be used instead for validation etc. 
+func NamespaceIDInsensitively(input string) (*NamespaceId, error) { + id, err := resourceids.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := NamespaceId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + // find the correct casing for the 'namespaces' segment + namespacesKey := "namespaces" + for key := range id.Path { + if strings.EqualFold(key, namespacesKey) { + namespacesKey = key + break + } + } + if resourceId.Name, err = id.PopSegment(namespacesKey); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} diff --git a/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces/id_namespace_test.go b/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces/id_namespace_test.go new file mode 100644 index 000000000000..71c37188a882 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces/id_namespace_test.go @@ -0,0 +1,227 @@ +package authorizationrulesnamespaces + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.Id = NamespaceId{} + +func TestNamespaceIDFormatter(t *testing.T) { + actual := NewNamespaceID("{subscriptionId}", "{resourceGroupName}", "{namespaceName}").ID() + expected := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}" + if actual != expected { + t.Fatalf("Expected %q but got %q", expected, actual) + } +} + +func TestNamespaceID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *NamespaceId + }{ + + { + // empty + Input: "", + Error: 
true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/{subscriptionId}/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/{subscriptionId}/resourceGroups/", + Error: true, + }, + + { + // missing Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/", + Error: true, + }, + + { + // missing value for Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}", + Expected: &NamespaceId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + Name: "{namespaceName}", + }, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/{SUBSCRIPTIONID}/RESOURCEGROUPS/{RESOURCEGROUPNAME}/PROVIDERS/MICROSOFT.EVENTHUB/NAMESPACES/{NAMESPACENAME}", + Error: true, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := NamespaceID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.Name != v.Expected.Name { + t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + } + } +} + +func TestNamespaceIDInsensitively(t *testing.T) { + 
testData := []struct { + Input string + Error bool + Expected *NamespaceId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/{subscriptionId}/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/{subscriptionId}/resourceGroups/", + Error: true, + }, + + { + // missing Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/", + Error: true, + }, + + { + // missing value for Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}", + Expected: &NamespaceId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + Name: "{namespaceName}", + }, + }, + + { + // lower-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}", + Expected: &NamespaceId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + Name: "{namespaceName}", + }, + }, + + { + // upper-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/NAMESPACES/{namespaceName}", + Expected: &NamespaceId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + Name: "{namespaceName}", + }, + }, + + { + // mixed-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/NaMeSpAcEs/{namespaceName}", + Expected: &NamespaceId{ + SubscriptionId: "{subscriptionId}", + 
ResourceGroup: "{resourceGroupName}", + Name: "{namespaceName}", + }, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := NamespaceIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.Name != v.Expected.Name { + t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + } + } +} diff --git a/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces/method_namespacescreateorupdateauthorizationrule_autorest.go b/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces/method_namespacescreateorupdateauthorizationrule_autorest.go new file mode 100644 index 000000000000..2ead61259057 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces/method_namespacescreateorupdateauthorizationrule_autorest.go @@ -0,0 +1,65 @@ +package authorizationrulesnamespaces + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type NamespacesCreateOrUpdateAuthorizationRuleResponse struct { + HttpResponse *http.Response + Model *AuthorizationRule +} + +// NamespacesCreateOrUpdateAuthorizationRule ... 
+func (c AuthorizationRulesNamespacesClient) NamespacesCreateOrUpdateAuthorizationRule(ctx context.Context, id AuthorizationRuleId, input AuthorizationRule) (result NamespacesCreateOrUpdateAuthorizationRuleResponse, err error) { + req, err := c.preparerForNamespacesCreateOrUpdateAuthorizationRule(ctx, id, input) + if err != nil { + err = autorest.NewErrorWithError(err, "authorizationrulesnamespaces.AuthorizationRulesNamespacesClient", "NamespacesCreateOrUpdateAuthorizationRule", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "authorizationrulesnamespaces.AuthorizationRulesNamespacesClient", "NamespacesCreateOrUpdateAuthorizationRule", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForNamespacesCreateOrUpdateAuthorizationRule(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "authorizationrulesnamespaces.AuthorizationRulesNamespacesClient", "NamespacesCreateOrUpdateAuthorizationRule", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForNamespacesCreateOrUpdateAuthorizationRule prepares the NamespacesCreateOrUpdateAuthorizationRule request. 
+func (c AuthorizationRulesNamespacesClient) preparerForNamespacesCreateOrUpdateAuthorizationRule(ctx context.Context, id AuthorizationRuleId, input AuthorizationRule) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithJSON(input), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForNamespacesCreateOrUpdateAuthorizationRule handles the response to the NamespacesCreateOrUpdateAuthorizationRule request. The method always +// closes the http.Response Body. +func (c AuthorizationRulesNamespacesClient) responderForNamespacesCreateOrUpdateAuthorizationRule(resp *http.Response) (result NamespacesCreateOrUpdateAuthorizationRuleResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces/method_namespacesdeleteauthorizationrule_autorest.go b/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces/method_namespacesdeleteauthorizationrule_autorest.go new file mode 100644 index 000000000000..c0bbef745242 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces/method_namespacesdeleteauthorizationrule_autorest.go @@ -0,0 +1,61 @@ +package authorizationrulesnamespaces + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type NamespacesDeleteAuthorizationRuleResponse struct { + HttpResponse *http.Response +} + +// NamespacesDeleteAuthorizationRule ... 
+func (c AuthorizationRulesNamespacesClient) NamespacesDeleteAuthorizationRule(ctx context.Context, id AuthorizationRuleId) (result NamespacesDeleteAuthorizationRuleResponse, err error) { + req, err := c.preparerForNamespacesDeleteAuthorizationRule(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "authorizationrulesnamespaces.AuthorizationRulesNamespacesClient", "NamespacesDeleteAuthorizationRule", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "authorizationrulesnamespaces.AuthorizationRulesNamespacesClient", "NamespacesDeleteAuthorizationRule", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForNamespacesDeleteAuthorizationRule(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "authorizationrulesnamespaces.AuthorizationRulesNamespacesClient", "NamespacesDeleteAuthorizationRule", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForNamespacesDeleteAuthorizationRule prepares the NamespacesDeleteAuthorizationRule request. +func (c AuthorizationRulesNamespacesClient) preparerForNamespacesDeleteAuthorizationRule(ctx context.Context, id AuthorizationRuleId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForNamespacesDeleteAuthorizationRule handles the response to the NamespacesDeleteAuthorizationRule request. The method always +// closes the http.Response Body. 
+func (c AuthorizationRulesNamespacesClient) responderForNamespacesDeleteAuthorizationRule(resp *http.Response) (result NamespacesDeleteAuthorizationRuleResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces/method_namespacesgetauthorizationrule_autorest.go b/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces/method_namespacesgetauthorizationrule_autorest.go new file mode 100644 index 000000000000..ad92f77d388c --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces/method_namespacesgetauthorizationrule_autorest.go @@ -0,0 +1,64 @@ +package authorizationrulesnamespaces + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type NamespacesGetAuthorizationRuleResponse struct { + HttpResponse *http.Response + Model *AuthorizationRule +} + +// NamespacesGetAuthorizationRule ... 
+func (c AuthorizationRulesNamespacesClient) NamespacesGetAuthorizationRule(ctx context.Context, id AuthorizationRuleId) (result NamespacesGetAuthorizationRuleResponse, err error) { + req, err := c.preparerForNamespacesGetAuthorizationRule(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "authorizationrulesnamespaces.AuthorizationRulesNamespacesClient", "NamespacesGetAuthorizationRule", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "authorizationrulesnamespaces.AuthorizationRulesNamespacesClient", "NamespacesGetAuthorizationRule", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForNamespacesGetAuthorizationRule(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "authorizationrulesnamespaces.AuthorizationRulesNamespacesClient", "NamespacesGetAuthorizationRule", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForNamespacesGetAuthorizationRule prepares the NamespacesGetAuthorizationRule request. +func (c AuthorizationRulesNamespacesClient) preparerForNamespacesGetAuthorizationRule(ctx context.Context, id AuthorizationRuleId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForNamespacesGetAuthorizationRule handles the response to the NamespacesGetAuthorizationRule request. The method always +// closes the http.Response Body. 
+func (c AuthorizationRulesNamespacesClient) responderForNamespacesGetAuthorizationRule(resp *http.Response) (result NamespacesGetAuthorizationRuleResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces/method_namespaceslistauthorizationrules_autorest.go b/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces/method_namespaceslistauthorizationrules_autorest.go new file mode 100644 index 000000000000..8d9696846633 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces/method_namespaceslistauthorizationrules_autorest.go @@ -0,0 +1,196 @@ +package authorizationrulesnamespaces + +import ( + "context" + "fmt" + "net/http" + "net/url" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type NamespacesListAuthorizationRulesResponse struct { + HttpResponse *http.Response + Model *[]AuthorizationRule + + nextLink *string + nextPageFunc func(ctx context.Context, nextLink string) (NamespacesListAuthorizationRulesResponse, error) +} + +type NamespacesListAuthorizationRulesCompleteResult struct { + Items []AuthorizationRule +} + +func (r NamespacesListAuthorizationRulesResponse) HasMore() bool { + return r.nextLink != nil +} + +func (r NamespacesListAuthorizationRulesResponse) LoadMore(ctx context.Context) (resp NamespacesListAuthorizationRulesResponse, err error) { + if !r.HasMore() { + err = fmt.Errorf("no more pages returned") + return + } + return r.nextPageFunc(ctx, *r.nextLink) +} + +type AuthorizationRulePredicate struct { + // TODO: implement me +} + +func (p AuthorizationRulePredicate) Matches(input AuthorizationRule) bool { + // TODO: implement me + // if p.Name != nil && input.Name != *p.Name { + // return false + // } + + return true 
+} + +// NamespacesListAuthorizationRules ... +func (c AuthorizationRulesNamespacesClient) NamespacesListAuthorizationRules(ctx context.Context, id NamespaceId) (resp NamespacesListAuthorizationRulesResponse, err error) { + req, err := c.preparerForNamespacesListAuthorizationRules(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "authorizationrulesnamespaces.AuthorizationRulesNamespacesClient", "NamespacesListAuthorizationRules", nil, "Failure preparing request") + return + } + + resp.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "authorizationrulesnamespaces.AuthorizationRulesNamespacesClient", "NamespacesListAuthorizationRules", resp.HttpResponse, "Failure sending request") + return + } + + resp, err = c.responderForNamespacesListAuthorizationRules(resp.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "authorizationrulesnamespaces.AuthorizationRulesNamespacesClient", "NamespacesListAuthorizationRules", resp.HttpResponse, "Failure responding to request") + return + } + return +} + +// NamespacesListAuthorizationRulesCompleteMatchingPredicate retrieves all of the results into a single object +func (c AuthorizationRulesNamespacesClient) NamespacesListAuthorizationRulesComplete(ctx context.Context, id NamespaceId) (NamespacesListAuthorizationRulesCompleteResult, error) { + return c.NamespacesListAuthorizationRulesCompleteMatchingPredicate(ctx, id, AuthorizationRulePredicate{}) +} + +// NamespacesListAuthorizationRulesCompleteMatchingPredicate retrieves all of the results and then applied the predicate +func (c AuthorizationRulesNamespacesClient) NamespacesListAuthorizationRulesCompleteMatchingPredicate(ctx context.Context, id NamespaceId, predicate AuthorizationRulePredicate) (resp NamespacesListAuthorizationRulesCompleteResult, err error) { + items := make([]AuthorizationRule, 0) + + page, err := c.NamespacesListAuthorizationRules(ctx, 
id) + if err != nil { + err = fmt.Errorf("loading the initial page: %+v", err) + return + } + if page.Model != nil { + for _, v := range *page.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + for page.HasMore() { + page, err = page.LoadMore(ctx) + if err != nil { + err = fmt.Errorf("loading the next page: %+v", err) + return + } + + if page.Model != nil { + for _, v := range *page.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + } + + out := NamespacesListAuthorizationRulesCompleteResult{ + Items: items, + } + return out, nil +} + +// preparerForNamespacesListAuthorizationRules prepares the NamespacesListAuthorizationRules request. +func (c AuthorizationRulesNamespacesClient) preparerForNamespacesListAuthorizationRules(ctx context.Context, id NamespaceId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/authorizationRules", id.ID())), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// preparerForNamespacesListAuthorizationRulesWithNextLink prepares the NamespacesListAuthorizationRules request with the given nextLink token. 
+func (c AuthorizationRulesNamespacesClient) preparerForNamespacesListAuthorizationRulesWithNextLink(ctx context.Context, nextLink string) (*http.Request, error) { + uri, err := url.Parse(nextLink) + if err != nil { + return nil, fmt.Errorf("parsing nextLink %q: %+v", nextLink, err) + } + queryParameters := map[string]interface{}{} + for k, v := range uri.Query() { + if len(v) == 0 { + continue + } + val := v[0] + val = autorest.Encode("query", val) + queryParameters[k] = val + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(uri.Path), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForNamespacesListAuthorizationRules handles the response to the NamespacesListAuthorizationRules request. The method always +// closes the http.Response Body. +func (c AuthorizationRulesNamespacesClient) responderForNamespacesListAuthorizationRules(resp *http.Response) (result NamespacesListAuthorizationRulesResponse, err error) { + type page struct { + Values []AuthorizationRule `json:"value"` + NextLink *string `json:"nextLink"` + } + var respObj page + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&respObj), + autorest.ByClosing()) + result.HttpResponse = resp + result.Model = &respObj.Values + result.nextLink = respObj.NextLink + if respObj.NextLink != nil { + result.nextPageFunc = func(ctx context.Context, nextLink string) (result NamespacesListAuthorizationRulesResponse, err error) { + req, err := c.preparerForNamespacesListAuthorizationRulesWithNextLink(ctx, nextLink) + if err != nil { + err = autorest.NewErrorWithError(err, "authorizationrulesnamespaces.AuthorizationRulesNamespacesClient", "NamespacesListAuthorizationRules", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = 
c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "authorizationrulesnamespaces.AuthorizationRulesNamespacesClient", "NamespacesListAuthorizationRules", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForNamespacesListAuthorizationRules(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "authorizationrulesnamespaces.AuthorizationRulesNamespacesClient", "NamespacesListAuthorizationRules", result.HttpResponse, "Failure responding to request") + return + } + + return + } + } + return +} diff --git a/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces/method_namespaceslistkeys_autorest.go b/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces/method_namespaceslistkeys_autorest.go new file mode 100644 index 000000000000..e53c912c8d36 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces/method_namespaceslistkeys_autorest.go @@ -0,0 +1,65 @@ +package authorizationrulesnamespaces + +import ( + "context" + "fmt" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type NamespacesListKeysResponse struct { + HttpResponse *http.Response + Model *AccessKeys +} + +// NamespacesListKeys ... 
+func (c AuthorizationRulesNamespacesClient) NamespacesListKeys(ctx context.Context, id AuthorizationRuleId) (result NamespacesListKeysResponse, err error) { + req, err := c.preparerForNamespacesListKeys(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "authorizationrulesnamespaces.AuthorizationRulesNamespacesClient", "NamespacesListKeys", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "authorizationrulesnamespaces.AuthorizationRulesNamespacesClient", "NamespacesListKeys", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForNamespacesListKeys(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "authorizationrulesnamespaces.AuthorizationRulesNamespacesClient", "NamespacesListKeys", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForNamespacesListKeys prepares the NamespacesListKeys request. +func (c AuthorizationRulesNamespacesClient) preparerForNamespacesListKeys(ctx context.Context, id AuthorizationRuleId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/listKeys", id.ID())), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForNamespacesListKeys handles the response to the NamespacesListKeys request. The method always +// closes the http.Response Body. 
+func (c AuthorizationRulesNamespacesClient) responderForNamespacesListKeys(resp *http.Response) (result NamespacesListKeysResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces/method_namespacesregeneratekeys_autorest.go b/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces/method_namespacesregeneratekeys_autorest.go new file mode 100644 index 000000000000..3f419c69e47d --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces/method_namespacesregeneratekeys_autorest.go @@ -0,0 +1,66 @@ +package authorizationrulesnamespaces + +import ( + "context" + "fmt" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type NamespacesRegenerateKeysResponse struct { + HttpResponse *http.Response + Model *AccessKeys +} + +// NamespacesRegenerateKeys ... 
+func (c AuthorizationRulesNamespacesClient) NamespacesRegenerateKeys(ctx context.Context, id AuthorizationRuleId, input RegenerateAccessKeyParameters) (result NamespacesRegenerateKeysResponse, err error) { + req, err := c.preparerForNamespacesRegenerateKeys(ctx, id, input) + if err != nil { + err = autorest.NewErrorWithError(err, "authorizationrulesnamespaces.AuthorizationRulesNamespacesClient", "NamespacesRegenerateKeys", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "authorizationrulesnamespaces.AuthorizationRulesNamespacesClient", "NamespacesRegenerateKeys", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForNamespacesRegenerateKeys(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "authorizationrulesnamespaces.AuthorizationRulesNamespacesClient", "NamespacesRegenerateKeys", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForNamespacesRegenerateKeys prepares the NamespacesRegenerateKeys request. +func (c AuthorizationRulesNamespacesClient) preparerForNamespacesRegenerateKeys(ctx context.Context, id AuthorizationRuleId, input RegenerateAccessKeyParameters) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/regenerateKeys", id.ID())), + autorest.WithJSON(input), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForNamespacesRegenerateKeys handles the response to the NamespacesRegenerateKeys request. The method always +// closes the http.Response Body. 
+func (c AuthorizationRulesNamespacesClient) responderForNamespacesRegenerateKeys(resp *http.Response) (result NamespacesRegenerateKeysResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces/model_accesskeys.go b/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces/model_accesskeys.go new file mode 100644 index 000000000000..3056d6122af0 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces/model_accesskeys.go @@ -0,0 +1,11 @@ +package authorizationrulesnamespaces + +type AccessKeys struct { + AliasPrimaryConnectionString *string `json:"aliasPrimaryConnectionString,omitempty"` + AliasSecondaryConnectionString *string `json:"aliasSecondaryConnectionString,omitempty"` + KeyName *string `json:"keyName,omitempty"` + PrimaryConnectionString *string `json:"primaryConnectionString,omitempty"` + PrimaryKey *string `json:"primaryKey,omitempty"` + SecondaryConnectionString *string `json:"secondaryConnectionString,omitempty"` + SecondaryKey *string `json:"secondaryKey,omitempty"` +} diff --git a/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces/model_authorizationrule.go b/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces/model_authorizationrule.go new file mode 100644 index 000000000000..61c25fbd284f --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces/model_authorizationrule.go @@ -0,0 +1,8 @@ +package authorizationrulesnamespaces + +type AuthorizationRule struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties *AuthorizationRuleProperties `json:"properties,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git 
a/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces/model_authorizationruleproperties.go b/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces/model_authorizationruleproperties.go new file mode 100644 index 000000000000..5e83e7a2c644 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces/model_authorizationruleproperties.go @@ -0,0 +1,5 @@ +package authorizationrulesnamespaces + +type AuthorizationRuleProperties struct { + Rights []string `json:"rights"` +} diff --git a/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces/model_regenerateaccesskeyparameters.go b/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces/model_regenerateaccesskeyparameters.go new file mode 100644 index 000000000000..ea24513b1b5b --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces/model_regenerateaccesskeyparameters.go @@ -0,0 +1,6 @@ +package authorizationrulesnamespaces + +type RegenerateAccessKeyParameters struct { + Key *string `json:"key,omitempty"` + KeyType KeyType `json:"keyType"` +} diff --git a/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces/version.go b/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces/version.go new file mode 100644 index 000000000000..c76054ce5508 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/authorizationrulesnamespaces/version.go @@ -0,0 +1,9 @@ +package authorizationrulesnamespaces + +import "fmt" + +const defaultApiVersion = "2017-04-01" + +func userAgent() string { + return fmt.Sprintf("pandora/authorizationrulesnamespaces/%s", defaultApiVersion) +} diff --git a/azurerm/internal/services/eventhub/sdk/checknameavailabilitydisasterrecoveryconfigs/client.go b/azurerm/internal/services/eventhub/sdk/checknameavailabilitydisasterrecoveryconfigs/client.go new file mode 100644 index 000000000000..69aef5e26bc0 --- /dev/null +++ 
b/azurerm/internal/services/eventhub/sdk/checknameavailabilitydisasterrecoveryconfigs/client.go @@ -0,0 +1,15 @@ +package checknameavailabilitydisasterrecoveryconfigs + +import "github.com/Azure/go-autorest/autorest" + +type CheckNameAvailabilityDisasterRecoveryConfigsClient struct { + Client autorest.Client + baseUri string +} + +func NewCheckNameAvailabilityDisasterRecoveryConfigsClientWithBaseURI(endpoint string) CheckNameAvailabilityDisasterRecoveryConfigsClient { + return CheckNameAvailabilityDisasterRecoveryConfigsClient{ + Client: autorest.NewClientWithUserAgent(userAgent()), + baseUri: endpoint, + } +} diff --git a/azurerm/internal/services/eventhub/sdk/checknameavailabilitydisasterrecoveryconfigs/constants.go b/azurerm/internal/services/eventhub/sdk/checknameavailabilitydisasterrecoveryconfigs/constants.go new file mode 100644 index 000000000000..ec4c154d8749 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/checknameavailabilitydisasterrecoveryconfigs/constants.go @@ -0,0 +1,12 @@ +package checknameavailabilitydisasterrecoveryconfigs + +type UnavailableReason string + +const ( + UnavailableReasonInvalidName UnavailableReason = "InvalidName" + UnavailableReasonNameInLockdown UnavailableReason = "NameInLockdown" + UnavailableReasonNameInUse UnavailableReason = "NameInUse" + UnavailableReasonNone UnavailableReason = "None" + UnavailableReasonSubscriptionIsDisabled UnavailableReason = "SubscriptionIsDisabled" + UnavailableReasonTooManyNamespaceInCurrentSubscription UnavailableReason = "TooManyNamespaceInCurrentSubscription" +) diff --git a/azurerm/internal/services/eventhub/sdk/checknameavailabilitydisasterrecoveryconfigs/id_namespace.go b/azurerm/internal/services/eventhub/sdk/checknameavailabilitydisasterrecoveryconfigs/id_namespace.go new file mode 100644 index 000000000000..e303601e67b4 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/checknameavailabilitydisasterrecoveryconfigs/id_namespace.go @@ -0,0 +1,108 @@ +package 
checknameavailabilitydisasterrecoveryconfigs + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +type NamespaceId struct { + SubscriptionId string + ResourceGroup string + Name string +} + +func NewNamespaceID(subscriptionId, resourceGroup, name string) NamespaceId { + return NamespaceId{ + SubscriptionId: subscriptionId, + ResourceGroup: resourceGroup, + Name: name, + } +} + +func (id NamespaceId) String() string { + segments := []string{ + fmt.Sprintf("Name %q", id.Name), + fmt.Sprintf("Resource Group %q", id.ResourceGroup), + } + segmentsStr := strings.Join(segments, " / ") + return fmt.Sprintf("%s: (%s)", "Namespace", segmentsStr) +} + +func (id NamespaceId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.EventHub/namespaces/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.Name) +} + +// NamespaceID parses a Namespace ID into an NamespaceId struct +func NamespaceID(input string) (*NamespaceId, error) { + id, err := resourceids.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := NamespaceId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + if resourceId.Name, err = id.PopSegment("namespaces"); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} + +// NamespaceIDInsensitively parses an Namespace ID into an NamespaceId struct, insensitively +// This should only be used to parse an ID for rewriting to a consistent casing, +// the NamespaceID method should be used instead for validation etc. 
+func NamespaceIDInsensitively(input string) (*NamespaceId, error) { + id, err := resourceids.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := NamespaceId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + // find the correct casing for the 'namespaces' segment + namespacesKey := "namespaces" + for key := range id.Path { + if strings.EqualFold(key, namespacesKey) { + namespacesKey = key + break + } + } + if resourceId.Name, err = id.PopSegment(namespacesKey); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} diff --git a/azurerm/internal/services/eventhub/sdk/checknameavailabilitydisasterrecoveryconfigs/id_namespace_test.go b/azurerm/internal/services/eventhub/sdk/checknameavailabilitydisasterrecoveryconfigs/id_namespace_test.go new file mode 100644 index 000000000000..da0bf3a86291 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/checknameavailabilitydisasterrecoveryconfigs/id_namespace_test.go @@ -0,0 +1,227 @@ +package checknameavailabilitydisasterrecoveryconfigs + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.Id = NamespaceId{} + +func TestNamespaceIDFormatter(t *testing.T) { + actual := NewNamespaceID("{subscriptionId}", "{resourceGroupName}", "{namespaceName}").ID() + expected := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}" + if actual != expected { + t.Fatalf("Expected %q but got %q", expected, actual) + } +} + +func TestNamespaceID(t *testing.T) { + testData := []struct { + Input string + Error bool + 
Expected *NamespaceId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/{subscriptionId}/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/{subscriptionId}/resourceGroups/", + Error: true, + }, + + { + // missing Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/", + Error: true, + }, + + { + // missing value for Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}", + Expected: &NamespaceId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + Name: "{namespaceName}", + }, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/{SUBSCRIPTIONID}/RESOURCEGROUPS/{RESOURCEGROUPNAME}/PROVIDERS/MICROSOFT.EVENTHUB/NAMESPACES/{NAMESPACENAME}", + Error: true, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := NamespaceID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.Name != v.Expected.Name { + t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + } 
+ } +} + +func TestNamespaceIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *NamespaceId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/{subscriptionId}/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/{subscriptionId}/resourceGroups/", + Error: true, + }, + + { + // missing Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/", + Error: true, + }, + + { + // missing value for Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}", + Expected: &NamespaceId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + Name: "{namespaceName}", + }, + }, + + { + // lower-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}", + Expected: &NamespaceId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + Name: "{namespaceName}", + }, + }, + + { + // upper-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/NAMESPACES/{namespaceName}", + Expected: &NamespaceId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + Name: "{namespaceName}", + }, + }, + + { + // mixed-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/NaMeSpAcEs/{namespaceName}", + 
Expected: &NamespaceId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + Name: "{namespaceName}", + }, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := NamespaceIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.Name != v.Expected.Name { + t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + } + } +} diff --git a/azurerm/internal/services/eventhub/sdk/checknameavailabilitydisasterrecoveryconfigs/method_disasterrecoveryconfigschecknameavailability_autorest.go b/azurerm/internal/services/eventhub/sdk/checknameavailabilitydisasterrecoveryconfigs/method_disasterrecoveryconfigschecknameavailability_autorest.go new file mode 100644 index 000000000000..4e8131e227ef --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/checknameavailabilitydisasterrecoveryconfigs/method_disasterrecoveryconfigschecknameavailability_autorest.go @@ -0,0 +1,66 @@ +package checknameavailabilitydisasterrecoveryconfigs + +import ( + "context" + "fmt" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type DisasterRecoveryConfigsCheckNameAvailabilityResponse struct { + HttpResponse *http.Response + Model *CheckNameAvailabilityResult +} + +// DisasterRecoveryConfigsCheckNameAvailability ... 
+func (c CheckNameAvailabilityDisasterRecoveryConfigsClient) DisasterRecoveryConfigsCheckNameAvailability(ctx context.Context, id NamespaceId, input CheckNameAvailabilityParameter) (result DisasterRecoveryConfigsCheckNameAvailabilityResponse, err error) { + req, err := c.preparerForDisasterRecoveryConfigsCheckNameAvailability(ctx, id, input) + if err != nil { + err = autorest.NewErrorWithError(err, "checknameavailabilitydisasterrecoveryconfigs.CheckNameAvailabilityDisasterRecoveryConfigsClient", "DisasterRecoveryConfigsCheckNameAvailability", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "checknameavailabilitydisasterrecoveryconfigs.CheckNameAvailabilityDisasterRecoveryConfigsClient", "DisasterRecoveryConfigsCheckNameAvailability", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForDisasterRecoveryConfigsCheckNameAvailability(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "checknameavailabilitydisasterrecoveryconfigs.CheckNameAvailabilityDisasterRecoveryConfigsClient", "DisasterRecoveryConfigsCheckNameAvailability", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForDisasterRecoveryConfigsCheckNameAvailability prepares the DisasterRecoveryConfigsCheckNameAvailability request. 
+func (c CheckNameAvailabilityDisasterRecoveryConfigsClient) preparerForDisasterRecoveryConfigsCheckNameAvailability(ctx context.Context, id NamespaceId, input CheckNameAvailabilityParameter) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/disasterRecoveryConfigs/checkNameAvailability", id.ID())), + autorest.WithJSON(input), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForDisasterRecoveryConfigsCheckNameAvailability handles the response to the DisasterRecoveryConfigsCheckNameAvailability request. The method always +// closes the http.Response Body. +func (c CheckNameAvailabilityDisasterRecoveryConfigsClient) responderForDisasterRecoveryConfigsCheckNameAvailability(resp *http.Response) (result DisasterRecoveryConfigsCheckNameAvailabilityResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/azurerm/internal/services/eventhub/sdk/checknameavailabilitydisasterrecoveryconfigs/model_checknameavailabilityparameter.go b/azurerm/internal/services/eventhub/sdk/checknameavailabilitydisasterrecoveryconfigs/model_checknameavailabilityparameter.go new file mode 100644 index 000000000000..38ce1ff42f9f --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/checknameavailabilitydisasterrecoveryconfigs/model_checknameavailabilityparameter.go @@ -0,0 +1,5 @@ +package checknameavailabilitydisasterrecoveryconfigs + +type CheckNameAvailabilityParameter struct { + Name string `json:"name"` +} diff --git 
a/azurerm/internal/services/eventhub/sdk/checknameavailabilitydisasterrecoveryconfigs/model_checknameavailabilityresult.go b/azurerm/internal/services/eventhub/sdk/checknameavailabilitydisasterrecoveryconfigs/model_checknameavailabilityresult.go new file mode 100644 index 000000000000..2cefe21adb81 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/checknameavailabilitydisasterrecoveryconfigs/model_checknameavailabilityresult.go @@ -0,0 +1,7 @@ +package checknameavailabilitydisasterrecoveryconfigs + +type CheckNameAvailabilityResult struct { + Message *string `json:"message,omitempty"` + NameAvailable *bool `json:"nameAvailable,omitempty"` + Reason *UnavailableReason `json:"reason,omitempty"` +} diff --git a/azurerm/internal/services/eventhub/sdk/checknameavailabilitydisasterrecoveryconfigs/version.go b/azurerm/internal/services/eventhub/sdk/checknameavailabilitydisasterrecoveryconfigs/version.go new file mode 100644 index 000000000000..cfbaca24f2f7 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/checknameavailabilitydisasterrecoveryconfigs/version.go @@ -0,0 +1,9 @@ +package checknameavailabilitydisasterrecoveryconfigs + +import "fmt" + +const defaultApiVersion = "2017-04-01" + +func userAgent() string { + return fmt.Sprintf("pandora/checknameavailabilitydisasterrecoveryconfigs/%s", defaultApiVersion) +} diff --git a/azurerm/internal/services/eventhub/sdk/consumergroups/client.go b/azurerm/internal/services/eventhub/sdk/consumergroups/client.go new file mode 100644 index 000000000000..d3f3bb758e69 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/consumergroups/client.go @@ -0,0 +1,15 @@ +package consumergroups + +import "github.com/Azure/go-autorest/autorest" + +type ConsumerGroupsClient struct { + Client autorest.Client + baseUri string +} + +func NewConsumerGroupsClientWithBaseURI(endpoint string) ConsumerGroupsClient { + return ConsumerGroupsClient{ + Client: autorest.NewClientWithUserAgent(userAgent()), + baseUri: endpoint, + } +} 
diff --git a/azurerm/internal/services/eventhub/sdk/consumergroups/id_consumergroup.go b/azurerm/internal/services/eventhub/sdk/consumergroups/id_consumergroup.go new file mode 100644 index 000000000000..e7d3b36dce54 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/consumergroups/id_consumergroup.go @@ -0,0 +1,144 @@ +package consumergroups + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +type ConsumergroupId struct { + SubscriptionId string + ResourceGroup string + NamespaceName string + EventhubName string + Name string +} + +func NewConsumergroupID(subscriptionId, resourceGroup, namespaceName, eventhubName, name string) ConsumergroupId { + return ConsumergroupId{ + SubscriptionId: subscriptionId, + ResourceGroup: resourceGroup, + NamespaceName: namespaceName, + EventhubName: eventhubName, + Name: name, + } +} + +func (id ConsumergroupId) String() string { + segments := []string{ + fmt.Sprintf("Name %q", id.Name), + fmt.Sprintf("Eventhub Name %q", id.EventhubName), + fmt.Sprintf("Namespace Name %q", id.NamespaceName), + fmt.Sprintf("Resource Group %q", id.ResourceGroup), + } + segmentsStr := strings.Join(segments, " / ") + return fmt.Sprintf("%s: (%s)", "Consumergroup", segmentsStr) +} + +func (id ConsumergroupId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.EventHub/namespaces/%s/eventhubs/%s/consumergroups/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.NamespaceName, id.EventhubName, id.Name) +} + +// ConsumergroupID parses a Consumergroup ID into an ConsumergroupId struct +func ConsumergroupID(input string) (*ConsumergroupId, error) { + id, err := resourceids.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := ConsumergroupId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 
'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + if resourceId.NamespaceName, err = id.PopSegment("namespaces"); err != nil { + return nil, err + } + if resourceId.EventhubName, err = id.PopSegment("eventhubs"); err != nil { + return nil, err + } + if resourceId.Name, err = id.PopSegment("consumergroups"); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} + +// ConsumergroupIDInsensitively parses an Consumergroup ID into an ConsumergroupId struct, insensitively +// This should only be used to parse an ID for rewriting to a consistent casing, +// the ConsumergroupID method should be used instead for validation etc. +func ConsumergroupIDInsensitively(input string) (*ConsumergroupId, error) { + id, err := resourceids.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := ConsumergroupId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + // find the correct casing for the 'namespaces' segment + namespacesKey := "namespaces" + for key := range id.Path { + if strings.EqualFold(key, namespacesKey) { + namespacesKey = key + break + } + } + if resourceId.NamespaceName, err = id.PopSegment(namespacesKey); err != nil { + return nil, err + } + + // find the correct casing for the 'eventhubs' segment + eventhubsKey := "eventhubs" + for key := range id.Path { + if strings.EqualFold(key, eventhubsKey) { + eventhubsKey = key + break + } + } + if resourceId.EventhubName, err = id.PopSegment(eventhubsKey); err != nil { + return nil, err + } + + // find the correct casing for the 'consumergroups' 
segment + consumergroupsKey := "consumergroups" + for key := range id.Path { + if strings.EqualFold(key, consumergroupsKey) { + consumergroupsKey = key + break + } + } + if resourceId.Name, err = id.PopSegment(consumergroupsKey); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} diff --git a/azurerm/internal/services/eventhub/sdk/consumergroups/id_consumergroup_test.go b/azurerm/internal/services/eventhub/sdk/consumergroups/id_consumergroup_test.go new file mode 100644 index 000000000000..b1f84a451404 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/consumergroups/id_consumergroup_test.go @@ -0,0 +1,297 @@ +package consumergroups + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.Id = ConsumergroupId{} + +func TestConsumergroupIDFormatter(t *testing.T) { + actual := NewConsumergroupID("{subscriptionId}", "{resourceGroupName}", "{namespaceName}", "{eventHubName}", "{consumerGroupName}").ID() + expected := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/consumergroups/{consumerGroupName}" + if actual != expected { + t.Fatalf("Expected %q but got %q", expected, actual) + } +} + +func TestConsumergroupID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ConsumergroupId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/{subscriptionId}/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/{subscriptionId}/resourceGroups/", + Error: true, + }, + + { + // missing NamespaceName + Input: 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/", + Error: true, + }, + + { + // missing value for NamespaceName + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/", + Error: true, + }, + + { + // missing EventhubName + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/", + Error: true, + }, + + { + // missing value for EventhubName + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/", + Error: true, + }, + + { + // missing Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/", + Error: true, + }, + + { + // missing value for Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/consumergroups/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/consumergroups/{consumerGroupName}", + Expected: &ConsumergroupId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + NamespaceName: "{namespaceName}", + EventhubName: "{eventHubName}", + Name: "{consumerGroupName}", + }, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/{SUBSCRIPTIONID}/RESOURCEGROUPS/{RESOURCEGROUPNAME}/PROVIDERS/MICROSOFT.EVENTHUB/NAMESPACES/{NAMESPACENAME}/EVENTHUBS/{EVENTHUBNAME}/CONSUMERGROUPS/{CONSUMERGROUPNAME}", + Error: true, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ConsumergroupID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a 
value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.NamespaceName != v.Expected.NamespaceName { + t.Fatalf("Expected %q but got %q for NamespaceName", v.Expected.NamespaceName, actual.NamespaceName) + } + if actual.EventhubName != v.Expected.EventhubName { + t.Fatalf("Expected %q but got %q for EventhubName", v.Expected.EventhubName, actual.EventhubName) + } + if actual.Name != v.Expected.Name { + t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + } + } +} + +func TestConsumergroupIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ConsumergroupId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/{subscriptionId}/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/{subscriptionId}/resourceGroups/", + Error: true, + }, + + { + // missing NamespaceName + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/", + Error: true, + }, + + { + // missing value for NamespaceName + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/", + Error: true, + }, + + { + // missing EventhubName + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/", + Error: true, + }, + + { + 
// missing value for EventhubName + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/", + Error: true, + }, + + { + // missing Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/", + Error: true, + }, + + { + // missing value for Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/consumergroups/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/consumergroups/{consumerGroupName}", + Expected: &ConsumergroupId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + NamespaceName: "{namespaceName}", + EventhubName: "{eventHubName}", + Name: "{consumerGroupName}", + }, + }, + + { + // lower-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/consumergroups/{consumerGroupName}", + Expected: &ConsumergroupId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + NamespaceName: "{namespaceName}", + EventhubName: "{eventHubName}", + Name: "{consumerGroupName}", + }, + }, + + { + // upper-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/NAMESPACES/{namespaceName}/EVENTHUBS/{eventHubName}/CONSUMERGROUPS/{consumerGroupName}", + Expected: &ConsumergroupId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + NamespaceName: "{namespaceName}", + EventhubName: "{eventHubName}", + Name: "{consumerGroupName}", + }, + }, + + { + // mixed-cased segment 
names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/NaMeSpAcEs/{namespaceName}/EvEnThUbS/{eventHubName}/CoNsUmErGrOuPs/{consumerGroupName}", + Expected: &ConsumergroupId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + NamespaceName: "{namespaceName}", + EventhubName: "{eventHubName}", + Name: "{consumerGroupName}", + }, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ConsumergroupIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.NamespaceName != v.Expected.NamespaceName { + t.Fatalf("Expected %q but got %q for NamespaceName", v.Expected.NamespaceName, actual.NamespaceName) + } + if actual.EventhubName != v.Expected.EventhubName { + t.Fatalf("Expected %q but got %q for EventhubName", v.Expected.EventhubName, actual.EventhubName) + } + if actual.Name != v.Expected.Name { + t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + } + } +} diff --git a/azurerm/internal/services/eventhub/sdk/consumergroups/id_eventhub.go b/azurerm/internal/services/eventhub/sdk/consumergroups/id_eventhub.go new file mode 100644 index 000000000000..7f776a2eaff9 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/consumergroups/id_eventhub.go @@ -0,0 +1,126 @@ +package consumergroups + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +type EventhubId struct { + SubscriptionId 
string + ResourceGroup string + NamespaceName string + Name string +} + +func NewEventhubID(subscriptionId, resourceGroup, namespaceName, name string) EventhubId { + return EventhubId{ + SubscriptionId: subscriptionId, + ResourceGroup: resourceGroup, + NamespaceName: namespaceName, + Name: name, + } +} + +func (id EventhubId) String() string { + segments := []string{ + fmt.Sprintf("Name %q", id.Name), + fmt.Sprintf("Namespace Name %q", id.NamespaceName), + fmt.Sprintf("Resource Group %q", id.ResourceGroup), + } + segmentsStr := strings.Join(segments, " / ") + return fmt.Sprintf("%s: (%s)", "Eventhub", segmentsStr) +} + +func (id EventhubId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.EventHub/namespaces/%s/eventhubs/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.NamespaceName, id.Name) +} + +// EventhubID parses a Eventhub ID into an EventhubId struct +func EventhubID(input string) (*EventhubId, error) { + id, err := resourceids.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := EventhubId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + if resourceId.NamespaceName, err = id.PopSegment("namespaces"); err != nil { + return nil, err + } + if resourceId.Name, err = id.PopSegment("eventhubs"); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} + +// EventhubIDInsensitively parses an Eventhub ID into an EventhubId struct, insensitively +// This should only be used to parse an ID for rewriting to a consistent casing, +// the EventhubID method should be used instead for validation etc. 
+func EventhubIDInsensitively(input string) (*EventhubId, error) { + id, err := resourceids.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := EventhubId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + // find the correct casing for the 'namespaces' segment + namespacesKey := "namespaces" + for key := range id.Path { + if strings.EqualFold(key, namespacesKey) { + namespacesKey = key + break + } + } + if resourceId.NamespaceName, err = id.PopSegment(namespacesKey); err != nil { + return nil, err + } + + // find the correct casing for the 'eventhubs' segment + eventhubsKey := "eventhubs" + for key := range id.Path { + if strings.EqualFold(key, eventhubsKey) { + eventhubsKey = key + break + } + } + if resourceId.Name, err = id.PopSegment(eventhubsKey); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} diff --git a/azurerm/internal/services/eventhub/sdk/consumergroups/id_eventhub_test.go b/azurerm/internal/services/eventhub/sdk/consumergroups/id_eventhub_test.go new file mode 100644 index 000000000000..2321fadc83e1 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/consumergroups/id_eventhub_test.go @@ -0,0 +1,262 @@ +package consumergroups + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.Id = EventhubId{} + +func TestEventhubIDFormatter(t *testing.T) { + actual := NewEventhubID("{subscriptionId}", "{resourceGroupName}", "{namespaceName}", "{eventHubName}").ID() + expected := 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}" + if actual != expected { + t.Fatalf("Expected %q but got %q", expected, actual) + } +} + +func TestEventhubID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *EventhubId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/{subscriptionId}/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/{subscriptionId}/resourceGroups/", + Error: true, + }, + + { + // missing NamespaceName + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/", + Error: true, + }, + + { + // missing value for NamespaceName + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/", + Error: true, + }, + + { + // missing Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/", + Error: true, + }, + + { + // missing value for Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}", + Expected: &EventhubId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + NamespaceName: "{namespaceName}", + Name: "{eventHubName}", + }, + }, + + { + // upper-cased + Input: 
"/SUBSCRIPTIONS/{SUBSCRIPTIONID}/RESOURCEGROUPS/{RESOURCEGROUPNAME}/PROVIDERS/MICROSOFT.EVENTHUB/NAMESPACES/{NAMESPACENAME}/EVENTHUBS/{EVENTHUBNAME}", + Error: true, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := EventhubID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.NamespaceName != v.Expected.NamespaceName { + t.Fatalf("Expected %q but got %q for NamespaceName", v.Expected.NamespaceName, actual.NamespaceName) + } + if actual.Name != v.Expected.Name { + t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + } + } +} + +func TestEventhubIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *EventhubId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/{subscriptionId}/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/{subscriptionId}/resourceGroups/", + Error: true, + }, + + { + // missing NamespaceName + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/", + Error: true, + }, + + { + // missing value for NamespaceName + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/", + Error: true, + }, + + { 
+ // missing Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/", + Error: true, + }, + + { + // missing value for Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}", + Expected: &EventhubId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + NamespaceName: "{namespaceName}", + Name: "{eventHubName}", + }, + }, + + { + // lower-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}", + Expected: &EventhubId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + NamespaceName: "{namespaceName}", + Name: "{eventHubName}", + }, + }, + + { + // upper-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/NAMESPACES/{namespaceName}/EVENTHUBS/{eventHubName}", + Expected: &EventhubId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + NamespaceName: "{namespaceName}", + Name: "{eventHubName}", + }, + }, + + { + // mixed-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/NaMeSpAcEs/{namespaceName}/EvEnThUbS/{eventHubName}", + Expected: &EventhubId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + NamespaceName: "{namespaceName}", + Name: "{eventHubName}", + }, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := EventhubIDInsensitively(v.Input) + if err != nil { + if 
v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.NamespaceName != v.Expected.NamespaceName { + t.Fatalf("Expected %q but got %q for NamespaceName", v.Expected.NamespaceName, actual.NamespaceName) + } + if actual.Name != v.Expected.Name { + t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + } + } +} diff --git a/azurerm/internal/services/eventhub/sdk/consumergroups/method_createorupdate_autorest.go b/azurerm/internal/services/eventhub/sdk/consumergroups/method_createorupdate_autorest.go new file mode 100644 index 000000000000..b138b758958a --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/consumergroups/method_createorupdate_autorest.go @@ -0,0 +1,65 @@ +package consumergroups + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type CreateOrUpdateResponse struct { + HttpResponse *http.Response + Model *ConsumerGroup +} + +// CreateOrUpdate ... 
+func (c ConsumerGroupsClient) CreateOrUpdate(ctx context.Context, id ConsumergroupId, input ConsumerGroup) (result CreateOrUpdateResponse, err error) { + req, err := c.preparerForCreateOrUpdate(ctx, id, input) + if err != nil { + err = autorest.NewErrorWithError(err, "consumergroups.ConsumerGroupsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "consumergroups.ConsumerGroupsClient", "CreateOrUpdate", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForCreateOrUpdate(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "consumergroups.ConsumerGroupsClient", "CreateOrUpdate", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForCreateOrUpdate prepares the CreateOrUpdate request. +func (c ConsumerGroupsClient) preparerForCreateOrUpdate(ctx context.Context, id ConsumergroupId, input ConsumerGroup) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithJSON(input), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForCreateOrUpdate handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. 
+func (c ConsumerGroupsClient) responderForCreateOrUpdate(resp *http.Response) (result CreateOrUpdateResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/azurerm/internal/services/eventhub/sdk/consumergroups/method_delete_autorest.go b/azurerm/internal/services/eventhub/sdk/consumergroups/method_delete_autorest.go new file mode 100644 index 000000000000..0ffc66f2142e --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/consumergroups/method_delete_autorest.go @@ -0,0 +1,61 @@ +package consumergroups + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type DeleteResponse struct { + HttpResponse *http.Response +} + +// Delete ... +func (c ConsumerGroupsClient) Delete(ctx context.Context, id ConsumergroupId) (result DeleteResponse, err error) { + req, err := c.preparerForDelete(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "consumergroups.ConsumerGroupsClient", "Delete", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "consumergroups.ConsumerGroupsClient", "Delete", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForDelete(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "consumergroups.ConsumerGroupsClient", "Delete", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForDelete prepares the Delete request. 
+func (c ConsumerGroupsClient) preparerForDelete(ctx context.Context, id ConsumergroupId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForDelete handles the response to the Delete request. The method always +// closes the http.Response Body. +func (c ConsumerGroupsClient) responderForDelete(resp *http.Response) (result DeleteResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/azurerm/internal/services/eventhub/sdk/consumergroups/method_get_autorest.go b/azurerm/internal/services/eventhub/sdk/consumergroups/method_get_autorest.go new file mode 100644 index 000000000000..279ff2cc9e6e --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/consumergroups/method_get_autorest.go @@ -0,0 +1,64 @@ +package consumergroups + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type GetResponse struct { + HttpResponse *http.Response + Model *ConsumerGroup +} + +// Get ... 
+func (c ConsumerGroupsClient) Get(ctx context.Context, id ConsumergroupId) (result GetResponse, err error) { + req, err := c.preparerForGet(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "consumergroups.ConsumerGroupsClient", "Get", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "consumergroups.ConsumerGroupsClient", "Get", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForGet(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "consumergroups.ConsumerGroupsClient", "Get", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForGet prepares the Get request. +func (c ConsumerGroupsClient) preparerForGet(ctx context.Context, id ConsumergroupId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForGet handles the response to the Get request. The method always +// closes the http.Response Body. 
+func (c ConsumerGroupsClient) responderForGet(resp *http.Response) (result GetResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/azurerm/internal/services/eventhub/sdk/consumergroups/method_listbyeventhub_autorest.go b/azurerm/internal/services/eventhub/sdk/consumergroups/method_listbyeventhub_autorest.go new file mode 100644 index 000000000000..c481d717df7b --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/consumergroups/method_listbyeventhub_autorest.go @@ -0,0 +1,223 @@ +package consumergroups + +import ( + "context" + "fmt" + "net/http" + "net/url" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type ListByEventHubResponse struct { + HttpResponse *http.Response + Model *[]ConsumerGroup + + nextLink *string + nextPageFunc func(ctx context.Context, nextLink string) (ListByEventHubResponse, error) +} + +type ListByEventHubCompleteResult struct { + Items []ConsumerGroup +} + +func (r ListByEventHubResponse) HasMore() bool { + return r.nextLink != nil +} + +func (r ListByEventHubResponse) LoadMore(ctx context.Context) (resp ListByEventHubResponse, err error) { + if !r.HasMore() { + err = fmt.Errorf("no more pages returned") + return + } + return r.nextPageFunc(ctx, *r.nextLink) +} + +type ListByEventHubOptions struct { + Skip *int64 + Top *int64 +} + +func DefaultListByEventHubOptions() ListByEventHubOptions { + return ListByEventHubOptions{} +} + +func (o ListByEventHubOptions) toQueryString() map[string]interface{} { + out := make(map[string]interface{}) + + if o.Skip != nil { + out["$skip"] = *o.Skip + } + + if o.Top != nil { + out["$top"] = *o.Top + } + + return out +} + +type ConsumerGroupPredicate struct { + // TODO: implement me +} + +func (p ConsumerGroupPredicate) Matches(input ConsumerGroup) bool { + // TODO: implement 
me + // if p.Name != nil && input.Name != *p.Name { + // return false + // } + + return true +} + +// ListByEventHub ... +func (c ConsumerGroupsClient) ListByEventHub(ctx context.Context, id EventhubId, options ListByEventHubOptions) (resp ListByEventHubResponse, err error) { + req, err := c.preparerForListByEventHub(ctx, id, options) + if err != nil { + err = autorest.NewErrorWithError(err, "consumergroups.ConsumerGroupsClient", "ListByEventHub", nil, "Failure preparing request") + return + } + + resp.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "consumergroups.ConsumerGroupsClient", "ListByEventHub", resp.HttpResponse, "Failure sending request") + return + } + + resp, err = c.responderForListByEventHub(resp.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "consumergroups.ConsumerGroupsClient", "ListByEventHub", resp.HttpResponse, "Failure responding to request") + return + } + return +} + +// ListByEventHubCompleteMatchingPredicate retrieves all of the results into a single object +func (c ConsumerGroupsClient) ListByEventHubComplete(ctx context.Context, id EventhubId, options ListByEventHubOptions) (ListByEventHubCompleteResult, error) { + return c.ListByEventHubCompleteMatchingPredicate(ctx, id, options, ConsumerGroupPredicate{}) +} + +// ListByEventHubCompleteMatchingPredicate retrieves all of the results and then applied the predicate +func (c ConsumerGroupsClient) ListByEventHubCompleteMatchingPredicate(ctx context.Context, id EventhubId, options ListByEventHubOptions, predicate ConsumerGroupPredicate) (resp ListByEventHubCompleteResult, err error) { + items := make([]ConsumerGroup, 0) + + page, err := c.ListByEventHub(ctx, id, options) + if err != nil { + err = fmt.Errorf("loading the initial page: %+v", err) + return + } + if page.Model != nil { + for _, v := range *page.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + 
} + + for page.HasMore() { + page, err = page.LoadMore(ctx) + if err != nil { + err = fmt.Errorf("loading the next page: %+v", err) + return + } + + if page.Model != nil { + for _, v := range *page.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + } + + out := ListByEventHubCompleteResult{ + Items: items, + } + return out, nil +} + +// preparerForListByEventHub prepares the ListByEventHub request. +func (c ConsumerGroupsClient) preparerForListByEventHub(ctx context.Context, id EventhubId, options ListByEventHubOptions) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + for k, v := range options.toQueryString() { + queryParameters[k] = autorest.Encode("query", v) + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/consumergroups", id.ID())), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// preparerForListByEventHubWithNextLink prepares the ListByEventHub request with the given nextLink token. 
+func (c ConsumerGroupsClient) preparerForListByEventHubWithNextLink(ctx context.Context, nextLink string) (*http.Request, error) { + uri, err := url.Parse(nextLink) + if err != nil { + return nil, fmt.Errorf("parsing nextLink %q: %+v", nextLink, err) + } + queryParameters := map[string]interface{}{} + for k, v := range uri.Query() { + if len(v) == 0 { + continue + } + val := v[0] + val = autorest.Encode("query", val) + queryParameters[k] = val + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(uri.Path), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForListByEventHub handles the response to the ListByEventHub request. The method always +// closes the http.Response Body. +func (c ConsumerGroupsClient) responderForListByEventHub(resp *http.Response) (result ListByEventHubResponse, err error) { + type page struct { + Values []ConsumerGroup `json:"value"` + NextLink *string `json:"nextLink"` + } + var respObj page + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&respObj), + autorest.ByClosing()) + result.HttpResponse = resp + result.Model = &respObj.Values + result.nextLink = respObj.NextLink + if respObj.NextLink != nil { + result.nextPageFunc = func(ctx context.Context, nextLink string) (result ListByEventHubResponse, err error) { + req, err := c.preparerForListByEventHubWithNextLink(ctx, nextLink) + if err != nil { + err = autorest.NewErrorWithError(err, "consumergroups.ConsumerGroupsClient", "ListByEventHub", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "consumergroups.ConsumerGroupsClient", "ListByEventHub", result.HttpResponse, "Failure 
sending request") + return + } + + result, err = c.responderForListByEventHub(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "consumergroups.ConsumerGroupsClient", "ListByEventHub", result.HttpResponse, "Failure responding to request") + return + } + + return + } + } + return +} diff --git a/azurerm/internal/services/eventhub/sdk/consumergroups/model_consumergroup.go b/azurerm/internal/services/eventhub/sdk/consumergroups/model_consumergroup.go new file mode 100644 index 000000000000..e279a28b1df6 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/consumergroups/model_consumergroup.go @@ -0,0 +1,8 @@ +package consumergroups + +type ConsumerGroup struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties *ConsumerGroupProperties `json:"properties,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/azurerm/internal/services/eventhub/sdk/consumergroups/model_consumergroupproperties.go b/azurerm/internal/services/eventhub/sdk/consumergroups/model_consumergroupproperties.go new file mode 100644 index 000000000000..da7d85adc68f --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/consumergroups/model_consumergroupproperties.go @@ -0,0 +1,31 @@ +package consumergroups + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/formatting" +) + +type ConsumerGroupProperties struct { + CreatedAt *string `json:"createdAt,omitempty"` + UpdatedAt *string `json:"updatedAt,omitempty"` + UserMetadata *string `json:"userMetadata,omitempty"` +} + +func (o ConsumerGroupProperties) ListCreatedAtAsTime() (*time.Time, error) { + return formatting.ParseAsDateFormat(o.CreatedAt, "2006-01-02T15:04:05Z07:00") +} + +func (o ConsumerGroupProperties) SetCreatedAtAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.CreatedAt = &formatted +} + +func (o ConsumerGroupProperties) ListUpdatedAtAsTime() (*time.Time, error) { + return 
formatting.ParseAsDateFormat(o.UpdatedAt, "2006-01-02T15:04:05Z07:00") +} + +func (o ConsumerGroupProperties) SetUpdatedAtAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.UpdatedAt = &formatted +} diff --git a/azurerm/internal/services/eventhub/sdk/consumergroups/version.go b/azurerm/internal/services/eventhub/sdk/consumergroups/version.go new file mode 100644 index 000000000000..51280f3d09dc --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/consumergroups/version.go @@ -0,0 +1,9 @@ +package consumergroups + +import "fmt" + +const defaultApiVersion = "2017-04-01" + +func userAgent() string { + return fmt.Sprintf("pandora/consumergroups/%s", defaultApiVersion) +} diff --git a/azurerm/internal/services/eventhub/sdk/disasterrecoveryconfigs/client.go b/azurerm/internal/services/eventhub/sdk/disasterrecoveryconfigs/client.go new file mode 100644 index 000000000000..4d9aa02a3ddc --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/disasterrecoveryconfigs/client.go @@ -0,0 +1,15 @@ +package disasterrecoveryconfigs + +import "github.com/Azure/go-autorest/autorest" + +type DisasterRecoveryConfigsClient struct { + Client autorest.Client + baseUri string +} + +func NewDisasterRecoveryConfigsClientWithBaseURI(endpoint string) DisasterRecoveryConfigsClient { + return DisasterRecoveryConfigsClient{ + Client: autorest.NewClientWithUserAgent(userAgent()), + baseUri: endpoint, + } +} diff --git a/azurerm/internal/services/eventhub/sdk/disasterrecoveryconfigs/constants.go b/azurerm/internal/services/eventhub/sdk/disasterrecoveryconfigs/constants.go new file mode 100644 index 000000000000..9b8503daa74b --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/disasterrecoveryconfigs/constants.go @@ -0,0 +1,17 @@ +package disasterrecoveryconfigs + +type ProvisioningStateDR string + +const ( + ProvisioningStateDRAccepted ProvisioningStateDR = "Accepted" + ProvisioningStateDRFailed ProvisioningStateDR = "Failed" + 
ProvisioningStateDRSucceeded ProvisioningStateDR = "Succeeded" +) + +type RoleDisasterRecovery string + +const ( + RoleDisasterRecoveryPrimary RoleDisasterRecovery = "Primary" + RoleDisasterRecoveryPrimaryNotReplicating RoleDisasterRecovery = "PrimaryNotReplicating" + RoleDisasterRecoverySecondary RoleDisasterRecovery = "Secondary" +) diff --git a/azurerm/internal/services/eventhub/sdk/disasterrecoveryconfigs/id_disasterrecoveryconfig.go b/azurerm/internal/services/eventhub/sdk/disasterrecoveryconfigs/id_disasterrecoveryconfig.go new file mode 100644 index 000000000000..51bd47558a9f --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/disasterrecoveryconfigs/id_disasterrecoveryconfig.go @@ -0,0 +1,126 @@ +package disasterrecoveryconfigs + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +type DisasterRecoveryConfigId struct { + SubscriptionId string + ResourceGroup string + NamespaceName string + Name string +} + +func NewDisasterRecoveryConfigID(subscriptionId, resourceGroup, namespaceName, name string) DisasterRecoveryConfigId { + return DisasterRecoveryConfigId{ + SubscriptionId: subscriptionId, + ResourceGroup: resourceGroup, + NamespaceName: namespaceName, + Name: name, + } +} + +func (id DisasterRecoveryConfigId) String() string { + segments := []string{ + fmt.Sprintf("Name %q", id.Name), + fmt.Sprintf("Namespace Name %q", id.NamespaceName), + fmt.Sprintf("Resource Group %q", id.ResourceGroup), + } + segmentsStr := strings.Join(segments, " / ") + return fmt.Sprintf("%s: (%s)", "Disaster Recovery Config", segmentsStr) +} + +func (id DisasterRecoveryConfigId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.EventHub/namespaces/%s/disasterRecoveryConfigs/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.NamespaceName, id.Name) +} + +// DisasterRecoveryConfigID parses a DisasterRecoveryConfig ID into an DisasterRecoveryConfigId struct 
+func DisasterRecoveryConfigID(input string) (*DisasterRecoveryConfigId, error) { + id, err := resourceids.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := DisasterRecoveryConfigId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + if resourceId.NamespaceName, err = id.PopSegment("namespaces"); err != nil { + return nil, err + } + if resourceId.Name, err = id.PopSegment("disasterRecoveryConfigs"); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} + +// DisasterRecoveryConfigIDInsensitively parses an DisasterRecoveryConfig ID into an DisasterRecoveryConfigId struct, insensitively +// This should only be used to parse an ID for rewriting to a consistent casing, +// the DisasterRecoveryConfigID method should be used instead for validation etc. 
+func DisasterRecoveryConfigIDInsensitively(input string) (*DisasterRecoveryConfigId, error) { + id, err := resourceids.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := DisasterRecoveryConfigId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + // find the correct casing for the 'namespaces' segment + namespacesKey := "namespaces" + for key := range id.Path { + if strings.EqualFold(key, namespacesKey) { + namespacesKey = key + break + } + } + if resourceId.NamespaceName, err = id.PopSegment(namespacesKey); err != nil { + return nil, err + } + + // find the correct casing for the 'disasterRecoveryConfigs' segment + disasterRecoveryConfigsKey := "disasterRecoveryConfigs" + for key := range id.Path { + if strings.EqualFold(key, disasterRecoveryConfigsKey) { + disasterRecoveryConfigsKey = key + break + } + } + if resourceId.Name, err = id.PopSegment(disasterRecoveryConfigsKey); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} diff --git a/azurerm/internal/services/eventhub/sdk/disasterrecoveryconfigs/id_disasterrecoveryconfig_test.go b/azurerm/internal/services/eventhub/sdk/disasterrecoveryconfigs/id_disasterrecoveryconfig_test.go new file mode 100644 index 000000000000..b1b073916c5a --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/disasterrecoveryconfigs/id_disasterrecoveryconfig_test.go @@ -0,0 +1,262 @@ +package disasterrecoveryconfigs + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.Id = DisasterRecoveryConfigId{} + +func TestDisasterRecoveryConfigIDFormatter(t *testing.T) { + actual := 
NewDisasterRecoveryConfigID("{subscriptionId}", "{resourceGroupName}", "{namespaceName}", "{alias}").ID() + expected := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/disasterRecoveryConfigs/{alias}" + if actual != expected { + t.Fatalf("Expected %q but got %q", expected, actual) + } +} + +func TestDisasterRecoveryConfigID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *DisasterRecoveryConfigId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/{subscriptionId}/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/{subscriptionId}/resourceGroups/", + Error: true, + }, + + { + // missing NamespaceName + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/", + Error: true, + }, + + { + // missing value for NamespaceName + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/", + Error: true, + }, + + { + // missing Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/", + Error: true, + }, + + { + // missing value for Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/disasterRecoveryConfigs/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/disasterRecoveryConfigs/{alias}", + Expected: &DisasterRecoveryConfigId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + 
NamespaceName: "{namespaceName}", + Name: "{alias}", + }, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/{SUBSCRIPTIONID}/RESOURCEGROUPS/{RESOURCEGROUPNAME}/PROVIDERS/MICROSOFT.EVENTHUB/NAMESPACES/{NAMESPACENAME}/DISASTERRECOVERYCONFIGS/{ALIAS}", + Error: true, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := DisasterRecoveryConfigID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.NamespaceName != v.Expected.NamespaceName { + t.Fatalf("Expected %q but got %q for NamespaceName", v.Expected.NamespaceName, actual.NamespaceName) + } + if actual.Name != v.Expected.Name { + t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + } + } +} + +func TestDisasterRecoveryConfigIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *DisasterRecoveryConfigId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/{subscriptionId}/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/{subscriptionId}/resourceGroups/", + Error: true, + }, + + { + // missing NamespaceName + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/", + Error: true, + }, + + { + // missing value for NamespaceName + 
Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/", + Error: true, + }, + + { + // missing Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/", + Error: true, + }, + + { + // missing value for Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/disasterRecoveryConfigs/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/disasterRecoveryConfigs/{alias}", + Expected: &DisasterRecoveryConfigId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + NamespaceName: "{namespaceName}", + Name: "{alias}", + }, + }, + + { + // lower-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/disasterrecoveryconfigs/{alias}", + Expected: &DisasterRecoveryConfigId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + NamespaceName: "{namespaceName}", + Name: "{alias}", + }, + }, + + { + // upper-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/NAMESPACES/{namespaceName}/DISASTERRECOVERYCONFIGS/{alias}", + Expected: &DisasterRecoveryConfigId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + NamespaceName: "{namespaceName}", + Name: "{alias}", + }, + }, + + { + // mixed-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/NaMeSpAcEs/{namespaceName}/DiSaStErReCoVeRyCoNfIgS/{alias}", + Expected: &DisasterRecoveryConfigId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + 
NamespaceName: "{namespaceName}", + Name: "{alias}", + }, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := DisasterRecoveryConfigIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.NamespaceName != v.Expected.NamespaceName { + t.Fatalf("Expected %q but got %q for NamespaceName", v.Expected.NamespaceName, actual.NamespaceName) + } + if actual.Name != v.Expected.Name { + t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + } + } +} diff --git a/azurerm/internal/services/eventhub/sdk/disasterrecoveryconfigs/id_namespace.go b/azurerm/internal/services/eventhub/sdk/disasterrecoveryconfigs/id_namespace.go new file mode 100644 index 000000000000..4ac06e182e73 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/disasterrecoveryconfigs/id_namespace.go @@ -0,0 +1,108 @@ +package disasterrecoveryconfigs + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +type NamespaceId struct { + SubscriptionId string + ResourceGroup string + Name string +} + +func NewNamespaceID(subscriptionId, resourceGroup, name string) NamespaceId { + return NamespaceId{ + SubscriptionId: subscriptionId, + ResourceGroup: resourceGroup, + Name: name, + } +} + +func (id NamespaceId) String() string { + segments := []string{ + fmt.Sprintf("Name %q", id.Name), + fmt.Sprintf("Resource Group %q", id.ResourceGroup), + } + segmentsStr := strings.Join(segments, " / ") + return 
fmt.Sprintf("%s: (%s)", "Namespace", segmentsStr) +} + +func (id NamespaceId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.EventHub/namespaces/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.Name) +} + +// NamespaceID parses a Namespace ID into an NamespaceId struct +func NamespaceID(input string) (*NamespaceId, error) { + id, err := resourceids.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := NamespaceId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + if resourceId.Name, err = id.PopSegment("namespaces"); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} + +// NamespaceIDInsensitively parses an Namespace ID into an NamespaceId struct, insensitively +// This should only be used to parse an ID for rewriting to a consistent casing, +// the NamespaceID method should be used instead for validation etc. 
+func NamespaceIDInsensitively(input string) (*NamespaceId, error) { + id, err := resourceids.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := NamespaceId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + // find the correct casing for the 'namespaces' segment + namespacesKey := "namespaces" + for key := range id.Path { + if strings.EqualFold(key, namespacesKey) { + namespacesKey = key + break + } + } + if resourceId.Name, err = id.PopSegment(namespacesKey); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} diff --git a/azurerm/internal/services/eventhub/sdk/disasterrecoveryconfigs/id_namespace_test.go b/azurerm/internal/services/eventhub/sdk/disasterrecoveryconfigs/id_namespace_test.go new file mode 100644 index 000000000000..19fa81ce8b8b --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/disasterrecoveryconfigs/id_namespace_test.go @@ -0,0 +1,227 @@ +package disasterrecoveryconfigs + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.Id = NamespaceId{} + +func TestNamespaceIDFormatter(t *testing.T) { + actual := NewNamespaceID("{subscriptionId}", "{resourceGroupName}", "{namespaceName}").ID() + expected := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}" + if actual != expected { + t.Fatalf("Expected %q but got %q", expected, actual) + } +} + +func TestNamespaceID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *NamespaceId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // 
missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/{subscriptionId}/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/{subscriptionId}/resourceGroups/", + Error: true, + }, + + { + // missing Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/", + Error: true, + }, + + { + // missing value for Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}", + Expected: &NamespaceId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + Name: "{namespaceName}", + }, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/{SUBSCRIPTIONID}/RESOURCEGROUPS/{RESOURCEGROUPNAME}/PROVIDERS/MICROSOFT.EVENTHUB/NAMESPACES/{NAMESPACENAME}", + Error: true, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := NamespaceID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.Name != v.Expected.Name { + t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + } + } +} + +func TestNamespaceIDInsensitively(t *testing.T) { + testData := []struct { + 
Input string + Error bool + Expected *NamespaceId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/{subscriptionId}/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/{subscriptionId}/resourceGroups/", + Error: true, + }, + + { + // missing Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/", + Error: true, + }, + + { + // missing value for Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}", + Expected: &NamespaceId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + Name: "{namespaceName}", + }, + }, + + { + // lower-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}", + Expected: &NamespaceId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + Name: "{namespaceName}", + }, + }, + + { + // upper-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/NAMESPACES/{namespaceName}", + Expected: &NamespaceId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + Name: "{namespaceName}", + }, + }, + + { + // mixed-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/NaMeSpAcEs/{namespaceName}", + Expected: &NamespaceId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: 
"{resourceGroupName}", + Name: "{namespaceName}", + }, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := NamespaceIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.Name != v.Expected.Name { + t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + } + } +} diff --git a/azurerm/internal/services/eventhub/sdk/disasterrecoveryconfigs/method_breakpairing_autorest.go b/azurerm/internal/services/eventhub/sdk/disasterrecoveryconfigs/method_breakpairing_autorest.go new file mode 100644 index 000000000000..3ba757ed4592 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/disasterrecoveryconfigs/method_breakpairing_autorest.go @@ -0,0 +1,63 @@ +package disasterrecoveryconfigs + +import ( + "context" + "fmt" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type BreakPairingResponse struct { + HttpResponse *http.Response +} + +// BreakPairing ... 
+func (c DisasterRecoveryConfigsClient) BreakPairing(ctx context.Context, id DisasterRecoveryConfigId) (result BreakPairingResponse, err error) { + req, err := c.preparerForBreakPairing(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "disasterrecoveryconfigs.DisasterRecoveryConfigsClient", "BreakPairing", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "disasterrecoveryconfigs.DisasterRecoveryConfigsClient", "BreakPairing", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForBreakPairing(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "disasterrecoveryconfigs.DisasterRecoveryConfigsClient", "BreakPairing", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForBreakPairing prepares the BreakPairing request. +func (c DisasterRecoveryConfigsClient) preparerForBreakPairing(ctx context.Context, id DisasterRecoveryConfigId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/breakPairing", id.ID())), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForBreakPairing handles the response to the BreakPairing request. The method always +// closes the http.Response Body. 
+func (c DisasterRecoveryConfigsClient) responderForBreakPairing(resp *http.Response) (result BreakPairingResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/azurerm/internal/services/eventhub/sdk/disasterrecoveryconfigs/method_createorupdate_autorest.go b/azurerm/internal/services/eventhub/sdk/disasterrecoveryconfigs/method_createorupdate_autorest.go new file mode 100644 index 000000000000..dab69562060e --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/disasterrecoveryconfigs/method_createorupdate_autorest.go @@ -0,0 +1,65 @@ +package disasterrecoveryconfigs + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type CreateOrUpdateResponse struct { + HttpResponse *http.Response + Model *ArmDisasterRecovery +} + +// CreateOrUpdate ... +func (c DisasterRecoveryConfigsClient) CreateOrUpdate(ctx context.Context, id DisasterRecoveryConfigId, input ArmDisasterRecovery) (result CreateOrUpdateResponse, err error) { + req, err := c.preparerForCreateOrUpdate(ctx, id, input) + if err != nil { + err = autorest.NewErrorWithError(err, "disasterrecoveryconfigs.DisasterRecoveryConfigsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "disasterrecoveryconfigs.DisasterRecoveryConfigsClient", "CreateOrUpdate", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForCreateOrUpdate(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "disasterrecoveryconfigs.DisasterRecoveryConfigsClient", "CreateOrUpdate", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForCreateOrUpdate prepares the 
CreateOrUpdate request. +func (c DisasterRecoveryConfigsClient) preparerForCreateOrUpdate(ctx context.Context, id DisasterRecoveryConfigId, input ArmDisasterRecovery) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithJSON(input), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForCreateOrUpdate handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (c DisasterRecoveryConfigsClient) responderForCreateOrUpdate(resp *http.Response) (result CreateOrUpdateResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/azurerm/internal/services/eventhub/sdk/disasterrecoveryconfigs/method_delete_autorest.go b/azurerm/internal/services/eventhub/sdk/disasterrecoveryconfigs/method_delete_autorest.go new file mode 100644 index 000000000000..320d68d61182 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/disasterrecoveryconfigs/method_delete_autorest.go @@ -0,0 +1,61 @@ +package disasterrecoveryconfigs + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type DeleteResponse struct { + HttpResponse *http.Response +} + +// Delete ... 
+func (c DisasterRecoveryConfigsClient) Delete(ctx context.Context, id DisasterRecoveryConfigId) (result DeleteResponse, err error) { + req, err := c.preparerForDelete(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "disasterrecoveryconfigs.DisasterRecoveryConfigsClient", "Delete", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "disasterrecoveryconfigs.DisasterRecoveryConfigsClient", "Delete", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForDelete(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "disasterrecoveryconfigs.DisasterRecoveryConfigsClient", "Delete", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForDelete prepares the Delete request. +func (c DisasterRecoveryConfigsClient) preparerForDelete(ctx context.Context, id DisasterRecoveryConfigId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForDelete handles the response to the Delete request. The method always +// closes the http.Response Body. 
+func (c DisasterRecoveryConfigsClient) responderForDelete(resp *http.Response) (result DeleteResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/azurerm/internal/services/eventhub/sdk/disasterrecoveryconfigs/method_failover_autorest.go b/azurerm/internal/services/eventhub/sdk/disasterrecoveryconfigs/method_failover_autorest.go new file mode 100644 index 000000000000..5ae6747c2340 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/disasterrecoveryconfigs/method_failover_autorest.go @@ -0,0 +1,63 @@ +package disasterrecoveryconfigs + +import ( + "context" + "fmt" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type FailOverResponse struct { + HttpResponse *http.Response +} + +// FailOver ... +func (c DisasterRecoveryConfigsClient) FailOver(ctx context.Context, id DisasterRecoveryConfigId) (result FailOverResponse, err error) { + req, err := c.preparerForFailOver(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "disasterrecoveryconfigs.DisasterRecoveryConfigsClient", "FailOver", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "disasterrecoveryconfigs.DisasterRecoveryConfigsClient", "FailOver", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForFailOver(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "disasterrecoveryconfigs.DisasterRecoveryConfigsClient", "FailOver", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForFailOver prepares the FailOver request. 
+func (c DisasterRecoveryConfigsClient) preparerForFailOver(ctx context.Context, id DisasterRecoveryConfigId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/failover", id.ID())), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForFailOver handles the response to the FailOver request. The method always +// closes the http.Response Body. +func (c DisasterRecoveryConfigsClient) responderForFailOver(resp *http.Response) (result FailOverResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/azurerm/internal/services/eventhub/sdk/disasterrecoveryconfigs/method_get_autorest.go b/azurerm/internal/services/eventhub/sdk/disasterrecoveryconfigs/method_get_autorest.go new file mode 100644 index 000000000000..8c3ac7e3d10c --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/disasterrecoveryconfigs/method_get_autorest.go @@ -0,0 +1,64 @@ +package disasterrecoveryconfigs + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type GetResponse struct { + HttpResponse *http.Response + Model *ArmDisasterRecovery +} + +// Get ... 
+func (c DisasterRecoveryConfigsClient) Get(ctx context.Context, id DisasterRecoveryConfigId) (result GetResponse, err error) { + req, err := c.preparerForGet(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "disasterrecoveryconfigs.DisasterRecoveryConfigsClient", "Get", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "disasterrecoveryconfigs.DisasterRecoveryConfigsClient", "Get", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForGet(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "disasterrecoveryconfigs.DisasterRecoveryConfigsClient", "Get", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForGet prepares the Get request. +func (c DisasterRecoveryConfigsClient) preparerForGet(ctx context.Context, id DisasterRecoveryConfigId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForGet handles the response to the Get request. The method always +// closes the http.Response Body. 
+func (c DisasterRecoveryConfigsClient) responderForGet(resp *http.Response) (result GetResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/azurerm/internal/services/eventhub/sdk/disasterrecoveryconfigs/method_list_autorest.go b/azurerm/internal/services/eventhub/sdk/disasterrecoveryconfigs/method_list_autorest.go new file mode 100644 index 000000000000..d063c5df0e66 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/disasterrecoveryconfigs/method_list_autorest.go @@ -0,0 +1,196 @@ +package disasterrecoveryconfigs + +import ( + "context" + "fmt" + "net/http" + "net/url" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type ListResponse struct { + HttpResponse *http.Response + Model *[]ArmDisasterRecovery + + nextLink *string + nextPageFunc func(ctx context.Context, nextLink string) (ListResponse, error) +} + +type ListCompleteResult struct { + Items []ArmDisasterRecovery +} + +func (r ListResponse) HasMore() bool { + return r.nextLink != nil +} + +func (r ListResponse) LoadMore(ctx context.Context) (resp ListResponse, err error) { + if !r.HasMore() { + err = fmt.Errorf("no more pages returned") + return + } + return r.nextPageFunc(ctx, *r.nextLink) +} + +type ArmDisasterRecoveryPredicate struct { + // TODO: implement me +} + +func (p ArmDisasterRecoveryPredicate) Matches(input ArmDisasterRecovery) bool { + // TODO: implement me + // if p.Name != nil && input.Name != *p.Name { + // return false + // } + + return true +} + +// List ... 
+func (c DisasterRecoveryConfigsClient) List(ctx context.Context, id NamespaceId) (resp ListResponse, err error) { + req, err := c.preparerForList(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "disasterrecoveryconfigs.DisasterRecoveryConfigsClient", "List", nil, "Failure preparing request") + return + } + + resp.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "disasterrecoveryconfigs.DisasterRecoveryConfigsClient", "List", resp.HttpResponse, "Failure sending request") + return + } + + resp, err = c.responderForList(resp.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "disasterrecoveryconfigs.DisasterRecoveryConfigsClient", "List", resp.HttpResponse, "Failure responding to request") + return + } + return +} + +// ListCompleteMatchingPredicate retrieves all of the results into a single object +func (c DisasterRecoveryConfigsClient) ListComplete(ctx context.Context, id NamespaceId) (ListCompleteResult, error) { + return c.ListCompleteMatchingPredicate(ctx, id, ArmDisasterRecoveryPredicate{}) +} + +// ListCompleteMatchingPredicate retrieves all of the results and then applied the predicate +func (c DisasterRecoveryConfigsClient) ListCompleteMatchingPredicate(ctx context.Context, id NamespaceId, predicate ArmDisasterRecoveryPredicate) (resp ListCompleteResult, err error) { + items := make([]ArmDisasterRecovery, 0) + + page, err := c.List(ctx, id) + if err != nil { + err = fmt.Errorf("loading the initial page: %+v", err) + return + } + if page.Model != nil { + for _, v := range *page.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + for page.HasMore() { + page, err = page.LoadMore(ctx) + if err != nil { + err = fmt.Errorf("loading the next page: %+v", err) + return + } + + if page.Model != nil { + for _, v := range *page.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + } + + out := 
ListCompleteResult{ + Items: items, + } + return out, nil +} + +// preparerForList prepares the List request. +func (c DisasterRecoveryConfigsClient) preparerForList(ctx context.Context, id NamespaceId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/disasterRecoveryConfigs", id.ID())), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// preparerForListWithNextLink prepares the List request with the given nextLink token. +func (c DisasterRecoveryConfigsClient) preparerForListWithNextLink(ctx context.Context, nextLink string) (*http.Request, error) { + uri, err := url.Parse(nextLink) + if err != nil { + return nil, fmt.Errorf("parsing nextLink %q: %+v", nextLink, err) + } + queryParameters := map[string]interface{}{} + for k, v := range uri.Query() { + if len(v) == 0 { + continue + } + val := v[0] + val = autorest.Encode("query", val) + queryParameters[k] = val + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(uri.Path), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForList handles the response to the List request. The method always +// closes the http.Response Body. 
+func (c DisasterRecoveryConfigsClient) responderForList(resp *http.Response) (result ListResponse, err error) { + type page struct { + Values []ArmDisasterRecovery `json:"value"` + NextLink *string `json:"nextLink"` + } + var respObj page + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&respObj), + autorest.ByClosing()) + result.HttpResponse = resp + result.Model = &respObj.Values + result.nextLink = respObj.NextLink + if respObj.NextLink != nil { + result.nextPageFunc = func(ctx context.Context, nextLink string) (result ListResponse, err error) { + req, err := c.preparerForListWithNextLink(ctx, nextLink) + if err != nil { + err = autorest.NewErrorWithError(err, "disasterrecoveryconfigs.DisasterRecoveryConfigsClient", "List", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "disasterrecoveryconfigs.DisasterRecoveryConfigsClient", "List", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForList(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "disasterrecoveryconfigs.DisasterRecoveryConfigsClient", "List", result.HttpResponse, "Failure responding to request") + return + } + + return + } + } + return +} diff --git a/azurerm/internal/services/eventhub/sdk/disasterrecoveryconfigs/model_armdisasterrecovery.go b/azurerm/internal/services/eventhub/sdk/disasterrecoveryconfigs/model_armdisasterrecovery.go new file mode 100644 index 000000000000..ad640bdfc812 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/disasterrecoveryconfigs/model_armdisasterrecovery.go @@ -0,0 +1,8 @@ +package disasterrecoveryconfigs + +type ArmDisasterRecovery struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties *ArmDisasterRecoveryProperties 
`json:"properties,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/azurerm/internal/services/eventhub/sdk/disasterrecoveryconfigs/model_armdisasterrecoveryproperties.go b/azurerm/internal/services/eventhub/sdk/disasterrecoveryconfigs/model_armdisasterrecoveryproperties.go new file mode 100644 index 000000000000..37d897cf4b75 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/disasterrecoveryconfigs/model_armdisasterrecoveryproperties.go @@ -0,0 +1,9 @@ +package disasterrecoveryconfigs + +type ArmDisasterRecoveryProperties struct { + AlternateName *string `json:"alternateName,omitempty"` + PartnerNamespace *string `json:"partnerNamespace,omitempty"` + PendingReplicationOperationsCount *int64 `json:"pendingReplicationOperationsCount,omitempty"` + ProvisioningState *ProvisioningStateDR `json:"provisioningState,omitempty"` + Role *RoleDisasterRecovery `json:"role,omitempty"` +} diff --git a/azurerm/internal/services/eventhub/sdk/disasterrecoveryconfigs/version.go b/azurerm/internal/services/eventhub/sdk/disasterrecoveryconfigs/version.go new file mode 100644 index 000000000000..abe3a3d08070 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/disasterrecoveryconfigs/version.go @@ -0,0 +1,9 @@ +package disasterrecoveryconfigs + +import "fmt" + +const defaultApiVersion = "2017-04-01" + +func userAgent() string { + return fmt.Sprintf("pandora/disasterrecoveryconfigs/%s", defaultApiVersion) +} diff --git a/azurerm/internal/services/eventhub/sdk/eventhubs/client.go b/azurerm/internal/services/eventhub/sdk/eventhubs/client.go new file mode 100644 index 000000000000..e1b02b6a55ec --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/eventhubs/client.go @@ -0,0 +1,15 @@ +package eventhubs + +import "github.com/Azure/go-autorest/autorest" + +type EventHubsClient struct { + Client autorest.Client + baseUri string +} + +func NewEventHubsClientWithBaseURI(endpoint string) EventHubsClient { + return EventHubsClient{ + Client: 
autorest.NewClientWithUserAgent(userAgent()), + baseUri: endpoint, + } +} diff --git a/azurerm/internal/services/eventhub/sdk/eventhubs/constants.go b/azurerm/internal/services/eventhub/sdk/eventhubs/constants.go new file mode 100644 index 000000000000..5678fd7222e6 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/eventhubs/constants.go @@ -0,0 +1,22 @@ +package eventhubs + +type EncodingCaptureDescription string + +const ( + EncodingCaptureDescriptionAvro EncodingCaptureDescription = "Avro" + EncodingCaptureDescriptionAvroDeflate EncodingCaptureDescription = "AvroDeflate" +) + +type EntityStatus string + +const ( + EntityStatusActive EntityStatus = "Active" + EntityStatusCreating EntityStatus = "Creating" + EntityStatusDeleting EntityStatus = "Deleting" + EntityStatusDisabled EntityStatus = "Disabled" + EntityStatusReceiveDisabled EntityStatus = "ReceiveDisabled" + EntityStatusRenaming EntityStatus = "Renaming" + EntityStatusRestoring EntityStatus = "Restoring" + EntityStatusSendDisabled EntityStatus = "SendDisabled" + EntityStatusUnknown EntityStatus = "Unknown" +) diff --git a/azurerm/internal/services/eventhub/sdk/eventhubs/id_authorizationrule.go b/azurerm/internal/services/eventhub/sdk/eventhubs/id_authorizationrule.go new file mode 100644 index 000000000000..937724a6c6bb --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/eventhubs/id_authorizationrule.go @@ -0,0 +1,144 @@ +package eventhubs + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +type AuthorizationRuleId struct { + SubscriptionId string + ResourceGroup string + NamespaceName string + EventhubName string + Name string +} + +func NewAuthorizationRuleID(subscriptionId, resourceGroup, namespaceName, eventhubName, name string) AuthorizationRuleId { + return AuthorizationRuleId{ + SubscriptionId: subscriptionId, + ResourceGroup: resourceGroup, + NamespaceName: namespaceName, + EventhubName: eventhubName, + Name: name, + } +} + 
+func (id AuthorizationRuleId) String() string { + segments := []string{ + fmt.Sprintf("Name %q", id.Name), + fmt.Sprintf("Eventhub Name %q", id.EventhubName), + fmt.Sprintf("Namespace Name %q", id.NamespaceName), + fmt.Sprintf("Resource Group %q", id.ResourceGroup), + } + segmentsStr := strings.Join(segments, " / ") + return fmt.Sprintf("%s: (%s)", "Authorization Rule", segmentsStr) +} + +func (id AuthorizationRuleId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.EventHub/namespaces/%s/eventhubs/%s/authorizationRules/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.NamespaceName, id.EventhubName, id.Name) +} + +// AuthorizationRuleID parses a AuthorizationRule ID into an AuthorizationRuleId struct +func AuthorizationRuleID(input string) (*AuthorizationRuleId, error) { + id, err := resourceids.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := AuthorizationRuleId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + if resourceId.NamespaceName, err = id.PopSegment("namespaces"); err != nil { + return nil, err + } + if resourceId.EventhubName, err = id.PopSegment("eventhubs"); err != nil { + return nil, err + } + if resourceId.Name, err = id.PopSegment("authorizationRules"); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} + +// AuthorizationRuleIDInsensitively parses an AuthorizationRule ID into an AuthorizationRuleId struct, insensitively +// This should only be used to parse an ID for rewriting to a consistent casing, +// the AuthorizationRuleID method should be used instead for validation etc. 
+func AuthorizationRuleIDInsensitively(input string) (*AuthorizationRuleId, error) { + id, err := resourceids.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := AuthorizationRuleId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + // find the correct casing for the 'namespaces' segment + namespacesKey := "namespaces" + for key := range id.Path { + if strings.EqualFold(key, namespacesKey) { + namespacesKey = key + break + } + } + if resourceId.NamespaceName, err = id.PopSegment(namespacesKey); err != nil { + return nil, err + } + + // find the correct casing for the 'eventhubs' segment + eventhubsKey := "eventhubs" + for key := range id.Path { + if strings.EqualFold(key, eventhubsKey) { + eventhubsKey = key + break + } + } + if resourceId.EventhubName, err = id.PopSegment(eventhubsKey); err != nil { + return nil, err + } + + // find the correct casing for the 'authorizationRules' segment + authorizationRulesKey := "authorizationRules" + for key := range id.Path { + if strings.EqualFold(key, authorizationRulesKey) { + authorizationRulesKey = key + break + } + } + if resourceId.Name, err = id.PopSegment(authorizationRulesKey); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} diff --git a/azurerm/internal/services/eventhub/sdk/eventhubs/id_authorizationrule_test.go b/azurerm/internal/services/eventhub/sdk/eventhubs/id_authorizationrule_test.go new file mode 100644 index 000000000000..ea6714d67768 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/eventhubs/id_authorizationrule_test.go @@ -0,0 +1,297 @@ +package eventhubs + +import ( + "testing" + + 
"github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.Id = AuthorizationRuleId{} + +func TestAuthorizationRuleIDFormatter(t *testing.T) { + actual := NewAuthorizationRuleID("{subscriptionId}", "{resourceGroupName}", "{namespaceName}", "{eventHubName}", "{authorizationRuleName}").ID() + expected := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/authorizationRules/{authorizationRuleName}" + if actual != expected { + t.Fatalf("Expected %q but got %q", expected, actual) + } +} + +func TestAuthorizationRuleID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *AuthorizationRuleId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/{subscriptionId}/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/{subscriptionId}/resourceGroups/", + Error: true, + }, + + { + // missing NamespaceName + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/", + Error: true, + }, + + { + // missing value for NamespaceName + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/", + Error: true, + }, + + { + // missing EventhubName + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/", + Error: true, + }, + + { + // missing value for EventhubName + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/", + Error: true, + }, + + { + // missing Name + Input: 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/", + Error: true, + }, + + { + // missing value for Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/authorizationRules/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/authorizationRules/{authorizationRuleName}", + Expected: &AuthorizationRuleId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + NamespaceName: "{namespaceName}", + EventhubName: "{eventHubName}", + Name: "{authorizationRuleName}", + }, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/{SUBSCRIPTIONID}/RESOURCEGROUPS/{RESOURCEGROUPNAME}/PROVIDERS/MICROSOFT.EVENTHUB/NAMESPACES/{NAMESPACENAME}/EVENTHUBS/{EVENTHUBNAME}/AUTHORIZATIONRULES/{AUTHORIZATIONRULENAME}", + Error: true, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := AuthorizationRuleID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.NamespaceName != v.Expected.NamespaceName { + t.Fatalf("Expected %q but got %q for NamespaceName", v.Expected.NamespaceName, actual.NamespaceName) + } + if actual.EventhubName != v.Expected.EventhubName { + t.Fatalf("Expected %q but got %q for 
EventhubName", v.Expected.EventhubName, actual.EventhubName) + } + if actual.Name != v.Expected.Name { + t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + } + } +} + +func TestAuthorizationRuleIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *AuthorizationRuleId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/{subscriptionId}/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/{subscriptionId}/resourceGroups/", + Error: true, + }, + + { + // missing NamespaceName + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/", + Error: true, + }, + + { + // missing value for NamespaceName + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/", + Error: true, + }, + + { + // missing EventhubName + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/", + Error: true, + }, + + { + // missing value for EventhubName + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/", + Error: true, + }, + + { + // missing Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/", + Error: true, + }, + + { + // missing value for Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/authorizationRules/", + Error: true, + }, + + { + // valid + Input: 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/authorizationRules/{authorizationRuleName}", + Expected: &AuthorizationRuleId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + NamespaceName: "{namespaceName}", + EventhubName: "{eventHubName}", + Name: "{authorizationRuleName}", + }, + }, + + { + // lower-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/authorizationrules/{authorizationRuleName}", + Expected: &AuthorizationRuleId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + NamespaceName: "{namespaceName}", + EventhubName: "{eventHubName}", + Name: "{authorizationRuleName}", + }, + }, + + { + // upper-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/NAMESPACES/{namespaceName}/EVENTHUBS/{eventHubName}/AUTHORIZATIONRULES/{authorizationRuleName}", + Expected: &AuthorizationRuleId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + NamespaceName: "{namespaceName}", + EventhubName: "{eventHubName}", + Name: "{authorizationRuleName}", + }, + }, + + { + // mixed-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/NaMeSpAcEs/{namespaceName}/EvEnThUbS/{eventHubName}/AuThOrIzAtIoNrUlEs/{authorizationRuleName}", + Expected: &AuthorizationRuleId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + NamespaceName: "{namespaceName}", + EventhubName: "{eventHubName}", + Name: "{authorizationRuleName}", + }, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := AuthorizationRuleIDInsensitively(v.Input) + if err != nil { + if v.Error { + 
continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.NamespaceName != v.Expected.NamespaceName { + t.Fatalf("Expected %q but got %q for NamespaceName", v.Expected.NamespaceName, actual.NamespaceName) + } + if actual.EventhubName != v.Expected.EventhubName { + t.Fatalf("Expected %q but got %q for EventhubName", v.Expected.EventhubName, actual.EventhubName) + } + if actual.Name != v.Expected.Name { + t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + } + } +} diff --git a/azurerm/internal/services/eventhub/sdk/eventhubs/id_eventhub.go b/azurerm/internal/services/eventhub/sdk/eventhubs/id_eventhub.go new file mode 100644 index 000000000000..c8571fc6934a --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/eventhubs/id_eventhub.go @@ -0,0 +1,126 @@ +package eventhubs + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +type EventhubId struct { + SubscriptionId string + ResourceGroup string + NamespaceName string + Name string +} + +func NewEventhubID(subscriptionId, resourceGroup, namespaceName, name string) EventhubId { + return EventhubId{ + SubscriptionId: subscriptionId, + ResourceGroup: resourceGroup, + NamespaceName: namespaceName, + Name: name, + } +} + +func (id EventhubId) String() string { + segments := []string{ + fmt.Sprintf("Name %q", id.Name), + fmt.Sprintf("Namespace Name %q", id.NamespaceName), + fmt.Sprintf("Resource Group %q", id.ResourceGroup), + } + segmentsStr := strings.Join(segments, " / ") + return fmt.Sprintf("%s: (%s)", "Eventhub", 
segmentsStr) +} + +func (id EventhubId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.EventHub/namespaces/%s/eventhubs/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.NamespaceName, id.Name) +} + +// EventhubID parses a Eventhub ID into an EventhubId struct +func EventhubID(input string) (*EventhubId, error) { + id, err := resourceids.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := EventhubId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + if resourceId.NamespaceName, err = id.PopSegment("namespaces"); err != nil { + return nil, err + } + if resourceId.Name, err = id.PopSegment("eventhubs"); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} + +// EventhubIDInsensitively parses an Eventhub ID into an EventhubId struct, insensitively +// This should only be used to parse an ID for rewriting to a consistent casing, +// the EventhubID method should be used instead for validation etc. 
+func EventhubIDInsensitively(input string) (*EventhubId, error) { + id, err := resourceids.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := EventhubId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + // find the correct casing for the 'namespaces' segment + namespacesKey := "namespaces" + for key := range id.Path { + if strings.EqualFold(key, namespacesKey) { + namespacesKey = key + break + } + } + if resourceId.NamespaceName, err = id.PopSegment(namespacesKey); err != nil { + return nil, err + } + + // find the correct casing for the 'eventhubs' segment + eventhubsKey := "eventhubs" + for key := range id.Path { + if strings.EqualFold(key, eventhubsKey) { + eventhubsKey = key + break + } + } + if resourceId.Name, err = id.PopSegment(eventhubsKey); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} diff --git a/azurerm/internal/services/eventhub/sdk/eventhubs/id_eventhub_test.go b/azurerm/internal/services/eventhub/sdk/eventhubs/id_eventhub_test.go new file mode 100644 index 000000000000..f1ac190454ea --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/eventhubs/id_eventhub_test.go @@ -0,0 +1,262 @@ +package eventhubs + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.Id = EventhubId{} + +func TestEventhubIDFormatter(t *testing.T) { + actual := NewEventhubID("{subscriptionId}", "{resourceGroupName}", "{namespaceName}", "{eventHubName}").ID() + expected := 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}" + if actual != expected { + t.Fatalf("Expected %q but got %q", expected, actual) + } +} + +func TestEventhubID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *EventhubId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/{subscriptionId}/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/{subscriptionId}/resourceGroups/", + Error: true, + }, + + { + // missing NamespaceName + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/", + Error: true, + }, + + { + // missing value for NamespaceName + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/", + Error: true, + }, + + { + // missing Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/", + Error: true, + }, + + { + // missing value for Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}", + Expected: &EventhubId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + NamespaceName: "{namespaceName}", + Name: "{eventHubName}", + }, + }, + + { + // upper-cased + Input: 
"/SUBSCRIPTIONS/{SUBSCRIPTIONID}/RESOURCEGROUPS/{RESOURCEGROUPNAME}/PROVIDERS/MICROSOFT.EVENTHUB/NAMESPACES/{NAMESPACENAME}/EVENTHUBS/{EVENTHUBNAME}", + Error: true, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := EventhubID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.NamespaceName != v.Expected.NamespaceName { + t.Fatalf("Expected %q but got %q for NamespaceName", v.Expected.NamespaceName, actual.NamespaceName) + } + if actual.Name != v.Expected.Name { + t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + } + } +} + +func TestEventhubIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *EventhubId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/{subscriptionId}/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/{subscriptionId}/resourceGroups/", + Error: true, + }, + + { + // missing NamespaceName + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/", + Error: true, + }, + + { + // missing value for NamespaceName + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/", + Error: true, + }, + + { 
+ // missing Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/", + Error: true, + }, + + { + // missing value for Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}", + Expected: &EventhubId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + NamespaceName: "{namespaceName}", + Name: "{eventHubName}", + }, + }, + + { + // lower-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}", + Expected: &EventhubId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + NamespaceName: "{namespaceName}", + Name: "{eventHubName}", + }, + }, + + { + // upper-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/NAMESPACES/{namespaceName}/EVENTHUBS/{eventHubName}", + Expected: &EventhubId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + NamespaceName: "{namespaceName}", + Name: "{eventHubName}", + }, + }, + + { + // mixed-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/NaMeSpAcEs/{namespaceName}/EvEnThUbS/{eventHubName}", + Expected: &EventhubId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + NamespaceName: "{namespaceName}", + Name: "{eventHubName}", + }, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := EventhubIDInsensitively(v.Input) + if err != nil { + if 
v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.NamespaceName != v.Expected.NamespaceName { + t.Fatalf("Expected %q but got %q for NamespaceName", v.Expected.NamespaceName, actual.NamespaceName) + } + if actual.Name != v.Expected.Name { + t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + } + } +} diff --git a/azurerm/internal/services/eventhub/sdk/eventhubs/id_namespace.go b/azurerm/internal/services/eventhub/sdk/eventhubs/id_namespace.go new file mode 100644 index 000000000000..9a9e26418e0d --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/eventhubs/id_namespace.go @@ -0,0 +1,108 @@ +package eventhubs + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +type NamespaceId struct { + SubscriptionId string + ResourceGroup string + Name string +} + +func NewNamespaceID(subscriptionId, resourceGroup, name string) NamespaceId { + return NamespaceId{ + SubscriptionId: subscriptionId, + ResourceGroup: resourceGroup, + Name: name, + } +} + +func (id NamespaceId) String() string { + segments := []string{ + fmt.Sprintf("Name %q", id.Name), + fmt.Sprintf("Resource Group %q", id.ResourceGroup), + } + segmentsStr := strings.Join(segments, " / ") + return fmt.Sprintf("%s: (%s)", "Namespace", segmentsStr) +} + +func (id NamespaceId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.EventHub/namespaces/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.Name) +} + +// NamespaceID parses a 
Namespace ID into an NamespaceId struct +func NamespaceID(input string) (*NamespaceId, error) { + id, err := resourceids.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := NamespaceId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + if resourceId.Name, err = id.PopSegment("namespaces"); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} + +// NamespaceIDInsensitively parses an Namespace ID into an NamespaceId struct, insensitively +// This should only be used to parse an ID for rewriting to a consistent casing, +// the NamespaceID method should be used instead for validation etc. +func NamespaceIDInsensitively(input string) (*NamespaceId, error) { + id, err := resourceids.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := NamespaceId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + // find the correct casing for the 'namespaces' segment + namespacesKey := "namespaces" + for key := range id.Path { + if strings.EqualFold(key, namespacesKey) { + namespacesKey = key + break + } + } + if resourceId.Name, err = id.PopSegment(namespacesKey); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} diff --git a/azurerm/internal/services/eventhub/sdk/eventhubs/id_namespace_test.go 
b/azurerm/internal/services/eventhub/sdk/eventhubs/id_namespace_test.go new file mode 100644 index 000000000000..846b13c9b60d --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/eventhubs/id_namespace_test.go @@ -0,0 +1,227 @@ +package eventhubs + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.Id = NamespaceId{} + +func TestNamespaceIDFormatter(t *testing.T) { + actual := NewNamespaceID("{subscriptionId}", "{resourceGroupName}", "{namespaceName}").ID() + expected := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}" + if actual != expected { + t.Fatalf("Expected %q but got %q", expected, actual) + } +} + +func TestNamespaceID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *NamespaceId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/{subscriptionId}/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/{subscriptionId}/resourceGroups/", + Error: true, + }, + + { + // missing Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/", + Error: true, + }, + + { + // missing value for Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}", + Expected: &NamespaceId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + Name: "{namespaceName}", + }, + }, + + { + // upper-cased + Input: 
"/SUBSCRIPTIONS/{SUBSCRIPTIONID}/RESOURCEGROUPS/{RESOURCEGROUPNAME}/PROVIDERS/MICROSOFT.EVENTHUB/NAMESPACES/{NAMESPACENAME}", + Error: true, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := NamespaceID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.Name != v.Expected.Name { + t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + } + } +} + +func TestNamespaceIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *NamespaceId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/{subscriptionId}/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/{subscriptionId}/resourceGroups/", + Error: true, + }, + + { + // missing Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/", + Error: true, + }, + + { + // missing value for Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}", + Expected: &NamespaceId{ + SubscriptionId: 
"{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + Name: "{namespaceName}", + }, + }, + + { + // lower-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}", + Expected: &NamespaceId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + Name: "{namespaceName}", + }, + }, + + { + // upper-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/NAMESPACES/{namespaceName}", + Expected: &NamespaceId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + Name: "{namespaceName}", + }, + }, + + { + // mixed-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/NaMeSpAcEs/{namespaceName}", + Expected: &NamespaceId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + Name: "{namespaceName}", + }, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := NamespaceIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.Name != v.Expected.Name { + t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + } + } +} diff --git a/azurerm/internal/services/eventhub/sdk/eventhubs/method_createorupdate_autorest.go b/azurerm/internal/services/eventhub/sdk/eventhubs/method_createorupdate_autorest.go new file mode 
100644 index 000000000000..52d8de2d199b --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/eventhubs/method_createorupdate_autorest.go @@ -0,0 +1,65 @@ +package eventhubs + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type CreateOrUpdateResponse struct { + HttpResponse *http.Response + Model *Eventhub +} + +// CreateOrUpdate ... +func (c EventHubsClient) CreateOrUpdate(ctx context.Context, id EventhubId, input Eventhub) (result CreateOrUpdateResponse, err error) { + req, err := c.preparerForCreateOrUpdate(ctx, id, input) + if err != nil { + err = autorest.NewErrorWithError(err, "eventhubs.EventHubsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "eventhubs.EventHubsClient", "CreateOrUpdate", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForCreateOrUpdate(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "eventhubs.EventHubsClient", "CreateOrUpdate", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForCreateOrUpdate prepares the CreateOrUpdate request. +func (c EventHubsClient) preparerForCreateOrUpdate(ctx context.Context, id EventhubId, input Eventhub) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithJSON(input), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForCreateOrUpdate handles the response to the CreateOrUpdate request. 
The method always +// closes the http.Response Body. +func (c EventHubsClient) responderForCreateOrUpdate(resp *http.Response) (result CreateOrUpdateResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/azurerm/internal/services/eventhub/sdk/eventhubs/method_delete_autorest.go b/azurerm/internal/services/eventhub/sdk/eventhubs/method_delete_autorest.go new file mode 100644 index 000000000000..f4cb855b7fd9 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/eventhubs/method_delete_autorest.go @@ -0,0 +1,61 @@ +package eventhubs + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type DeleteResponse struct { + HttpResponse *http.Response +} + +// Delete ... +func (c EventHubsClient) Delete(ctx context.Context, id EventhubId) (result DeleteResponse, err error) { + req, err := c.preparerForDelete(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "eventhubs.EventHubsClient", "Delete", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "eventhubs.EventHubsClient", "Delete", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForDelete(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "eventhubs.EventHubsClient", "Delete", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForDelete prepares the Delete request. 
+func (c EventHubsClient) preparerForDelete(ctx context.Context, id EventhubId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForDelete handles the response to the Delete request. The method always +// closes the http.Response Body. +func (c EventHubsClient) responderForDelete(resp *http.Response) (result DeleteResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/azurerm/internal/services/eventhub/sdk/eventhubs/method_deleteauthorizationrule_autorest.go b/azurerm/internal/services/eventhub/sdk/eventhubs/method_deleteauthorizationrule_autorest.go new file mode 100644 index 000000000000..c02fde9b49a7 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/eventhubs/method_deleteauthorizationrule_autorest.go @@ -0,0 +1,61 @@ +package eventhubs + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type DeleteAuthorizationRuleResponse struct { + HttpResponse *http.Response +} + +// DeleteAuthorizationRule ... 
+func (c EventHubsClient) DeleteAuthorizationRule(ctx context.Context, id AuthorizationRuleId) (result DeleteAuthorizationRuleResponse, err error) { + req, err := c.preparerForDeleteAuthorizationRule(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "eventhubs.EventHubsClient", "DeleteAuthorizationRule", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "eventhubs.EventHubsClient", "DeleteAuthorizationRule", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForDeleteAuthorizationRule(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "eventhubs.EventHubsClient", "DeleteAuthorizationRule", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForDeleteAuthorizationRule prepares the DeleteAuthorizationRule request. +func (c EventHubsClient) preparerForDeleteAuthorizationRule(ctx context.Context, id AuthorizationRuleId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForDeleteAuthorizationRule handles the response to the DeleteAuthorizationRule request. The method always +// closes the http.Response Body. 
+func (c EventHubsClient) responderForDeleteAuthorizationRule(resp *http.Response) (result DeleteAuthorizationRuleResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/azurerm/internal/services/eventhub/sdk/eventhubs/method_get_autorest.go b/azurerm/internal/services/eventhub/sdk/eventhubs/method_get_autorest.go new file mode 100644 index 000000000000..ab0524efd086 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/eventhubs/method_get_autorest.go @@ -0,0 +1,64 @@ +package eventhubs + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type GetResponse struct { + HttpResponse *http.Response + Model *Eventhub +} + +// Get ... +func (c EventHubsClient) Get(ctx context.Context, id EventhubId) (result GetResponse, err error) { + req, err := c.preparerForGet(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "eventhubs.EventHubsClient", "Get", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "eventhubs.EventHubsClient", "Get", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForGet(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "eventhubs.EventHubsClient", "Get", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForGet prepares the Get request. 
+func (c EventHubsClient) preparerForGet(ctx context.Context, id EventhubId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForGet handles the response to the Get request. The method always +// closes the http.Response Body. +func (c EventHubsClient) responderForGet(resp *http.Response) (result GetResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/azurerm/internal/services/eventhub/sdk/eventhubs/method_getauthorizationrule_autorest.go b/azurerm/internal/services/eventhub/sdk/eventhubs/method_getauthorizationrule_autorest.go new file mode 100644 index 000000000000..94db5c02873b --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/eventhubs/method_getauthorizationrule_autorest.go @@ -0,0 +1,64 @@ +package eventhubs + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type GetAuthorizationRuleResponse struct { + HttpResponse *http.Response + Model *AuthorizationRule +} + +// GetAuthorizationRule ... 
+func (c EventHubsClient) GetAuthorizationRule(ctx context.Context, id AuthorizationRuleId) (result GetAuthorizationRuleResponse, err error) { + req, err := c.preparerForGetAuthorizationRule(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "eventhubs.EventHubsClient", "GetAuthorizationRule", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "eventhubs.EventHubsClient", "GetAuthorizationRule", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForGetAuthorizationRule(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "eventhubs.EventHubsClient", "GetAuthorizationRule", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForGetAuthorizationRule prepares the GetAuthorizationRule request. +func (c EventHubsClient) preparerForGetAuthorizationRule(ctx context.Context, id AuthorizationRuleId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForGetAuthorizationRule handles the response to the GetAuthorizationRule request. The method always +// closes the http.Response Body. 
+func (c EventHubsClient) responderForGetAuthorizationRule(resp *http.Response) (result GetAuthorizationRuleResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/azurerm/internal/services/eventhub/sdk/eventhubs/method_listbynamespace_autorest.go b/azurerm/internal/services/eventhub/sdk/eventhubs/method_listbynamespace_autorest.go new file mode 100644 index 000000000000..5b00d66c26bc --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/eventhubs/method_listbynamespace_autorest.go @@ -0,0 +1,223 @@ +package eventhubs + +import ( + "context" + "fmt" + "net/http" + "net/url" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type ListByNamespaceResponse struct { + HttpResponse *http.Response + Model *[]Eventhub + + nextLink *string + nextPageFunc func(ctx context.Context, nextLink string) (ListByNamespaceResponse, error) +} + +type ListByNamespaceCompleteResult struct { + Items []Eventhub +} + +func (r ListByNamespaceResponse) HasMore() bool { + return r.nextLink != nil +} + +func (r ListByNamespaceResponse) LoadMore(ctx context.Context) (resp ListByNamespaceResponse, err error) { + if !r.HasMore() { + err = fmt.Errorf("no more pages returned") + return + } + return r.nextPageFunc(ctx, *r.nextLink) +} + +type ListByNamespaceOptions struct { + Skip *int64 + Top *int64 +} + +func DefaultListByNamespaceOptions() ListByNamespaceOptions { + return ListByNamespaceOptions{} +} + +func (o ListByNamespaceOptions) toQueryString() map[string]interface{} { + out := make(map[string]interface{}) + + if o.Skip != nil { + out["$skip"] = *o.Skip + } + + if o.Top != nil { + out["$top"] = *o.Top + } + + return out +} + +type EventhubPredicate struct { + // TODO: implement me +} + +func (p EventhubPredicate) Matches(input Eventhub) bool { + // TODO: implement me 
+ // if p.Name != nil && input.Name != *p.Name { + // return false + // } + + return true +} + +// ListByNamespace ... +func (c EventHubsClient) ListByNamespace(ctx context.Context, id NamespaceId, options ListByNamespaceOptions) (resp ListByNamespaceResponse, err error) { + req, err := c.preparerForListByNamespace(ctx, id, options) + if err != nil { + err = autorest.NewErrorWithError(err, "eventhubs.EventHubsClient", "ListByNamespace", nil, "Failure preparing request") + return + } + + resp.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "eventhubs.EventHubsClient", "ListByNamespace", resp.HttpResponse, "Failure sending request") + return + } + + resp, err = c.responderForListByNamespace(resp.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "eventhubs.EventHubsClient", "ListByNamespace", resp.HttpResponse, "Failure responding to request") + return + } + return +} + +// ListByNamespaceComplete retrieves all of the results into a single object +func (c EventHubsClient) ListByNamespaceComplete(ctx context.Context, id NamespaceId, options ListByNamespaceOptions) (ListByNamespaceCompleteResult, error) { + return c.ListByNamespaceCompleteMatchingPredicate(ctx, id, options, EventhubPredicate{}) +} + +// ListByNamespaceCompleteMatchingPredicate retrieves all of the results and then applies the predicate +func (c EventHubsClient) ListByNamespaceCompleteMatchingPredicate(ctx context.Context, id NamespaceId, options ListByNamespaceOptions, predicate EventhubPredicate) (resp ListByNamespaceCompleteResult, err error) { + items := make([]Eventhub, 0) + + page, err := c.ListByNamespace(ctx, id, options) + if err != nil { + err = fmt.Errorf("loading the initial page: %+v", err) + return + } + if page.Model != nil { + for _, v := range *page.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + for page.HasMore() { + page, err = 
page.LoadMore(ctx) + if err != nil { + err = fmt.Errorf("loading the next page: %+v", err) + return + } + + if page.Model != nil { + for _, v := range *page.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + } + + out := ListByNamespaceCompleteResult{ + Items: items, + } + return out, nil +} + +// preparerForListByNamespace prepares the ListByNamespace request. +func (c EventHubsClient) preparerForListByNamespace(ctx context.Context, id NamespaceId, options ListByNamespaceOptions) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + for k, v := range options.toQueryString() { + queryParameters[k] = autorest.Encode("query", v) + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/eventhubs", id.ID())), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// preparerForListByNamespaceWithNextLink prepares the ListByNamespace request with the given nextLink token. 
+func (c EventHubsClient) preparerForListByNamespaceWithNextLink(ctx context.Context, nextLink string) (*http.Request, error) { + uri, err := url.Parse(nextLink) + if err != nil { + return nil, fmt.Errorf("parsing nextLink %q: %+v", nextLink, err) + } + queryParameters := map[string]interface{}{} + for k, v := range uri.Query() { + if len(v) == 0 { + continue + } + val := v[0] + val = autorest.Encode("query", val) + queryParameters[k] = val + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(uri.Path), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForListByNamespace handles the response to the ListByNamespace request. The method always +// closes the http.Response Body. +func (c EventHubsClient) responderForListByNamespace(resp *http.Response) (result ListByNamespaceResponse, err error) { + type page struct { + Values []Eventhub `json:"value"` + NextLink *string `json:"nextLink"` + } + var respObj page + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&respObj), + autorest.ByClosing()) + result.HttpResponse = resp + result.Model = &respObj.Values + result.nextLink = respObj.NextLink + if respObj.NextLink != nil { + result.nextPageFunc = func(ctx context.Context, nextLink string) (result ListByNamespaceResponse, err error) { + req, err := c.preparerForListByNamespaceWithNextLink(ctx, nextLink) + if err != nil { + err = autorest.NewErrorWithError(err, "eventhubs.EventHubsClient", "ListByNamespace", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "eventhubs.EventHubsClient", "ListByNamespace", result.HttpResponse, "Failure sending request") + return + 
} + + result, err = c.responderForListByNamespace(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "eventhubs.EventHubsClient", "ListByNamespace", result.HttpResponse, "Failure responding to request") + return + } + + return + } + } + return +} diff --git a/azurerm/internal/services/eventhub/sdk/eventhubs/model_authorizationrule.go b/azurerm/internal/services/eventhub/sdk/eventhubs/model_authorizationrule.go new file mode 100644 index 000000000000..a2e17cad8ae7 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/eventhubs/model_authorizationrule.go @@ -0,0 +1,8 @@ +package eventhubs + +type AuthorizationRule struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties *AuthorizationRuleProperties `json:"properties,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/azurerm/internal/services/eventhub/sdk/eventhubs/model_authorizationruleproperties.go b/azurerm/internal/services/eventhub/sdk/eventhubs/model_authorizationruleproperties.go new file mode 100644 index 000000000000..5e68e5e0f15c --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/eventhubs/model_authorizationruleproperties.go @@ -0,0 +1,5 @@ +package eventhubs + +type AuthorizationRuleProperties struct { + Rights []string `json:"rights"` +} diff --git a/azurerm/internal/services/eventhub/sdk/eventhubs/model_capturedescription.go b/azurerm/internal/services/eventhub/sdk/eventhubs/model_capturedescription.go new file mode 100644 index 000000000000..2afde4372c4e --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/eventhubs/model_capturedescription.go @@ -0,0 +1,10 @@ +package eventhubs + +type CaptureDescription struct { + Destination *Destination `json:"destination,omitempty"` + Enabled *bool `json:"enabled,omitempty"` + Encoding *EncodingCaptureDescription `json:"encoding,omitempty"` + IntervalInSeconds *int64 `json:"intervalInSeconds,omitempty"` + SizeLimitInBytes *int64 `json:"sizeLimitInBytes,omitempty"` 
+ SkipEmptyArchives *bool `json:"skipEmptyArchives,omitempty"` +} diff --git a/azurerm/internal/services/eventhub/sdk/eventhubs/model_destination.go b/azurerm/internal/services/eventhub/sdk/eventhubs/model_destination.go new file mode 100644 index 000000000000..85aecab3ecaa --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/eventhubs/model_destination.go @@ -0,0 +1,6 @@ +package eventhubs + +type Destination struct { + Name *string `json:"name,omitempty"` + Properties *DestinationProperties `json:"properties,omitempty"` +} diff --git a/azurerm/internal/services/eventhub/sdk/eventhubs/model_destinationproperties.go b/azurerm/internal/services/eventhub/sdk/eventhubs/model_destinationproperties.go new file mode 100644 index 000000000000..49eccb893e19 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/eventhubs/model_destinationproperties.go @@ -0,0 +1,7 @@ +package eventhubs + +type DestinationProperties struct { + ArchiveNameFormat *string `json:"archiveNameFormat,omitempty"` + BlobContainer *string `json:"blobContainer,omitempty"` + StorageAccountResourceId *string `json:"storageAccountResourceId,omitempty"` +} diff --git a/azurerm/internal/services/eventhub/sdk/eventhubs/model_eventhub.go b/azurerm/internal/services/eventhub/sdk/eventhubs/model_eventhub.go new file mode 100644 index 000000000000..cc9b2e315b9e --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/eventhubs/model_eventhub.go @@ -0,0 +1,8 @@ +package eventhubs + +type Eventhub struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties *EventhubProperties `json:"properties,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/azurerm/internal/services/eventhub/sdk/eventhubs/model_eventhubproperties.go b/azurerm/internal/services/eventhub/sdk/eventhubs/model_eventhubproperties.go new file mode 100644 index 000000000000..0f35f6f03dff --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/eventhubs/model_eventhubproperties.go @@ 
-0,0 +1,35 @@ +package eventhubs + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/formatting" +) + +type EventhubProperties struct { + CaptureDescription *CaptureDescription `json:"captureDescription,omitempty"` + CreatedAt *string `json:"createdAt,omitempty"` + MessageRetentionInDays *int64 `json:"messageRetentionInDays,omitempty"` + PartitionCount *int64 `json:"partitionCount,omitempty"` + PartitionIds *[]string `json:"partitionIds,omitempty"` + Status *EntityStatus `json:"status,omitempty"` + UpdatedAt *string `json:"updatedAt,omitempty"` +} + +func (o EventhubProperties) ListCreatedAtAsTime() (*time.Time, error) { + return formatting.ParseAsDateFormat(o.CreatedAt, "2006-01-02T15:04:05Z07:00") +} + +func (o EventhubProperties) SetCreatedAtAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.CreatedAt = &formatted +} + +func (o EventhubProperties) ListUpdatedAtAsTime() (*time.Time, error) { + return formatting.ParseAsDateFormat(o.UpdatedAt, "2006-01-02T15:04:05Z07:00") +} + +func (o EventhubProperties) SetUpdatedAtAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.UpdatedAt = &formatted +} diff --git a/azurerm/internal/services/eventhub/sdk/eventhubs/version.go b/azurerm/internal/services/eventhub/sdk/eventhubs/version.go new file mode 100644 index 000000000000..dffa4edabc31 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/eventhubs/version.go @@ -0,0 +1,9 @@ +package eventhubs + +import "fmt" + +const defaultApiVersion = "2017-04-01" + +func userAgent() string { + return fmt.Sprintf("pandora/eventhubs/%s", defaultApiVersion) +} diff --git a/azurerm/internal/services/eventhub/sdk/eventhubsclusters/client.go b/azurerm/internal/services/eventhub/sdk/eventhubsclusters/client.go new file mode 100644 index 000000000000..74371b447104 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/eventhubsclusters/client.go @@ -0,0 +1,15 @@ +package eventhubsclusters + +import 
"github.com/Azure/go-autorest/autorest" + +type EventHubsClustersClient struct { + Client autorest.Client + baseUri string +} + +func NewEventHubsClustersClientWithBaseURI(endpoint string) EventHubsClustersClient { + return EventHubsClustersClient{ + Client: autorest.NewClientWithUserAgent(userAgent()), + baseUri: endpoint, + } +} diff --git a/azurerm/internal/services/eventhub/sdk/eventhubsclusters/constants.go b/azurerm/internal/services/eventhub/sdk/eventhubsclusters/constants.go new file mode 100644 index 000000000000..e548aca5d130 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/eventhubsclusters/constants.go @@ -0,0 +1,7 @@ +package eventhubsclusters + +type ClusterSkuName string + +const ( + ClusterSkuNameDedicated ClusterSkuName = "Dedicated" +) diff --git a/azurerm/internal/services/eventhub/sdk/eventhubsclusters/id_cluster.go b/azurerm/internal/services/eventhub/sdk/eventhubsclusters/id_cluster.go new file mode 100644 index 000000000000..ecfbf842ddd0 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/eventhubsclusters/id_cluster.go @@ -0,0 +1,108 @@ +package eventhubsclusters + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +type ClusterId struct { + SubscriptionId string + ResourceGroup string + Name string +} + +func NewClusterID(subscriptionId, resourceGroup, name string) ClusterId { + return ClusterId{ + SubscriptionId: subscriptionId, + ResourceGroup: resourceGroup, + Name: name, + } +} + +func (id ClusterId) String() string { + segments := []string{ + fmt.Sprintf("Name %q", id.Name), + fmt.Sprintf("Resource Group %q", id.ResourceGroup), + } + segmentsStr := strings.Join(segments, " / ") + return fmt.Sprintf("%s: (%s)", "Cluster", segmentsStr) +} + +func (id ClusterId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.EventHub/clusters/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.Name) +} + +// ClusterID 
parses a Cluster ID into an ClusterId struct +func ClusterID(input string) (*ClusterId, error) { + id, err := resourceids.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := ClusterId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + if resourceId.Name, err = id.PopSegment("clusters"); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} + +// ClusterIDInsensitively parses an Cluster ID into an ClusterId struct, insensitively +// This should only be used to parse an ID for rewriting to a consistent casing, +// the ClusterID method should be used instead for validation etc. +func ClusterIDInsensitively(input string) (*ClusterId, error) { + id, err := resourceids.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := ClusterId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + // find the correct casing for the 'clusters' segment + clustersKey := "clusters" + for key := range id.Path { + if strings.EqualFold(key, clustersKey) { + clustersKey = key + break + } + } + if resourceId.Name, err = id.PopSegment(clustersKey); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} diff --git a/azurerm/internal/services/eventhub/sdk/eventhubsclusters/id_cluster_test.go 
b/azurerm/internal/services/eventhub/sdk/eventhubsclusters/id_cluster_test.go new file mode 100644 index 000000000000..09ff222de918 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/eventhubsclusters/id_cluster_test.go @@ -0,0 +1,227 @@ +package eventhubsclusters + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.Id = ClusterId{} + +func TestClusterIDFormatter(t *testing.T) { + actual := NewClusterID("{subscriptionId}", "{resourceGroupName}", "{clusterName}").ID() + expected := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/clusters/{clusterName}" + if actual != expected { + t.Fatalf("Expected %q but got %q", expected, actual) + } +} + +func TestClusterID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ClusterId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/{subscriptionId}/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/{subscriptionId}/resourceGroups/", + Error: true, + }, + + { + // missing Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/", + Error: true, + }, + + { + // missing value for Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/clusters/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/clusters/{clusterName}", + Expected: &ClusterId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + Name: "{clusterName}", + }, + }, + + { + // upper-cased + Input: 
"/SUBSCRIPTIONS/{SUBSCRIPTIONID}/RESOURCEGROUPS/{RESOURCEGROUPNAME}/PROVIDERS/MICROSOFT.EVENTHUB/CLUSTERS/{CLUSTERNAME}", + Error: true, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ClusterID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.Name != v.Expected.Name { + t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + } + } +} + +func TestClusterIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ClusterId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/{subscriptionId}/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/{subscriptionId}/resourceGroups/", + Error: true, + }, + + { + // missing Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/", + Error: true, + }, + + { + // missing value for Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/clusters/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/clusters/{clusterName}", + Expected: &ClusterId{ + SubscriptionId: "{subscriptionId}", + 
ResourceGroup: "{resourceGroupName}", + Name: "{clusterName}", + }, + }, + + { + // lower-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/clusters/{clusterName}", + Expected: &ClusterId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + Name: "{clusterName}", + }, + }, + + { + // upper-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/CLUSTERS/{clusterName}", + Expected: &ClusterId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + Name: "{clusterName}", + }, + }, + + { + // mixed-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/ClUsTeRs/{clusterName}", + Expected: &ClusterId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + Name: "{clusterName}", + }, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ClusterIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.Name != v.Expected.Name { + t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + } + } +} diff --git a/azurerm/internal/services/eventhub/sdk/eventhubsclusters/id_resourcegroup.go b/azurerm/internal/services/eventhub/sdk/eventhubsclusters/id_resourcegroup.go new file mode 100644 index 000000000000..f9f56f915618 --- /dev/null +++ 
b/azurerm/internal/services/eventhub/sdk/eventhubsclusters/id_resourcegroup.go @@ -0,0 +1,89 @@ +package eventhubsclusters + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +type ResourceGroupId struct { + SubscriptionId string + ResourceGroup string +} + +func NewResourceGroupID(subscriptionId, resourceGroup string) ResourceGroupId { + return ResourceGroupId{ + SubscriptionId: subscriptionId, + ResourceGroup: resourceGroup, + } +} + +func (id ResourceGroupId) String() string { + segments := []string{ + fmt.Sprintf("Resource Group %q", id.ResourceGroup), + } + segmentsStr := strings.Join(segments, " / ") + return fmt.Sprintf("%s: (%s)", "Resource Group", segmentsStr) +} + +func (id ResourceGroupId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup) +} + +// ResourceGroupID parses a ResourceGroup ID into an ResourceGroupId struct +func ResourceGroupID(input string) (*ResourceGroupId, error) { + id, err := resourceids.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := ResourceGroupId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} + +// ResourceGroupIDInsensitively parses an ResourceGroup ID into an ResourceGroupId struct, insensitively +// This should only be used to parse an ID for rewriting to a consistent casing, +// the ResourceGroupID method should be used instead for validation etc. 
+func ResourceGroupIDInsensitively(input string) (*ResourceGroupId, error) { + id, err := resourceids.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := ResourceGroupId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} diff --git a/azurerm/internal/services/eventhub/sdk/eventhubsclusters/id_resourcegroup_test.go b/azurerm/internal/services/eventhub/sdk/eventhubsclusters/id_resourcegroup_test.go new file mode 100644 index 000000000000..816da7593a28 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/eventhubsclusters/id_resourcegroup_test.go @@ -0,0 +1,192 @@ +package eventhubsclusters + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.Id = ResourceGroupId{} + +func TestResourceGroupIDFormatter(t *testing.T) { + actual := NewResourceGroupID("{subscriptionId}", "{resourceGroupName}").ID() + expected := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}" + if actual != expected { + t.Fatalf("Expected %q but got %q", expected, actual) + } +} + +func TestResourceGroupID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ResourceGroupId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/{subscriptionId}/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/{subscriptionId}/resourceGroups/", + 
Error: true, + }, + + { + // valid + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}", + Expected: &ResourceGroupId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + }, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/{SUBSCRIPTIONID}/RESOURCEGROUPS/{RESOURCEGROUPNAME}", + Error: true, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ResourceGroupID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + } +} + +func TestResourceGroupIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ResourceGroupId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/{subscriptionId}/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/{subscriptionId}/resourceGroups/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}", + Expected: &ResourceGroupId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + }, + }, + + { + // lower-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}", + Expected: &ResourceGroupId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: 
"{resourceGroupName}", + }, + }, + + { + // upper-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}", + Expected: &ResourceGroupId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + }, + }, + + { + // mixed-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}", + Expected: &ResourceGroupId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + }, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ResourceGroupIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + } +} diff --git a/azurerm/internal/services/eventhub/sdk/eventhubsclusters/method_clusterscreateorupdate_autorest.go b/azurerm/internal/services/eventhub/sdk/eventhubsclusters/method_clusterscreateorupdate_autorest.go new file mode 100644 index 000000000000..e2e0bbc597d8 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/eventhubsclusters/method_clusterscreateorupdate_autorest.go @@ -0,0 +1,75 @@ +package eventhubsclusters + +import ( + "context" + "fmt" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/hashicorp/go-azure-helpers/polling" +) + +type ClustersCreateOrUpdateResponse struct { + Poller polling.LongRunningPoller + HttpResponse *http.Response +} + +// ClustersCreateOrUpdate ... 
+func (c EventHubsClustersClient) ClustersCreateOrUpdate(ctx context.Context, id ClusterId, input Cluster) (result ClustersCreateOrUpdateResponse, err error) { + req, err := c.preparerForClustersCreateOrUpdate(ctx, id, input) + if err != nil { + err = autorest.NewErrorWithError(err, "eventhubsclusters.EventHubsClustersClient", "ClustersCreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = c.senderForClustersCreateOrUpdate(ctx, req) + if err != nil { + err = autorest.NewErrorWithError(err, "eventhubsclusters.EventHubsClustersClient", "ClustersCreateOrUpdate", result.HttpResponse, "Failure sending request") + return + } + + return +} + +// ClustersCreateOrUpdateThenPoll performs ClustersCreateOrUpdate then polls until it's completed +func (c EventHubsClustersClient) ClustersCreateOrUpdateThenPoll(ctx context.Context, id ClusterId, input Cluster) error { + result, err := c.ClustersCreateOrUpdate(ctx, id, input) + if err != nil { + return fmt.Errorf("performing ClustersCreateOrUpdate: %+v", err) + } + + if err := result.Poller.PollUntilDone(); err != nil { + return fmt.Errorf("polling after ClustersCreateOrUpdate: %+v", err) + } + + return nil +} + +// preparerForClustersCreateOrUpdate prepares the ClustersCreateOrUpdate request. +func (c EventHubsClustersClient) preparerForClustersCreateOrUpdate(ctx context.Context, id ClusterId, input Cluster) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithJSON(input), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// senderForClustersCreateOrUpdate sends the ClustersCreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. 
+func (c EventHubsClustersClient) senderForClustersCreateOrUpdate(ctx context.Context, req *http.Request) (future ClustersCreateOrUpdateResponse, err error) { + var resp *http.Response + resp, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + return + } + future.Poller, err = polling.NewLongRunningPollerFromResponse(ctx, resp, c.Client) + return +} diff --git a/azurerm/internal/services/eventhub/sdk/eventhubsclusters/method_clustersdelete_autorest.go b/azurerm/internal/services/eventhub/sdk/eventhubsclusters/method_clustersdelete_autorest.go new file mode 100644 index 000000000000..f1fa2cfd0342 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/eventhubsclusters/method_clustersdelete_autorest.go @@ -0,0 +1,73 @@ +package eventhubsclusters + +import ( + "context" + "fmt" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/hashicorp/go-azure-helpers/polling" +) + +type ClustersDeleteResponse struct { + Poller polling.LongRunningPoller + HttpResponse *http.Response +} + +// ClustersDelete ... 
+func (c EventHubsClustersClient) ClustersDelete(ctx context.Context, id ClusterId) (result ClustersDeleteResponse, err error) { + req, err := c.preparerForClustersDelete(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "eventhubsclusters.EventHubsClustersClient", "ClustersDelete", nil, "Failure preparing request") + return + } + + result, err = c.senderForClustersDelete(ctx, req) + if err != nil { + err = autorest.NewErrorWithError(err, "eventhubsclusters.EventHubsClustersClient", "ClustersDelete", result.HttpResponse, "Failure sending request") + return + } + + return +} + +// ClustersDeleteThenPoll performs ClustersDelete then polls until it's completed +func (c EventHubsClustersClient) ClustersDeleteThenPoll(ctx context.Context, id ClusterId) error { + result, err := c.ClustersDelete(ctx, id) + if err != nil { + return fmt.Errorf("performing ClustersDelete: %+v", err) + } + + if err := result.Poller.PollUntilDone(); err != nil { + return fmt.Errorf("polling after ClustersDelete: %+v", err) + } + + return nil +} + +// preparerForClustersDelete prepares the ClustersDelete request. +func (c EventHubsClustersClient) preparerForClustersDelete(ctx context.Context, id ClusterId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// senderForClustersDelete sends the ClustersDelete request. The method will close the +// http.Response Body if it receives an error. 
+func (c EventHubsClustersClient) senderForClustersDelete(ctx context.Context, req *http.Request) (future ClustersDeleteResponse, err error) { + var resp *http.Response + resp, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + return + } + future.Poller, err = polling.NewLongRunningPollerFromResponse(ctx, resp, c.Client) + return +} diff --git a/azurerm/internal/services/eventhub/sdk/eventhubsclusters/method_clustersget_autorest.go b/azurerm/internal/services/eventhub/sdk/eventhubsclusters/method_clustersget_autorest.go new file mode 100644 index 000000000000..96d3a5647500 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/eventhubsclusters/method_clustersget_autorest.go @@ -0,0 +1,64 @@ +package eventhubsclusters + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type ClustersGetResponse struct { + HttpResponse *http.Response + Model *Cluster +} + +// ClustersGet ... +func (c EventHubsClustersClient) ClustersGet(ctx context.Context, id ClusterId) (result ClustersGetResponse, err error) { + req, err := c.preparerForClustersGet(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "eventhubsclusters.EventHubsClustersClient", "ClustersGet", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "eventhubsclusters.EventHubsClustersClient", "ClustersGet", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForClustersGet(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "eventhubsclusters.EventHubsClustersClient", "ClustersGet", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForClustersGet prepares the ClustersGet request. 
+func (c EventHubsClustersClient) preparerForClustersGet(ctx context.Context, id ClusterId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForClustersGet handles the response to the ClustersGet request. The method always +// closes the http.Response Body. +func (c EventHubsClustersClient) responderForClustersGet(resp *http.Response) (result ClustersGetResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/azurerm/internal/services/eventhub/sdk/eventhubsclusters/method_clusterslistbyresourcegroup_autorest.go b/azurerm/internal/services/eventhub/sdk/eventhubsclusters/method_clusterslistbyresourcegroup_autorest.go new file mode 100644 index 000000000000..92c4e2159dca --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/eventhubsclusters/method_clusterslistbyresourcegroup_autorest.go @@ -0,0 +1,196 @@ +package eventhubsclusters + +import ( + "context" + "fmt" + "net/http" + "net/url" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type ClustersListByResourceGroupResponse struct { + HttpResponse *http.Response + Model *[]Cluster + + nextLink *string + nextPageFunc func(ctx context.Context, nextLink string) (ClustersListByResourceGroupResponse, error) +} + +type ClustersListByResourceGroupCompleteResult struct { + Items []Cluster +} + +func (r ClustersListByResourceGroupResponse) HasMore() bool { + return r.nextLink != nil +} + +func (r 
ClustersListByResourceGroupResponse) LoadMore(ctx context.Context) (resp ClustersListByResourceGroupResponse, err error) { + if !r.HasMore() { + err = fmt.Errorf("no more pages returned") + return + } + return r.nextPageFunc(ctx, *r.nextLink) +} + +type ClusterPredicate struct { + // TODO: implement me +} + +func (p ClusterPredicate) Matches(input Cluster) bool { + // TODO: implement me + // if p.Name != nil && input.Name != *p.Name { + // return false + // } + + return true +} + +// ClustersListByResourceGroup ... +func (c EventHubsClustersClient) ClustersListByResourceGroup(ctx context.Context, id ResourceGroupId) (resp ClustersListByResourceGroupResponse, err error) { + req, err := c.preparerForClustersListByResourceGroup(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "eventhubsclusters.EventHubsClustersClient", "ClustersListByResourceGroup", nil, "Failure preparing request") + return + } + + resp.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "eventhubsclusters.EventHubsClustersClient", "ClustersListByResourceGroup", resp.HttpResponse, "Failure sending request") + return + } + + resp, err = c.responderForClustersListByResourceGroup(resp.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "eventhubsclusters.EventHubsClustersClient", "ClustersListByResourceGroup", resp.HttpResponse, "Failure responding to request") + return + } + return +} + +// ClustersListByResourceGroupCompleteMatchingPredicate retrieves all of the results into a single object +func (c EventHubsClustersClient) ClustersListByResourceGroupComplete(ctx context.Context, id ResourceGroupId) (ClustersListByResourceGroupCompleteResult, error) { + return c.ClustersListByResourceGroupCompleteMatchingPredicate(ctx, id, ClusterPredicate{}) +} + +// ClustersListByResourceGroupCompleteMatchingPredicate retrieves all of the results and then applied the predicate +func (c 
EventHubsClustersClient) ClustersListByResourceGroupCompleteMatchingPredicate(ctx context.Context, id ResourceGroupId, predicate ClusterPredicate) (resp ClustersListByResourceGroupCompleteResult, err error) { + items := make([]Cluster, 0) + + page, err := c.ClustersListByResourceGroup(ctx, id) + if err != nil { + err = fmt.Errorf("loading the initial page: %+v", err) + return + } + if page.Model != nil { + for _, v := range *page.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + for page.HasMore() { + page, err = page.LoadMore(ctx) + if err != nil { + err = fmt.Errorf("loading the next page: %+v", err) + return + } + + if page.Model != nil { + for _, v := range *page.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + } + + out := ClustersListByResourceGroupCompleteResult{ + Items: items, + } + return out, nil +} + +// preparerForClustersListByResourceGroup prepares the ClustersListByResourceGroup request. +func (c EventHubsClustersClient) preparerForClustersListByResourceGroup(ctx context.Context, id ResourceGroupId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/providers/Microsoft.EventHub/clusters", id.ID())), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// preparerForClustersListByResourceGroupWithNextLink prepares the ClustersListByResourceGroup request with the given nextLink token. 
+func (c EventHubsClustersClient) preparerForClustersListByResourceGroupWithNextLink(ctx context.Context, nextLink string) (*http.Request, error) { + uri, err := url.Parse(nextLink) + if err != nil { + return nil, fmt.Errorf("parsing nextLink %q: %+v", nextLink, err) + } + queryParameters := map[string]interface{}{} + for k, v := range uri.Query() { + if len(v) == 0 { + continue + } + val := v[0] + val = autorest.Encode("query", val) + queryParameters[k] = val + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(uri.Path), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForClustersListByResourceGroup handles the response to the ClustersListByResourceGroup request. The method always +// closes the http.Response Body. +func (c EventHubsClustersClient) responderForClustersListByResourceGroup(resp *http.Response) (result ClustersListByResourceGroupResponse, err error) { + type page struct { + Values []Cluster `json:"value"` + NextLink *string `json:"nextLink"` + } + var respObj page + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&respObj), + autorest.ByClosing()) + result.HttpResponse = resp + result.Model = &respObj.Values + result.nextLink = respObj.NextLink + if respObj.NextLink != nil { + result.nextPageFunc = func(ctx context.Context, nextLink string) (result ClustersListByResourceGroupResponse, err error) { + req, err := c.preparerForClustersListByResourceGroupWithNextLink(ctx, nextLink) + if err != nil { + err = autorest.NewErrorWithError(err, "eventhubsclusters.EventHubsClustersClient", "ClustersListByResourceGroup", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = 
autorest.NewErrorWithError(err, "eventhubsclusters.EventHubsClustersClient", "ClustersListByResourceGroup", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForClustersListByResourceGroup(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "eventhubsclusters.EventHubsClustersClient", "ClustersListByResourceGroup", result.HttpResponse, "Failure responding to request") + return + } + + return + } + } + return +} diff --git a/azurerm/internal/services/eventhub/sdk/eventhubsclusters/method_clustersupdate_autorest.go b/azurerm/internal/services/eventhub/sdk/eventhubsclusters/method_clustersupdate_autorest.go new file mode 100644 index 000000000000..7e5f83c316d4 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/eventhubsclusters/method_clustersupdate_autorest.go @@ -0,0 +1,75 @@ +package eventhubsclusters + +import ( + "context" + "fmt" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/hashicorp/go-azure-helpers/polling" +) + +type ClustersUpdateResponse struct { + Poller polling.LongRunningPoller + HttpResponse *http.Response +} + +// ClustersUpdate ... 
+func (c EventHubsClustersClient) ClustersUpdate(ctx context.Context, id ClusterId, input Cluster) (result ClustersUpdateResponse, err error) { + req, err := c.preparerForClustersUpdate(ctx, id, input) + if err != nil { + err = autorest.NewErrorWithError(err, "eventhubsclusters.EventHubsClustersClient", "ClustersUpdate", nil, "Failure preparing request") + return + } + + result, err = c.senderForClustersUpdate(ctx, req) + if err != nil { + err = autorest.NewErrorWithError(err, "eventhubsclusters.EventHubsClustersClient", "ClustersUpdate", result.HttpResponse, "Failure sending request") + return + } + + return +} + +// ClustersUpdateThenPoll performs ClustersUpdate then polls until it's completed +func (c EventHubsClustersClient) ClustersUpdateThenPoll(ctx context.Context, id ClusterId, input Cluster) error { + result, err := c.ClustersUpdate(ctx, id, input) + if err != nil { + return fmt.Errorf("performing ClustersUpdate: %+v", err) + } + + if err := result.Poller.PollUntilDone(); err != nil { + return fmt.Errorf("polling after ClustersUpdate: %+v", err) + } + + return nil +} + +// preparerForClustersUpdate prepares the ClustersUpdate request. +func (c EventHubsClustersClient) preparerForClustersUpdate(ctx context.Context, id ClusterId, input Cluster) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithJSON(input), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// senderForClustersUpdate sends the ClustersUpdate request. The method will close the +// http.Response Body if it receives an error. 
+func (c EventHubsClustersClient) senderForClustersUpdate(ctx context.Context, req *http.Request) (future ClustersUpdateResponse, err error) { + var resp *http.Response + resp, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + return + } + future.Poller, err = polling.NewLongRunningPollerFromResponse(ctx, resp, c.Client) + return +} diff --git a/azurerm/internal/services/eventhub/sdk/eventhubsclusters/model_cluster.go b/azurerm/internal/services/eventhub/sdk/eventhubsclusters/model_cluster.go new file mode 100644 index 000000000000..260e1a04fa60 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/eventhubsclusters/model_cluster.go @@ -0,0 +1,11 @@ +package eventhubsclusters + +type Cluster struct { + Id *string `json:"id,omitempty"` + Location *string `json:"location,omitempty"` + Name *string `json:"name,omitempty"` + Properties *ClusterProperties `json:"properties,omitempty"` + Sku *ClusterSku `json:"sku,omitempty"` + Tags *map[string]string `json:"tags,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/azurerm/internal/services/eventhub/sdk/eventhubsclusters/model_clusterproperties.go b/azurerm/internal/services/eventhub/sdk/eventhubsclusters/model_clusterproperties.go new file mode 100644 index 000000000000..79c147430340 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/eventhubsclusters/model_clusterproperties.go @@ -0,0 +1,8 @@ +package eventhubsclusters + +type ClusterProperties struct { + CreatedAt *string `json:"createdAt,omitempty"` + MetricId *string `json:"metricId,omitempty"` + Status *string `json:"status,omitempty"` + UpdatedAt *string `json:"updatedAt,omitempty"` +} diff --git a/azurerm/internal/services/eventhub/sdk/eventhubsclusters/model_clustersku.go b/azurerm/internal/services/eventhub/sdk/eventhubsclusters/model_clustersku.go new file mode 100644 index 000000000000..49f86a763946 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/eventhubsclusters/model_clustersku.go @@ 
-0,0 +1,6 @@ +package eventhubsclusters + +type ClusterSku struct { + Capacity *int64 `json:"capacity,omitempty"` + Name ClusterSkuName `json:"name"` +} diff --git a/azurerm/internal/services/eventhub/sdk/eventhubsclusters/version.go b/azurerm/internal/services/eventhub/sdk/eventhubsclusters/version.go new file mode 100644 index 000000000000..530a3e235a12 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/eventhubsclusters/version.go @@ -0,0 +1,9 @@ +package eventhubsclusters + +import "fmt" + +const defaultApiVersion = "2018-01-01-preview" + +func userAgent() string { + return fmt.Sprintf("pandora/eventhubsclusters/%s", defaultApiVersion) +} diff --git a/azurerm/internal/services/eventhub/sdk/namespaces/client.go b/azurerm/internal/services/eventhub/sdk/namespaces/client.go new file mode 100644 index 000000000000..ab1ab10a48e7 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/namespaces/client.go @@ -0,0 +1,15 @@ +package namespaces + +import "github.com/Azure/go-autorest/autorest" + +type NamespacesClient struct { + Client autorest.Client + baseUri string +} + +func NewNamespacesClientWithBaseURI(endpoint string) NamespacesClient { + return NamespacesClient{ + Client: autorest.NewClientWithUserAgent(userAgent()), + baseUri: endpoint, + } +} diff --git a/azurerm/internal/services/eventhub/sdk/namespaces/constants.go b/azurerm/internal/services/eventhub/sdk/namespaces/constants.go new file mode 100644 index 000000000000..518f6346ba4a --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/namespaces/constants.go @@ -0,0 +1,61 @@ +package namespaces + +type CreatedByType string + +const ( + CreatedByTypeApplication CreatedByType = "Application" + CreatedByTypeKey CreatedByType = "Key" + CreatedByTypeManagedIdentity CreatedByType = "ManagedIdentity" + CreatedByTypeUser CreatedByType = "User" +) + +type EndPointProvisioningState string + +const ( + EndPointProvisioningStateCanceled EndPointProvisioningState = "Canceled" + 
EndPointProvisioningStateCreating EndPointProvisioningState = "Creating" + EndPointProvisioningStateDeleting EndPointProvisioningState = "Deleting" + EndPointProvisioningStateFailed EndPointProvisioningState = "Failed" + EndPointProvisioningStateSucceeded EndPointProvisioningState = "Succeeded" + EndPointProvisioningStateUpdating EndPointProvisioningState = "Updating" +) + +type KeySource string + +const ( + KeySourceMicrosoftKeyVault KeySource = "Microsoft.KeyVault" +) + +type ManagedServiceIdentityType string + +const ( + ManagedServiceIdentityTypeNone ManagedServiceIdentityType = "None" + ManagedServiceIdentityTypeSystemAssigned ManagedServiceIdentityType = "SystemAssigned" + ManagedServiceIdentityTypeSystemAssignedUserAssigned ManagedServiceIdentityType = "SystemAssigned, UserAssigned" + ManagedServiceIdentityTypeUserAssigned ManagedServiceIdentityType = "UserAssigned" +) + +type PrivateLinkConnectionStatus string + +const ( + PrivateLinkConnectionStatusApproved PrivateLinkConnectionStatus = "Approved" + PrivateLinkConnectionStatusDisconnected PrivateLinkConnectionStatus = "Disconnected" + PrivateLinkConnectionStatusPending PrivateLinkConnectionStatus = "Pending" + PrivateLinkConnectionStatusRejected PrivateLinkConnectionStatus = "Rejected" +) + +type SkuName string + +const ( + SkuNameBasic SkuName = "Basic" + SkuNamePremium SkuName = "Premium" + SkuNameStandard SkuName = "Standard" +) + +type SkuTier string + +const ( + SkuTierBasic SkuTier = "Basic" + SkuTierPremium SkuTier = "Premium" + SkuTierStandard SkuTier = "Standard" +) diff --git a/azurerm/internal/services/eventhub/sdk/namespaces/id_namespace.go b/azurerm/internal/services/eventhub/sdk/namespaces/id_namespace.go new file mode 100644 index 000000000000..a9e972b4602c --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/namespaces/id_namespace.go @@ -0,0 +1,108 @@ +package namespaces + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +type 
NamespaceId struct { + SubscriptionId string + ResourceGroup string + Name string +} + +func NewNamespaceID(subscriptionId, resourceGroup, name string) NamespaceId { + return NamespaceId{ + SubscriptionId: subscriptionId, + ResourceGroup: resourceGroup, + Name: name, + } +} + +func (id NamespaceId) String() string { + segments := []string{ + fmt.Sprintf("Name %q", id.Name), + fmt.Sprintf("Resource Group %q", id.ResourceGroup), + } + segmentsStr := strings.Join(segments, " / ") + return fmt.Sprintf("%s: (%s)", "Namespace", segmentsStr) +} + +func (id NamespaceId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.EventHub/namespaces/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.Name) +} + +// ParseNamespaceID parses a Namespace ID into an NamespaceId struct +func ParseNamespaceID(input string) (*NamespaceId, error) { + id, err := resourceids.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := NamespaceId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + if resourceId.Name, err = id.PopSegment("namespaces"); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} + +// ParseNamespaceIDInsensitively parses an Namespace ID into an NamespaceId struct, insensitively +// This should only be used to parse an ID for rewriting to a consistent casing, +// the ParseNamespaceID method should be used instead for validation etc. 
+func ParseNamespaceIDInsensitively(input string) (*NamespaceId, error) { + id, err := resourceids.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := NamespaceId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + // find the correct casing for the 'namespaces' segment + namespacesKey := "namespaces" + for key := range id.Path { + if strings.EqualFold(key, namespacesKey) { + namespacesKey = key + break + } + } + if resourceId.Name, err = id.PopSegment(namespacesKey); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} diff --git a/azurerm/internal/services/eventhub/sdk/namespaces/id_namespace_test.go b/azurerm/internal/services/eventhub/sdk/namespaces/id_namespace_test.go new file mode 100644 index 000000000000..ee3b172e6484 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/namespaces/id_namespace_test.go @@ -0,0 +1,227 @@ +package namespaces + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.Id = NamespaceId{} + +func TestNamespaceIDFormatter(t *testing.T) { + actual := NewNamespaceID("{subscriptionId}", "{resourceGroupName}", "{namespaceName}").ID() + expected := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}" + if actual != expected { + t.Fatalf("Expected %q but got %q", expected, actual) + } +} + +func TestParseNamespaceID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *NamespaceId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + 
Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/{subscriptionId}/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/{subscriptionId}/resourceGroups/", + Error: true, + }, + + { + // missing Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/", + Error: true, + }, + + { + // missing value for Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}", + Expected: &NamespaceId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + Name: "{namespaceName}", + }, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/{SUBSCRIPTIONID}/RESOURCEGROUPS/{RESOURCEGROUPNAME}/PROVIDERS/MICROSOFT.EVENTHUB/NAMESPACES/{NAMESPACENAME}", + Error: true, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseNamespaceID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.Name != v.Expected.Name { + t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + } + } +} + +func TestParseNamespaceIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + 
Expected *NamespaceId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/{subscriptionId}/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/{subscriptionId}/resourceGroups/", + Error: true, + }, + + { + // missing Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/", + Error: true, + }, + + { + // missing value for Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}", + Expected: &NamespaceId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + Name: "{namespaceName}", + }, + }, + + { + // lower-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}", + Expected: &NamespaceId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + Name: "{namespaceName}", + }, + }, + + { + // upper-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/NAMESPACES/{namespaceName}", + Expected: &NamespaceId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + Name: "{namespaceName}", + }, + }, + + { + // mixed-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/NaMeSpAcEs/{namespaceName}", + Expected: &NamespaceId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + Name: 
"{namespaceName}", + }, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseNamespaceIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.Name != v.Expected.Name { + t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + } + } +} diff --git a/azurerm/internal/services/eventhub/sdk/namespaces/id_resourcegroup.go b/azurerm/internal/services/eventhub/sdk/namespaces/id_resourcegroup.go new file mode 100644 index 000000000000..2af65df03159 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/namespaces/id_resourcegroup.go @@ -0,0 +1,89 @@ +package namespaces + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +type ResourceGroupId struct { + SubscriptionId string + ResourceGroup string +} + +func NewResourceGroupID(subscriptionId, resourceGroup string) ResourceGroupId { + return ResourceGroupId{ + SubscriptionId: subscriptionId, + ResourceGroup: resourceGroup, + } +} + +func (id ResourceGroupId) String() string { + segments := []string{ + fmt.Sprintf("Resource Group %q", id.ResourceGroup), + } + segmentsStr := strings.Join(segments, " / ") + return fmt.Sprintf("%s: (%s)", "Resource Group", segmentsStr) +} + +func (id ResourceGroupId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup) +} + +// ParseResourceGroupID parses a ResourceGroup ID into an ResourceGroupId 
struct +func ParseResourceGroupID(input string) (*ResourceGroupId, error) { + id, err := resourceids.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := ResourceGroupId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} + +// ParseResourceGroupIDInsensitively parses an ResourceGroup ID into an ResourceGroupId struct, insensitively +// This should only be used to parse an ID for rewriting to a consistent casing, +// the ParseResourceGroupID method should be used instead for validation etc. +func ParseResourceGroupIDInsensitively(input string) (*ResourceGroupId, error) { + id, err := resourceids.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := ResourceGroupId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} diff --git a/azurerm/internal/services/eventhub/sdk/namespaces/id_resourcegroup_test.go b/azurerm/internal/services/eventhub/sdk/namespaces/id_resourcegroup_test.go new file mode 100644 index 000000000000..1ebfa7a6fb07 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/namespaces/id_resourcegroup_test.go @@ -0,0 +1,192 @@ +package namespaces + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ 
resourceids.Id = ResourceGroupId{} + +func TestResourceGroupIDFormatter(t *testing.T) { + actual := NewResourceGroupID("{subscriptionId}", "{resourceGroupName}").ID() + expected := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}" + if actual != expected { + t.Fatalf("Expected %q but got %q", expected, actual) + } +} + +func TestParseResourceGroupID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ResourceGroupId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/{subscriptionId}/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/{subscriptionId}/resourceGroups/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}", + Expected: &ResourceGroupId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + }, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/{SUBSCRIPTIONID}/RESOURCEGROUPS/{RESOURCEGROUPNAME}", + Error: true, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseResourceGroupID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + } +} + +func TestParseResourceGroupIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + 
Expected *ResourceGroupId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/{subscriptionId}/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/{subscriptionId}/resourceGroups/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}", + Expected: &ResourceGroupId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + }, + }, + + { + // lower-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}", + Expected: &ResourceGroupId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + }, + }, + + { + // upper-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}", + Expected: &ResourceGroupId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + }, + }, + + { + // mixed-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}", + Expected: &ResourceGroupId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + }, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseResourceGroupIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, 
actual.ResourceGroup) + } + } +} diff --git a/azurerm/internal/services/eventhub/sdk/namespaces/method_createorupdate_autorest.go b/azurerm/internal/services/eventhub/sdk/namespaces/method_createorupdate_autorest.go new file mode 100644 index 000000000000..360a19fe60e4 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/namespaces/method_createorupdate_autorest.go @@ -0,0 +1,75 @@ +package namespaces + +import ( + "context" + "fmt" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/hashicorp/go-azure-helpers/polling" +) + +type CreateOrUpdateResponse struct { + Poller polling.LongRunningPoller + HttpResponse *http.Response +} + +// CreateOrUpdate ... +func (c NamespacesClient) CreateOrUpdate(ctx context.Context, id NamespaceId, input EHNamespace) (result CreateOrUpdateResponse, err error) { + req, err := c.preparerForCreateOrUpdate(ctx, id, input) + if err != nil { + err = autorest.NewErrorWithError(err, "namespaces.NamespacesClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = c.senderForCreateOrUpdate(ctx, req) + if err != nil { + err = autorest.NewErrorWithError(err, "namespaces.NamespacesClient", "CreateOrUpdate", result.HttpResponse, "Failure sending request") + return + } + + return +} + +// CreateOrUpdateThenPoll performs CreateOrUpdate then polls until it's completed +func (c NamespacesClient) CreateOrUpdateThenPoll(ctx context.Context, id NamespaceId, input EHNamespace) error { + result, err := c.CreateOrUpdate(ctx, id, input) + if err != nil { + return fmt.Errorf("performing CreateOrUpdate: %+v", err) + } + + if err := result.Poller.PollUntilDone(); err != nil { + return fmt.Errorf("polling after CreateOrUpdate: %+v", err) + } + + return nil +} + +// preparerForCreateOrUpdate prepares the CreateOrUpdate request. 
+func (c NamespacesClient) preparerForCreateOrUpdate(ctx context.Context, id NamespaceId, input EHNamespace) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithJSON(input), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// senderForCreateOrUpdate sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (c NamespacesClient) senderForCreateOrUpdate(ctx context.Context, req *http.Request) (future CreateOrUpdateResponse, err error) { + var resp *http.Response + resp, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + return + } + future.Poller, err = polling.NewLongRunningPollerFromResponse(ctx, resp, c.Client) + return +} diff --git a/azurerm/internal/services/eventhub/sdk/namespaces/method_delete_autorest.go b/azurerm/internal/services/eventhub/sdk/namespaces/method_delete_autorest.go new file mode 100644 index 000000000000..65644e85c636 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/namespaces/method_delete_autorest.go @@ -0,0 +1,73 @@ +package namespaces + +import ( + "context" + "fmt" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/hashicorp/go-azure-helpers/polling" +) + +type DeleteResponse struct { + Poller polling.LongRunningPoller + HttpResponse *http.Response +} + +// Delete ... 
+func (c NamespacesClient) Delete(ctx context.Context, id NamespaceId) (result DeleteResponse, err error) { + req, err := c.preparerForDelete(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "namespaces.NamespacesClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = c.senderForDelete(ctx, req) + if err != nil { + err = autorest.NewErrorWithError(err, "namespaces.NamespacesClient", "Delete", result.HttpResponse, "Failure sending request") + return + } + + return +} + +// DeleteThenPoll performs Delete then polls until it's completed +func (c NamespacesClient) DeleteThenPoll(ctx context.Context, id NamespaceId) error { + result, err := c.Delete(ctx, id) + if err != nil { + return fmt.Errorf("performing Delete: %+v", err) + } + + if err := result.Poller.PollUntilDone(); err != nil { + return fmt.Errorf("polling after Delete: %+v", err) + } + + return nil +} + +// preparerForDelete prepares the Delete request. +func (c NamespacesClient) preparerForDelete(ctx context.Context, id NamespaceId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// senderForDelete sends the Delete request. The method will close the +// http.Response Body if it receives an error. 
+func (c NamespacesClient) senderForDelete(ctx context.Context, req *http.Request) (future DeleteResponse, err error) { + var resp *http.Response + resp, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + return + } + future.Poller, err = polling.NewLongRunningPollerFromResponse(ctx, resp, c.Client) + return +} diff --git a/azurerm/internal/services/eventhub/sdk/namespaces/method_get_autorest.go b/azurerm/internal/services/eventhub/sdk/namespaces/method_get_autorest.go new file mode 100644 index 000000000000..a7e3cb318fb6 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/namespaces/method_get_autorest.go @@ -0,0 +1,64 @@ +package namespaces + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type GetResponse struct { + HttpResponse *http.Response + Model *EHNamespace +} + +// Get ... +func (c NamespacesClient) Get(ctx context.Context, id NamespaceId) (result GetResponse, err error) { + req, err := c.preparerForGet(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "namespaces.NamespacesClient", "Get", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "namespaces.NamespacesClient", "Get", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForGet(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "namespaces.NamespacesClient", "Get", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForGet prepares the Get request. 
+func (c NamespacesClient) preparerForGet(ctx context.Context, id NamespaceId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForGet handles the response to the Get request. The method always +// closes the http.Response Body. +func (c NamespacesClient) responderForGet(resp *http.Response) (result GetResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/azurerm/internal/services/eventhub/sdk/namespaces/method_listbyresourcegroup_autorest.go b/azurerm/internal/services/eventhub/sdk/namespaces/method_listbyresourcegroup_autorest.go new file mode 100644 index 000000000000..1c667abb53e1 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/namespaces/method_listbyresourcegroup_autorest.go @@ -0,0 +1,196 @@ +package namespaces + +import ( + "context" + "fmt" + "net/http" + "net/url" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type ListByResourceGroupResponse struct { + HttpResponse *http.Response + Model *[]EHNamespace + + nextLink *string + nextPageFunc func(ctx context.Context, nextLink string) (ListByResourceGroupResponse, error) +} + +type ListByResourceGroupCompleteResult struct { + Items []EHNamespace +} + +func (r ListByResourceGroupResponse) HasMore() bool { + return r.nextLink != nil +} + +func (r ListByResourceGroupResponse) LoadMore(ctx context.Context) (resp ListByResourceGroupResponse, err error) { + if !r.HasMore() { + err = fmt.Errorf("no more pages 
returned") + return + } + return r.nextPageFunc(ctx, *r.nextLink) +} + +type EHNamespacePredicate struct { + // TODO: implement me +} + +func (p EHNamespacePredicate) Matches(input EHNamespace) bool { + // TODO: implement me + // if p.Name != nil && input.Name != *p.Name { + // return false + // } + + return true +} + +// ListByResourceGroup ... +func (c NamespacesClient) ListByResourceGroup(ctx context.Context, id ResourceGroupId) (resp ListByResourceGroupResponse, err error) { + req, err := c.preparerForListByResourceGroup(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "namespaces.NamespacesClient", "ListByResourceGroup", nil, "Failure preparing request") + return + } + + resp.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "namespaces.NamespacesClient", "ListByResourceGroup", resp.HttpResponse, "Failure sending request") + return + } + + resp, err = c.responderForListByResourceGroup(resp.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "namespaces.NamespacesClient", "ListByResourceGroup", resp.HttpResponse, "Failure responding to request") + return + } + return +} + +// ListByResourceGroupCompleteMatchingPredicate retrieves all of the results into a single object +func (c NamespacesClient) ListByResourceGroupComplete(ctx context.Context, id ResourceGroupId) (ListByResourceGroupCompleteResult, error) { + return c.ListByResourceGroupCompleteMatchingPredicate(ctx, id, EHNamespacePredicate{}) +} + +// ListByResourceGroupCompleteMatchingPredicate retrieves all of the results and then applied the predicate +func (c NamespacesClient) ListByResourceGroupCompleteMatchingPredicate(ctx context.Context, id ResourceGroupId, predicate EHNamespacePredicate) (resp ListByResourceGroupCompleteResult, err error) { + items := make([]EHNamespace, 0) + + page, err := c.ListByResourceGroup(ctx, id) + if err != nil { + err = fmt.Errorf("loading the 
initial page: %+v", err) + return + } + if page.Model != nil { + for _, v := range *page.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + for page.HasMore() { + page, err = page.LoadMore(ctx) + if err != nil { + err = fmt.Errorf("loading the next page: %+v", err) + return + } + + if page.Model != nil { + for _, v := range *page.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + } + + out := ListByResourceGroupCompleteResult{ + Items: items, + } + return out, nil +} + +// preparerForListByResourceGroup prepares the ListByResourceGroup request. +func (c NamespacesClient) preparerForListByResourceGroup(ctx context.Context, id ResourceGroupId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/providers/Microsoft.EventHub/namespaces", id.ID())), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// preparerForListByResourceGroupWithNextLink prepares the ListByResourceGroup request with the given nextLink token. 
+func (c NamespacesClient) preparerForListByResourceGroupWithNextLink(ctx context.Context, nextLink string) (*http.Request, error) { + uri, err := url.Parse(nextLink) + if err != nil { + return nil, fmt.Errorf("parsing nextLink %q: %+v", nextLink, err) + } + queryParameters := map[string]interface{}{} + for k, v := range uri.Query() { + if len(v) == 0 { + continue + } + val := v[0] + val = autorest.Encode("query", val) + queryParameters[k] = val + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(uri.Path), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForListByResourceGroup handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. +func (c NamespacesClient) responderForListByResourceGroup(resp *http.Response) (result ListByResourceGroupResponse, err error) { + type page struct { + Values []EHNamespace `json:"value"` + NextLink *string `json:"nextLink"` + } + var respObj page + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&respObj), + autorest.ByClosing()) + result.HttpResponse = resp + result.Model = &respObj.Values + result.nextLink = respObj.NextLink + if respObj.NextLink != nil { + result.nextPageFunc = func(ctx context.Context, nextLink string) (result ListByResourceGroupResponse, err error) { + req, err := c.preparerForListByResourceGroupWithNextLink(ctx, nextLink) + if err != nil { + err = autorest.NewErrorWithError(err, "namespaces.NamespacesClient", "ListByResourceGroup", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "namespaces.NamespacesClient", "ListByResourceGroup", 
result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForListByResourceGroup(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "namespaces.NamespacesClient", "ListByResourceGroup", result.HttpResponse, "Failure responding to request") + return + } + + return + } + } + return +} diff --git a/azurerm/internal/services/eventhub/sdk/namespaces/method_update_autorest.go b/azurerm/internal/services/eventhub/sdk/namespaces/method_update_autorest.go new file mode 100644 index 000000000000..c96f80cec82f --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/namespaces/method_update_autorest.go @@ -0,0 +1,65 @@ +package namespaces + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type UpdateResponse struct { + HttpResponse *http.Response + Model *EHNamespace +} + +// Update ... +func (c NamespacesClient) Update(ctx context.Context, id NamespaceId, input EHNamespace) (result UpdateResponse, err error) { + req, err := c.preparerForUpdate(ctx, id, input) + if err != nil { + err = autorest.NewErrorWithError(err, "namespaces.NamespacesClient", "Update", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "namespaces.NamespacesClient", "Update", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForUpdate(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "namespaces.NamespacesClient", "Update", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForUpdate prepares the Update request. 
+func (c NamespacesClient) preparerForUpdate(ctx context.Context, id NamespaceId, input EHNamespace) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithJSON(input), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForUpdate handles the response to the Update request. The method always +// closes the http.Response Body. +func (c NamespacesClient) responderForUpdate(resp *http.Response) (result UpdateResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusCreated, http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/azurerm/internal/services/eventhub/sdk/namespaces/model_connectionstate.go b/azurerm/internal/services/eventhub/sdk/namespaces/model_connectionstate.go new file mode 100644 index 000000000000..138fa820d918 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/namespaces/model_connectionstate.go @@ -0,0 +1,6 @@ +package namespaces + +type ConnectionState struct { + Description *string `json:"description,omitempty"` + Status *PrivateLinkConnectionStatus `json:"status,omitempty"` +} diff --git a/azurerm/internal/services/eventhub/sdk/namespaces/model_ehnamespace.go b/azurerm/internal/services/eventhub/sdk/namespaces/model_ehnamespace.go new file mode 100644 index 000000000000..b1c6c79f48f0 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/namespaces/model_ehnamespace.go @@ -0,0 +1,13 @@ +package namespaces + +type EHNamespace struct { + Id *string `json:"id,omitempty"` + Identity *Identity `json:"identity,omitempty"` + Location *string 
`json:"location,omitempty"` + Name *string `json:"name,omitempty"` + Properties *EHNamespaceProperties `json:"properties,omitempty"` + Sku *Sku `json:"sku,omitempty"` + SystemData *SystemData `json:"systemData,omitempty"` + Tags *map[string]string `json:"tags,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/azurerm/internal/services/eventhub/sdk/namespaces/model_ehnamespaceproperties.go b/azurerm/internal/services/eventhub/sdk/namespaces/model_ehnamespaceproperties.go new file mode 100644 index 000000000000..00e7248c8e88 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/namespaces/model_ehnamespaceproperties.go @@ -0,0 +1,41 @@ +package namespaces + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/formatting" +) + +type EHNamespaceProperties struct { + ClusterArmId *string `json:"clusterArmId,omitempty"` + CreatedAt *string `json:"createdAt,omitempty"` + Encryption *Encryption `json:"encryption,omitempty"` + IsAutoInflateEnabled *bool `json:"isAutoInflateEnabled,omitempty"` + KafkaEnabled *bool `json:"kafkaEnabled,omitempty"` + MaximumThroughputUnits *int64 `json:"maximumThroughputUnits,omitempty"` + MetricId *string `json:"metricId,omitempty"` + PrivateEndpointConnections *[]PrivateEndpointConnection `json:"privateEndpointConnections,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` + ServiceBusEndpoint *string `json:"serviceBusEndpoint,omitempty"` + Status *string `json:"status,omitempty"` + UpdatedAt *string `json:"updatedAt,omitempty"` + ZoneRedundant *bool `json:"zoneRedundant,omitempty"` +} + +func (o EHNamespaceProperties) ListCreatedAtAsTime() (*time.Time, error) { + return formatting.ParseAsDateFormat(o.CreatedAt, "2006-01-02T15:04:05Z07:00") +} + +func (o EHNamespaceProperties) SetCreatedAtAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.CreatedAt = &formatted +} + +func (o EHNamespaceProperties) ListUpdatedAtAsTime() (*time.Time, error) { + return 
formatting.ParseAsDateFormat(o.UpdatedAt, "2006-01-02T15:04:05Z07:00") +} + +func (o EHNamespaceProperties) SetUpdatedAtAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.UpdatedAt = &formatted +} diff --git a/azurerm/internal/services/eventhub/sdk/namespaces/model_encryption.go b/azurerm/internal/services/eventhub/sdk/namespaces/model_encryption.go new file mode 100644 index 000000000000..b8aa488c8240 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/namespaces/model_encryption.go @@ -0,0 +1,7 @@ +package namespaces + +type Encryption struct { + KeySource *KeySource `json:"keySource,omitempty"` + KeyVaultProperties *[]KeyVaultProperties `json:"keyVaultProperties,omitempty"` + RequireInfrastructureEncryption *bool `json:"requireInfrastructureEncryption,omitempty"` +} diff --git a/azurerm/internal/services/eventhub/sdk/namespaces/model_identity.go b/azurerm/internal/services/eventhub/sdk/namespaces/model_identity.go new file mode 100644 index 000000000000..4c4f614923f0 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/namespaces/model_identity.go @@ -0,0 +1,8 @@ +package namespaces + +type Identity struct { + PrincipalId *string `json:"principalId,omitempty"` + TenantId *string `json:"tenantId,omitempty"` + Type *ManagedServiceIdentityType `json:"type,omitempty"` + UserAssignedIdentities *UserAssignedIdentityProperties `json:"userAssignedIdentities,omitempty"` +} diff --git a/azurerm/internal/services/eventhub/sdk/namespaces/model_keyvaultproperties.go b/azurerm/internal/services/eventhub/sdk/namespaces/model_keyvaultproperties.go new file mode 100644 index 000000000000..aa274eeb6839 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/namespaces/model_keyvaultproperties.go @@ -0,0 +1,8 @@ +package namespaces + +type KeyVaultProperties struct { + Identity *UserAssignedIdentityProperties `json:"identity,omitempty"` + KeyName *string `json:"keyName,omitempty"` + KeyVaultUri *string 
`json:"keyVaultUri,omitempty"` + KeyVersion *string `json:"keyVersion,omitempty"` +} diff --git a/azurerm/internal/services/eventhub/sdk/namespaces/model_privateendpoint.go b/azurerm/internal/services/eventhub/sdk/namespaces/model_privateendpoint.go new file mode 100644 index 000000000000..6e300adf7f04 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/namespaces/model_privateendpoint.go @@ -0,0 +1,5 @@ +package namespaces + +type PrivateEndpoint struct { + Id *string `json:"id,omitempty"` +} diff --git a/azurerm/internal/services/eventhub/sdk/namespaces/model_privateendpointconnection.go b/azurerm/internal/services/eventhub/sdk/namespaces/model_privateendpointconnection.go new file mode 100644 index 000000000000..55a3f4fa39cc --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/namespaces/model_privateendpointconnection.go @@ -0,0 +1,9 @@ +package namespaces + +type PrivateEndpointConnection struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties *PrivateEndpointConnectionProperties `json:"properties,omitempty"` + SystemData *SystemData `json:"systemData,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/azurerm/internal/services/eventhub/sdk/namespaces/model_privateendpointconnectionproperties.go b/azurerm/internal/services/eventhub/sdk/namespaces/model_privateendpointconnectionproperties.go new file mode 100644 index 000000000000..2f78e08f2988 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/namespaces/model_privateendpointconnectionproperties.go @@ -0,0 +1,7 @@ +package namespaces + +type PrivateEndpointConnectionProperties struct { + PrivateEndpoint *PrivateEndpoint `json:"privateEndpoint,omitempty"` + PrivateLinkServiceConnectionState *ConnectionState `json:"privateLinkServiceConnectionState,omitempty"` + ProvisioningState *EndPointProvisioningState `json:"provisioningState,omitempty"` +} diff --git a/azurerm/internal/services/eventhub/sdk/namespaces/model_sku.go 
b/azurerm/internal/services/eventhub/sdk/namespaces/model_sku.go new file mode 100644 index 000000000000..d1f192b45e8f --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/namespaces/model_sku.go @@ -0,0 +1,7 @@ +package namespaces + +type Sku struct { + Capacity *int64 `json:"capacity,omitempty"` + Name SkuName `json:"name"` + Tier *SkuTier `json:"tier,omitempty"` +} diff --git a/azurerm/internal/services/eventhub/sdk/namespaces/model_systemdata.go b/azurerm/internal/services/eventhub/sdk/namespaces/model_systemdata.go new file mode 100644 index 000000000000..8d5bc25fb141 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/namespaces/model_systemdata.go @@ -0,0 +1,34 @@ +package namespaces + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/formatting" +) + +type SystemData struct { + CreatedAt *string `json:"createdAt,omitempty"` + CreatedBy *string `json:"createdBy,omitempty"` + CreatedByType *CreatedByType `json:"createdByType,omitempty"` + LastModifiedAt *string `json:"lastModifiedAt,omitempty"` + LastModifiedBy *string `json:"lastModifiedBy,omitempty"` + LastModifiedByType *CreatedByType `json:"lastModifiedByType,omitempty"` +} + +func (o SystemData) ListCreatedAtAsTime() (*time.Time, error) { + return formatting.ParseAsDateFormat(o.CreatedAt, "2006-01-02T15:04:05Z07:00") +} + +func (o SystemData) SetCreatedAtAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.CreatedAt = &formatted +} + +func (o SystemData) ListLastModifiedAtAsTime() (*time.Time, error) { + return formatting.ParseAsDateFormat(o.LastModifiedAt, "2006-01-02T15:04:05Z07:00") +} + +func (o SystemData) SetLastModifiedAtAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastModifiedAt = &formatted +} diff --git a/azurerm/internal/services/eventhub/sdk/namespaces/model_userassignedidentityproperties.go b/azurerm/internal/services/eventhub/sdk/namespaces/model_userassignedidentityproperties.go new file 
mode 100644 index 000000000000..fbd80f5128fa --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/namespaces/model_userassignedidentityproperties.go @@ -0,0 +1,5 @@ +package namespaces + +type UserAssignedIdentityProperties struct { + UserAssignedIdentity *string `json:"userAssignedIdentity,omitempty"` +} diff --git a/azurerm/internal/services/eventhub/sdk/namespaces/version.go b/azurerm/internal/services/eventhub/sdk/namespaces/version.go new file mode 100644 index 000000000000..cf70f01b0f27 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/namespaces/version.go @@ -0,0 +1,9 @@ +package namespaces + +import "fmt" + +const defaultApiVersion = "2021-01-01-preview" + +func userAgent() string { + return fmt.Sprintf("pandora/namespaces/%s", defaultApiVersion) +} diff --git a/azurerm/internal/services/eventhub/sdk/networkrulesets/client.go b/azurerm/internal/services/eventhub/sdk/networkrulesets/client.go new file mode 100644 index 000000000000..99e6e3100f6d --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/networkrulesets/client.go @@ -0,0 +1,15 @@ +package networkrulesets + +import "github.com/Azure/go-autorest/autorest" + +type NetworkRuleSetsClient struct { + Client autorest.Client + baseUri string +} + +func NewNetworkRuleSetsClientWithBaseURI(endpoint string) NetworkRuleSetsClient { + return NetworkRuleSetsClient{ + Client: autorest.NewClientWithUserAgent(userAgent()), + baseUri: endpoint, + } +} diff --git a/azurerm/internal/services/eventhub/sdk/networkrulesets/constants.go b/azurerm/internal/services/eventhub/sdk/networkrulesets/constants.go new file mode 100644 index 000000000000..5246f32f5ee3 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/networkrulesets/constants.go @@ -0,0 +1,14 @@ +package networkrulesets + +type DefaultAction string + +const ( + DefaultActionAllow DefaultAction = "Allow" + DefaultActionDeny DefaultAction = "Deny" +) + +type NetworkRuleIPAction string + +const ( + NetworkRuleIPActionAllow 
NetworkRuleIPAction = "Allow" +) diff --git a/azurerm/internal/services/eventhub/sdk/networkrulesets/id_namespace.go b/azurerm/internal/services/eventhub/sdk/networkrulesets/id_namespace.go new file mode 100644 index 000000000000..cf89f3b5c9d6 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/networkrulesets/id_namespace.go @@ -0,0 +1,108 @@ +package networkrulesets + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +type NamespaceId struct { + SubscriptionId string + ResourceGroup string + Name string +} + +func NewNamespaceID(subscriptionId, resourceGroup, name string) NamespaceId { + return NamespaceId{ + SubscriptionId: subscriptionId, + ResourceGroup: resourceGroup, + Name: name, + } +} + +func (id NamespaceId) String() string { + segments := []string{ + fmt.Sprintf("Name %q", id.Name), + fmt.Sprintf("Resource Group %q", id.ResourceGroup), + } + segmentsStr := strings.Join(segments, " / ") + return fmt.Sprintf("%s: (%s)", "Namespace", segmentsStr) +} + +func (id NamespaceId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.EventHub/namespaces/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.Name) +} + +// NamespaceID parses a Namespace ID into an NamespaceId struct +func NamespaceID(input string) (*NamespaceId, error) { + id, err := resourceids.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := NamespaceId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + if resourceId.Name, err = id.PopSegment("namespaces"); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return 
&resourceId, nil +} + +// NamespaceIDInsensitively parses an Namespace ID into an NamespaceId struct, insensitively +// This should only be used to parse an ID for rewriting to a consistent casing, +// the NamespaceID method should be used instead for validation etc. +func NamespaceIDInsensitively(input string) (*NamespaceId, error) { + id, err := resourceids.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := NamespaceId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + // find the correct casing for the 'namespaces' segment + namespacesKey := "namespaces" + for key := range id.Path { + if strings.EqualFold(key, namespacesKey) { + namespacesKey = key + break + } + } + if resourceId.Name, err = id.PopSegment(namespacesKey); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} diff --git a/azurerm/internal/services/eventhub/sdk/networkrulesets/id_namespace_test.go b/azurerm/internal/services/eventhub/sdk/networkrulesets/id_namespace_test.go new file mode 100644 index 000000000000..403771c6e19c --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/networkrulesets/id_namespace_test.go @@ -0,0 +1,227 @@ +package networkrulesets + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.Id = NamespaceId{} + +func TestNamespaceIDFormatter(t *testing.T) { + actual := NewNamespaceID("{subscriptionId}", "{resourceGroupName}", "{namespaceName}").ID() + expected := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}" + if actual != expected { + 
t.Fatalf("Expected %q but got %q", expected, actual) + } +} + +func TestNamespaceID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *NamespaceId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/{subscriptionId}/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/{subscriptionId}/resourceGroups/", + Error: true, + }, + + { + // missing Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/", + Error: true, + }, + + { + // missing value for Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}", + Expected: &NamespaceId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + Name: "{namespaceName}", + }, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/{SUBSCRIPTIONID}/RESOURCEGROUPS/{RESOURCEGROUPNAME}/PROVIDERS/MICROSOFT.EVENTHUB/NAMESPACES/{NAMESPACENAME}", + Error: true, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := NamespaceID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", 
v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.Name != v.Expected.Name { + t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + } + } +} + +func TestNamespaceIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *NamespaceId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/{subscriptionId}/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/{subscriptionId}/resourceGroups/", + Error: true, + }, + + { + // missing Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/", + Error: true, + }, + + { + // missing value for Name + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}", + Expected: &NamespaceId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + Name: "{namespaceName}", + }, + }, + + { + // lower-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}", + Expected: &NamespaceId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + Name: "{namespaceName}", + }, + }, + + { + // upper-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/NAMESPACES/{namespaceName}", + Expected: &NamespaceId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + Name: "{namespaceName}", + }, + }, + + { + // 
mixed-cased segment names + Input: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/NaMeSpAcEs/{namespaceName}", + Expected: &NamespaceId{ + SubscriptionId: "{subscriptionId}", + ResourceGroup: "{resourceGroupName}", + Name: "{namespaceName}", + }, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := NamespaceIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.Name != v.Expected.Name { + t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + } + } +} diff --git a/azurerm/internal/services/eventhub/sdk/networkrulesets/method_namespacescreateorupdatenetworkruleset_autorest.go b/azurerm/internal/services/eventhub/sdk/networkrulesets/method_namespacescreateorupdatenetworkruleset_autorest.go new file mode 100644 index 000000000000..a72626d773ea --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/networkrulesets/method_namespacescreateorupdatenetworkruleset_autorest.go @@ -0,0 +1,66 @@ +package networkrulesets + +import ( + "context" + "fmt" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type NamespacesCreateOrUpdateNetworkRuleSetResponse struct { + HttpResponse *http.Response + Model *NetworkRuleSet +} + +// NamespacesCreateOrUpdateNetworkRuleSet ... 
+func (c NetworkRuleSetsClient) NamespacesCreateOrUpdateNetworkRuleSet(ctx context.Context, id NamespaceId, input NetworkRuleSet) (result NamespacesCreateOrUpdateNetworkRuleSetResponse, err error) { + req, err := c.preparerForNamespacesCreateOrUpdateNetworkRuleSet(ctx, id, input) + if err != nil { + err = autorest.NewErrorWithError(err, "networkrulesets.NetworkRuleSetsClient", "NamespacesCreateOrUpdateNetworkRuleSet", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "networkrulesets.NetworkRuleSetsClient", "NamespacesCreateOrUpdateNetworkRuleSet", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForNamespacesCreateOrUpdateNetworkRuleSet(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "networkrulesets.NetworkRuleSetsClient", "NamespacesCreateOrUpdateNetworkRuleSet", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForNamespacesCreateOrUpdateNetworkRuleSet prepares the NamespacesCreateOrUpdateNetworkRuleSet request. +func (c NetworkRuleSetsClient) preparerForNamespacesCreateOrUpdateNetworkRuleSet(ctx context.Context, id NamespaceId, input NetworkRuleSet) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/networkRuleSets/default", id.ID())), + autorest.WithJSON(input), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForNamespacesCreateOrUpdateNetworkRuleSet handles the response to the NamespacesCreateOrUpdateNetworkRuleSet request. 
The method always +// closes the http.Response Body. +func (c NetworkRuleSetsClient) responderForNamespacesCreateOrUpdateNetworkRuleSet(resp *http.Response) (result NamespacesCreateOrUpdateNetworkRuleSetResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/azurerm/internal/services/eventhub/sdk/networkrulesets/method_namespacesgetnetworkruleset_autorest.go b/azurerm/internal/services/eventhub/sdk/networkrulesets/method_namespacesgetnetworkruleset_autorest.go new file mode 100644 index 000000000000..4b1c83ec612d --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/networkrulesets/method_namespacesgetnetworkruleset_autorest.go @@ -0,0 +1,65 @@ +package networkrulesets + +import ( + "context" + "fmt" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type NamespacesGetNetworkRuleSetResponse struct { + HttpResponse *http.Response + Model *NetworkRuleSet +} + +// NamespacesGetNetworkRuleSet ... 
+func (c NetworkRuleSetsClient) NamespacesGetNetworkRuleSet(ctx context.Context, id NamespaceId) (result NamespacesGetNetworkRuleSetResponse, err error) { + req, err := c.preparerForNamespacesGetNetworkRuleSet(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "networkrulesets.NetworkRuleSetsClient", "NamespacesGetNetworkRuleSet", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "networkrulesets.NetworkRuleSetsClient", "NamespacesGetNetworkRuleSet", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForNamespacesGetNetworkRuleSet(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "networkrulesets.NetworkRuleSetsClient", "NamespacesGetNetworkRuleSet", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForNamespacesGetNetworkRuleSet prepares the NamespacesGetNetworkRuleSet request. +func (c NetworkRuleSetsClient) preparerForNamespacesGetNetworkRuleSet(ctx context.Context, id NamespaceId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/networkRuleSets/default", id.ID())), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForNamespacesGetNetworkRuleSet handles the response to the NamespacesGetNetworkRuleSet request. The method always +// closes the http.Response Body. 
+func (c NetworkRuleSetsClient) responderForNamespacesGetNetworkRuleSet(resp *http.Response) (result NamespacesGetNetworkRuleSetResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + return +} diff --git a/azurerm/internal/services/eventhub/sdk/networkrulesets/model_networkruleset.go b/azurerm/internal/services/eventhub/sdk/networkrulesets/model_networkruleset.go new file mode 100644 index 000000000000..4bd3ec511b79 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/networkrulesets/model_networkruleset.go @@ -0,0 +1,8 @@ +package networkrulesets + +type NetworkRuleSet struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties *NetworkRuleSetProperties `json:"properties,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/azurerm/internal/services/eventhub/sdk/networkrulesets/model_networkrulesetproperties.go b/azurerm/internal/services/eventhub/sdk/networkrulesets/model_networkrulesetproperties.go new file mode 100644 index 000000000000..790b249d172f --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/networkrulesets/model_networkrulesetproperties.go @@ -0,0 +1,8 @@ +package networkrulesets + +type NetworkRuleSetProperties struct { + DefaultAction *DefaultAction `json:"defaultAction,omitempty"` + IpRules *[]NWRuleSetIpRules `json:"ipRules,omitempty"` + TrustedServiceAccessEnabled *bool `json:"trustedServiceAccessEnabled,omitempty"` + VirtualNetworkRules *[]NWRuleSetVirtualNetworkRules `json:"virtualNetworkRules,omitempty"` +} diff --git a/azurerm/internal/services/eventhub/sdk/networkrulesets/model_nwrulesetiprules.go b/azurerm/internal/services/eventhub/sdk/networkrulesets/model_nwrulesetiprules.go new file mode 100644 index 000000000000..647113a5953a --- /dev/null +++ 
b/azurerm/internal/services/eventhub/sdk/networkrulesets/model_nwrulesetiprules.go @@ -0,0 +1,6 @@ +package networkrulesets + +type NWRuleSetIpRules struct { + Action *NetworkRuleIPAction `json:"action,omitempty"` + IpMask *string `json:"ipMask,omitempty"` +} diff --git a/azurerm/internal/services/eventhub/sdk/networkrulesets/model_nwrulesetvirtualnetworkrules.go b/azurerm/internal/services/eventhub/sdk/networkrulesets/model_nwrulesetvirtualnetworkrules.go new file mode 100644 index 000000000000..4f422a1e6323 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/networkrulesets/model_nwrulesetvirtualnetworkrules.go @@ -0,0 +1,6 @@ +package networkrulesets + +type NWRuleSetVirtualNetworkRules struct { + IgnoreMissingVnetServiceEndpoint *bool `json:"ignoreMissingVnetServiceEndpoint,omitempty"` + Subnet *Subnet `json:"subnet,omitempty"` +} diff --git a/azurerm/internal/services/eventhub/sdk/networkrulesets/model_subnet.go b/azurerm/internal/services/eventhub/sdk/networkrulesets/model_subnet.go new file mode 100644 index 000000000000..efae96149823 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/networkrulesets/model_subnet.go @@ -0,0 +1,5 @@ +package networkrulesets + +type Subnet struct { + Id *string `json:"id,omitempty"` +} diff --git a/azurerm/internal/services/eventhub/sdk/networkrulesets/version.go b/azurerm/internal/services/eventhub/sdk/networkrulesets/version.go new file mode 100644 index 000000000000..4ad3252eb7d8 --- /dev/null +++ b/azurerm/internal/services/eventhub/sdk/networkrulesets/version.go @@ -0,0 +1,9 @@ +package networkrulesets + +import "fmt" + +const defaultApiVersion = "2018-01-01-preview" + +func userAgent() string { + return fmt.Sprintf("pandora/networkrulesets/%s", defaultApiVersion) +} diff --git a/azurerm/internal/services/eventhub/transition.go b/azurerm/internal/services/eventhub/transition.go new file mode 100644 index 000000000000..8e1b0e906dd2 --- /dev/null +++ b/azurerm/internal/services/eventhub/transition.go @@ -0,0 
+1,23 @@ +package eventhub + +import "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" + +func expandTags(input map[string]interface{}) *map[string]string { + output := make(map[string]string) + for k, v := range input { + output[k] = v.(string) + } + return &output +} + +func flattenTags(input *map[string]string) map[string]*string { + output := make(map[string]*string) + + if input != nil { + for k, v := range *input { + output[k] = utils.String(v) + } + } + + return output +} diff --git a/azurerm/internal/services/eventhub/validate/eventhub_names.go b/azurerm/internal/services/eventhub/validate/eventhub_names.go index b599cec268db..e35b37f8de40 100644 --- a/azurerm/internal/services/eventhub/validate/eventhub_names.go +++ b/azurerm/internal/services/eventhub/validate/eventhub_names.go @@ -3,33 +3,33 @@ package validate import ( "regexp" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" ) // validation -func ValidateEventHubNamespaceName() schema.SchemaValidateFunc { +func ValidateEventHubNamespaceName() pluginsdk.SchemaValidateFunc { return validation.StringMatch( regexp.MustCompile("^[a-zA-Z][-a-zA-Z0-9]{4,48}[a-zA-Z0-9]$"), "The namespace name can contain only letters, numbers and hyphens. 
The namespace must start with a letter, and it must end with a letter or number and be between 6 and 50 characters long.", ) } -func ValidateEventHubName() schema.SchemaValidateFunc { +func ValidateEventHubName() pluginsdk.SchemaValidateFunc { return validation.StringMatch( regexp.MustCompile("^[a-zA-Z0-9]([-._a-zA-Z0-9]{0,48}[a-zA-Z0-9])?$"), "The event hub name can contain only letters, numbers, periods (.), hyphens (-),and underscores (_), up to 50 characters, and it must begin and end with a letter or number.", ) } -func ValidateEventHubConsumerName() schema.SchemaValidateFunc { +func ValidateEventHubConsumerName() pluginsdk.SchemaValidateFunc { return validation.StringMatch( regexp.MustCompile("^[a-zA-Z0-9]([-._a-zA-Z0-9]{0,48}[a-zA-Z0-9])?$"), "The consumer group name can contain only letters, numbers, periods (.), hyphens (-),and underscores (_), up to 50 characters, and it must begin and end with a letter or number.", ) } -func ValidateEventHubAuthorizationRuleName() schema.SchemaValidateFunc { +func ValidateEventHubAuthorizationRuleName() pluginsdk.SchemaValidateFunc { return validation.StringMatch( regexp.MustCompile("^[a-zA-Z0-9]([-._a-zA-Z0-9]{0,48}[a-zA-Z0-9])?$"), "The authorization rule name can contain only letters, numbers, periods, hyphens and underscores. 
The name must start and end with a letter or number and be up to 50 characters long.", diff --git a/azurerm/internal/services/firewall/client/client.go b/azurerm/internal/services/firewall/client/client.go index 894d10aee9b8..463ffae081ae 100644 --- a/azurerm/internal/services/firewall/client/client.go +++ b/azurerm/internal/services/firewall/client/client.go @@ -1,7 +1,7 @@ package client import ( - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/common" ) diff --git a/azurerm/internal/services/firewall/firewall_application_rule_collection_resource.go b/azurerm/internal/services/firewall/firewall_application_rule_collection_resource.go index 3909a6d0022c..03955f338379 100644 --- a/azurerm/internal/services/firewall/firewall_application_rule_collection_resource.go +++ b/azurerm/internal/services/firewall/firewall_application_rule_collection_resource.go @@ -5,9 +5,7 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" @@ -17,12 +15,13 @@ import ( firewallValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/firewall/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/set" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func resourceFirewallApplicationRuleCollection() *schema.Resource { - return &schema.Resource{ +func resourceFirewallApplicationRuleCollection() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceFirewallApplicationRuleCollectionCreateUpdate, Read: resourceFirewallApplicationRuleCollectionRead, Update: resourceFirewallApplicationRuleCollectionCreateUpdate, @@ -30,23 +29,23 @@ func resourceFirewallApplicationRuleCollection() *schema.Resource { // TODO: replace this with an importer which validates the ID during import Importer: pluginsdk.DefaultImporter(), - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(30 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: firewallValidate.FirewallName, }, "azure_firewall_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: firewallValidate.FirewallName, @@ -55,13 +54,13 @@ func resourceFirewallApplicationRuleCollection() *schema.Resource { "resource_group_name": azure.SchemaResourceGroupName(), "priority": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Required: true, ValidateFunc: validation.IntBetween(100, 65000), }, "action": { 
- Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ string(network.AzureFirewallRCActionTypeAllow), @@ -70,52 +69,52 @@ func resourceFirewallApplicationRuleCollection() *schema.Resource { }, "rule": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Required: true, MinItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.NoZeroValues, }, "source_addresses": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, + Elem: &pluginsdk.Schema{Type: pluginsdk.TypeString}, + Set: pluginsdk.HashString, }, "source_ip_groups": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, + Elem: &pluginsdk.Schema{Type: pluginsdk.TypeString}, + Set: pluginsdk.HashString, }, "description": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, }, "fqdn_tags": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, + Elem: &pluginsdk.Schema{Type: pluginsdk.TypeString}, + Set: pluginsdk.HashString, }, "target_fqdns": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, + Elem: &pluginsdk.Schema{Type: pluginsdk.TypeString}, + Set: pluginsdk.HashString, }, "protocol": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, MinItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "type": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, 
Required: true, ValidateFunc: validation.StringInSlice([]string{ string(network.AzureFirewallApplicationRuleProtocolTypeHTTP), @@ -124,7 +123,7 @@ func resourceFirewallApplicationRuleCollection() *schema.Resource { }, false), }, "port": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, ValidateFunc: validate.PortNumber, }, @@ -138,7 +137,7 @@ func resourceFirewallApplicationRuleCollection() *schema.Resource { } } -func resourceFirewallApplicationRuleCollectionCreateUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceFirewallApplicationRuleCollectionCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Firewall.AzureFirewallsClient ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -249,7 +248,7 @@ func resourceFirewallApplicationRuleCollectionCreateUpdate(d *schema.ResourceDat return resourceFirewallApplicationRuleCollectionRead(d, meta) } -func resourceFirewallApplicationRuleCollectionRead(d *schema.ResourceData, meta interface{}) error { +func resourceFirewallApplicationRuleCollectionRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Firewall.AzureFirewallsClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() @@ -322,7 +321,7 @@ func resourceFirewallApplicationRuleCollectionRead(d *schema.ResourceData, meta return nil } -func resourceFirewallApplicationRuleCollectionDelete(d *schema.ResourceData, meta interface{}) error { +func resourceFirewallApplicationRuleCollectionDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Firewall.AzureFirewallsClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() @@ -389,10 +388,10 @@ func expandFirewallApplicationRules(inputs []interface{}) (*[]network.AzureFirew ruleName := rule["name"].(string) ruleDescription := rule["description"].(string) - 
ruleSourceAddresses := rule["source_addresses"].(*schema.Set).List() - ruleSourceIpGroups := rule["source_ip_groups"].(*schema.Set).List() - ruleFqdnTags := rule["fqdn_tags"].(*schema.Set).List() - ruleTargetFqdns := rule["target_fqdns"].(*schema.Set).List() + ruleSourceAddresses := rule["source_addresses"].(*pluginsdk.Set).List() + ruleSourceIpGroups := rule["source_ip_groups"].(*pluginsdk.Set).List() + ruleFqdnTags := rule["fqdn_tags"].(*pluginsdk.Set).List() + ruleTargetFqdns := rule["target_fqdns"].(*pluginsdk.Set).List() output := network.AzureFirewallApplicationRule{ Name: utils.String(ruleName), diff --git a/azurerm/internal/services/firewall/firewall_application_rule_collection_resource_test.go b/azurerm/internal/services/firewall/firewall_application_rule_collection_resource_test.go index 112e365e9db6..4c6dc2642bba 100644 --- a/azurerm/internal/services/firewall/firewall_application_rule_collection_resource_test.go +++ b/azurerm/internal/services/firewall/firewall_application_rule_collection_resource_test.go @@ -6,15 +6,13 @@ import ( "regexp" "testing" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" - - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" ) type FirewallApplicationRuleCollectionResource struct { @@ -24,10 +22,10 @@ func 
TestAccFirewallApplicationRuleCollection_basic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_firewall_application_rule_collection", "test") r := FirewallApplicationRuleCollectionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("name").HasValue("acctestarc"), check.That(data.ResourceName).Key("priority").HasValue("100"), @@ -49,10 +47,10 @@ func TestAccFirewallApplicationRuleCollection_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_firewall_application_rule_collection", "test") r := FirewallApplicationRuleCollectionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -67,10 +65,10 @@ func TestAccFirewallApplicationRuleCollection_updatedName(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_firewall_application_rule_collection", "test") r := FirewallApplicationRuleCollectionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("name").HasValue("acctestarc"), check.That(data.ResourceName).Key("priority").HasValue("100"), @@ -81,7 +79,7 @@ func TestAccFirewallApplicationRuleCollection_updatedName(t *testing.T) { }, { Config: r.updatedName(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), 
check.That(data.ResourceName).Key("name").HasValue("acctestarc"), check.That(data.ResourceName).Key("priority").HasValue("100"), @@ -99,10 +97,10 @@ func TestAccFirewallApplicationRuleCollection_multipleRuleCollections(t *testing secondRule := "azurerm_firewall_application_rule_collection.test_add" - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("name").HasValue("acctestarc"), check.That(data.ResourceName).Key("priority").HasValue("100"), @@ -112,22 +110,22 @@ func TestAccFirewallApplicationRuleCollection_multipleRuleCollections(t *testing }, { Config: r.multiple(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("name").HasValue("acctestarc"), check.That(data.ResourceName).Key("priority").HasValue("100"), check.That(data.ResourceName).Key("action").HasValue("Allow"), check.That(data.ResourceName).Key("rule.#").HasValue("1"), check.That(secondRule).ExistsInAzure(r), - resource.TestCheckResourceAttr(secondRule, "name", "acctestarc_add"), - resource.TestCheckResourceAttr(secondRule, "priority", "200"), - resource.TestCheckResourceAttr(secondRule, "action", "Deny"), - resource.TestCheckResourceAttr(secondRule, "rule.#", "1"), + acceptance.TestCheckResourceAttr(secondRule, "name", "acctestarc_add"), + acceptance.TestCheckResourceAttr(secondRule, "priority", "200"), + acceptance.TestCheckResourceAttr(secondRule, "action", "Deny"), + acceptance.TestCheckResourceAttr(secondRule, "rule.#", "1"), ), }, { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("name").HasValue("acctestarc"), 
check.That(data.ResourceName).Key("priority").HasValue("100"), @@ -143,10 +141,10 @@ func TestAccFirewallApplicationRuleCollection_update(t *testing.T) { r := FirewallApplicationRuleCollectionResource{} secondResourceName := "azurerm_firewall_application_rule_collection.test_add" - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.multiple(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("name").HasValue("acctestarc"), check.That(data.ResourceName).Key("priority").HasValue("100"), @@ -161,7 +159,7 @@ func TestAccFirewallApplicationRuleCollection_update(t *testing.T) { }, { Config: r.multipleUpdate(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("name").HasValue("acctestarc"), check.That(data.ResourceName).Key("priority").HasValue("300"), @@ -181,10 +179,10 @@ func TestAccFirewallApplicationRuleCollection_disappears(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_firewall_application_rule_collection", "test") r := FirewallApplicationRuleCollectionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("name").HasValue("acctestarc"), check.That(data.ResourceName).Key("priority").HasValue("100"), @@ -201,10 +199,10 @@ func TestAccFirewallApplicationRuleCollection_multipleRules(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_firewall_application_rule_collection", "test") r := FirewallApplicationRuleCollectionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ 
{ Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("name").HasValue("acctestarc"), check.That(data.ResourceName).Key("priority").HasValue("100"), @@ -214,7 +212,7 @@ func TestAccFirewallApplicationRuleCollection_multipleRules(t *testing.T) { }, { Config: r.multipleRules(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("name").HasValue("acctestarc"), check.That(data.ResourceName).Key("priority").HasValue("100"), @@ -224,7 +222,7 @@ func TestAccFirewallApplicationRuleCollection_multipleRules(t *testing.T) { }, { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("name").HasValue("acctestarc"), check.That(data.ResourceName).Key("priority").HasValue("100"), @@ -239,10 +237,10 @@ func TestAccFirewallApplicationRuleCollection_multipleProtocols(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_firewall_application_rule_collection", "test") r := FirewallApplicationRuleCollectionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.multipleProtocols(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("name").HasValue("acctestarc"), check.That(data.ResourceName).Key("priority").HasValue("100"), @@ -263,10 +261,10 @@ func TestAccFirewallApplicationRuleCollection_updateProtocols(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_firewall_application_rule_collection", "test") r := FirewallApplicationRuleCollectionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + 
data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.multipleProtocols(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("name").HasValue("acctestarc"), check.That(data.ResourceName).Key("priority").HasValue("100"), @@ -281,7 +279,7 @@ func TestAccFirewallApplicationRuleCollection_updateProtocols(t *testing.T) { }, { Config: r.multipleProtocolsUpdate(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("name").HasValue("acctestarc"), check.That(data.ResourceName).Key("priority").HasValue("100"), @@ -296,7 +294,7 @@ func TestAccFirewallApplicationRuleCollection_updateProtocols(t *testing.T) { }, { Config: r.multipleProtocols(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("name").HasValue("acctestarc"), check.That(data.ResourceName).Key("priority").HasValue("100"), @@ -316,10 +314,10 @@ func TestAccFirewallApplicationRuleCollection_updateFirewallTags(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_firewall_application_rule_collection", "test") r := FirewallApplicationRuleCollectionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("name").HasValue("acctestarc"), check.That(data.ResourceName).Key("priority").HasValue("100"), @@ -330,7 +328,7 @@ func TestAccFirewallApplicationRuleCollection_updateFirewallTags(t *testing.T) { }, { Config: r.updateFirewallTags(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( 
check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("name").HasValue("acctestarc"), check.That(data.ResourceName).Key("priority").HasValue("100"), @@ -346,10 +344,10 @@ func TestAccFirewallApplicationRuleCollection_ipGroups(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_firewall_application_rule_collection", "test") r := FirewallApplicationRuleCollectionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.ipGroups(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("rule.#").HasValue("1"), ), @@ -362,7 +360,7 @@ func TestAccFirewallApplicationRuleCollection_noSource(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_firewall_application_rule_collection", "test") r := FirewallApplicationRuleCollectionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.noSource(data), ExpectError: regexp.MustCompile(fmt.Sprintf("at least one of %q and %q must be specified", "source_addresses", "source_ip_groups")), @@ -370,7 +368,7 @@ func TestAccFirewallApplicationRuleCollection_noSource(t *testing.T) { }) } -func (FirewallApplicationRuleCollectionResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (FirewallApplicationRuleCollectionResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { var id, err = azure.ParseAzureResourceID(state.ID) if err != nil { return nil, err @@ -400,7 +398,7 @@ func (FirewallApplicationRuleCollectionResource) Exists(ctx context.Context, cli return utils.Bool(false), nil } -func (t FirewallApplicationRuleCollectionResource) doesNotExist(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) error { +func (t 
FirewallApplicationRuleCollectionResource) doesNotExist(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) error { var id, err = azure.ParseAzureResourceID(state.ID) if err != nil { return err @@ -421,7 +419,7 @@ func (t FirewallApplicationRuleCollectionResource) doesNotExist(ctx context.Cont return nil } -func (t FirewallApplicationRuleCollectionResource) disappears(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) error { +func (t FirewallApplicationRuleCollectionResource) disappears(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) error { client := clients.Firewall.AzureFirewallsClient var id, err = azure.ParseAzureResourceID(state.ID) if err != nil { diff --git a/azurerm/internal/services/firewall/firewall_data_source.go b/azurerm/internal/services/firewall/firewall_data_source.go index e37f462fbf26..4e156f068fc0 100644 --- a/azurerm/internal/services/firewall/firewall_data_source.go +++ b/azurerm/internal/services/firewall/firewall_data_source.go @@ -4,27 +4,27 @@ import ( "fmt" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/firewall/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func FirewallDataSource() *schema.Resource { - return &schema.Resource{ +func 
FirewallDataSource() *pluginsdk.Resource { + return &pluginsdk.Resource{ Read: FirewallDataSourceRead, - Timeouts: &schema.ResourceTimeout{ - Read: schema.DefaultTimeout(5 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Read: pluginsdk.DefaultTimeout(5 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validate.FirewallName, }, @@ -34,39 +34,39 @@ func FirewallDataSource() *schema.Resource { "resource_group_name": azure.SchemaResourceGroupNameForDataSource(), "sku_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "sku_tier": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "firewall_policy_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "ip_configuration": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "subnet_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "public_ip_address_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "private_ip_address": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, }, @@ -74,24 +74,24 @@ func FirewallDataSource() *schema.Resource { }, "management_ip_configuration": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "subnet_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, 
"public_ip_address_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "private_ip_address": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, }, @@ -99,36 +99,36 @@ func FirewallDataSource() *schema.Resource { }, "threat_intel_mode": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "dns_servers": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, + Elem: &pluginsdk.Schema{Type: pluginsdk.TypeString}, }, "virtual_hub": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "virtual_hub_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "public_ip_count": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Computed: true, }, "public_ip_addresses": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, + Elem: &pluginsdk.Schema{Type: pluginsdk.TypeString}, }, "private_ip_address": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, }, @@ -142,7 +142,7 @@ func FirewallDataSource() *schema.Resource { } } -func FirewallDataSourceRead(d *schema.ResourceData, meta interface{}) error { +func FirewallDataSourceRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Firewall.AzureFirewallsClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() diff --git a/azurerm/internal/services/firewall/firewall_data_source_test.go b/azurerm/internal/services/firewall/firewall_data_source_test.go index ea93fded1284..960fe0d77b26 100644 --- a/azurerm/internal/services/firewall/firewall_data_source_test.go +++ b/azurerm/internal/services/firewall/firewall_data_source_test.go @@ -4,7 +4,6 @@ import 
( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -16,10 +15,10 @@ func TestAccFirewallDataSource_basic(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_firewall", "test") r := FirewallDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("ip_configuration.0.name").HasValue("configuration"), check.That(data.ResourceName).Key("ip_configuration.0.private_ip_address").Exists(), ), @@ -31,10 +30,10 @@ func TestAccFirewallDataSource_enableDNS(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_firewall", "test") r := FirewallDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.enableDNS(data, "1.1.1.1", "8.8.8.8"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("dns_servers.#").HasValue("2"), check.That(data.ResourceName).Key("dns_servers.0").HasValue("1.1.1.1"), check.That(data.ResourceName).Key("dns_servers.1").HasValue("8.8.8.8"), @@ -47,10 +46,10 @@ func TestAccFirewallDataSource_withManagementIp(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_firewall", "test") r := FirewallDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.withManagementIp(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("ip_configuration.0.name").HasValue("configuration"), check.That(data.ResourceName).Key("ip_configuration.0.private_ip_address").Exists(), 
check.That(data.ResourceName).Key("management_ip_configuration.0.name").HasValue("management_configuration"), @@ -64,10 +63,10 @@ func TestAccFirewallDataSource_withFirewallPolicy(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_firewall", "test") r := FirewallDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.withFirewallPolicy(data, "policy1"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("firewall_policy_id").Exists(), ), }, @@ -78,10 +77,10 @@ func TestAccFirewallDataSource_inVirtualhub(t *testing.T) { data := acceptance.BuildTestData(t, "data.azurerm_firewall", "test") r := FirewallDataSource{} - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.inVirtualHub(data, 2), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("virtual_hub.0.virtual_hub_id").Exists(), check.That(data.ResourceName).Key("virtual_hub.0.public_ip_count").HasValue("2"), check.That(data.ResourceName).Key("virtual_hub.0.public_ip_addresses.#").HasValue("2"), diff --git a/azurerm/internal/services/firewall/firewall_nat_rule_collection_resource.go b/azurerm/internal/services/firewall/firewall_nat_rule_collection_resource.go index 16fa4c18f486..34d1390390f2 100644 --- a/azurerm/internal/services/firewall/firewall_nat_rule_collection_resource.go +++ b/azurerm/internal/services/firewall/firewall_nat_rule_collection_resource.go @@ -5,9 +5,7 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" @@ -16,12 +14,13 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/firewall/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/set" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func resourceFirewallNatRuleCollection() *schema.Resource { - return &schema.Resource{ +func resourceFirewallNatRuleCollection() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceFirewallNatRuleCollectionCreateUpdate, Read: resourceFirewallNatRuleCollectionRead, Update: resourceFirewallNatRuleCollectionCreateUpdate, @@ -29,23 +28,23 @@ func resourceFirewallNatRuleCollection() *schema.Resource { // TODO: replace this with an importer which validates the ID during import Importer: pluginsdk.DefaultImporter(), - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(30 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.FirewallName, }, "azure_firewall_name": { - 
Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.FirewallName, @@ -54,80 +53,80 @@ func resourceFirewallNatRuleCollection() *schema.Resource { "resource_group_name": azure.SchemaResourceGroupName(), "priority": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Required: true, ValidateFunc: validation.IntBetween(100, 65000), }, "action": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ - string(network.Dnat), - string(network.Snat), + string(network.AzureFirewallNatRCActionTypeDnat), + string(network.AzureFirewallNatRCActionTypeSnat), }, false), }, "rule": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Required: true, MinItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringIsNotEmpty, }, "description": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, }, "translated_address": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "translated_port": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, }, "source_addresses": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, + Elem: &pluginsdk.Schema{Type: pluginsdk.TypeString}, + Set: pluginsdk.HashString, }, "source_ip_groups": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, + Elem: &pluginsdk.Schema{Type: pluginsdk.TypeString}, + Set: pluginsdk.HashString, }, "destination_addresses": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: 
schema.HashString, + Elem: &pluginsdk.Schema{Type: pluginsdk.TypeString}, + Set: pluginsdk.HashString, }, "destination_ports": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, + Elem: &pluginsdk.Schema{Type: pluginsdk.TypeString}, + Set: pluginsdk.HashString, }, "protocols": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Required: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: validation.StringInSlice([]string{ - string(network.Any), - string(network.ICMP), - string(network.TCP), - string(network.UDP), + string(network.AzureFirewallNetworkRuleProtocolAny), + string(network.AzureFirewallNetworkRuleProtocolICMP), + string(network.AzureFirewallNetworkRuleProtocolTCP), + string(network.AzureFirewallNetworkRuleProtocolUDP), }, false), }, - Set: schema.HashString, + Set: pluginsdk.HashString, }, }, }, @@ -136,7 +135,7 @@ func resourceFirewallNatRuleCollection() *schema.Resource { } } -func resourceFirewallNatRuleCollectionCreateUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceFirewallNatRuleCollectionCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Firewall.AzureFirewallsClient ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -163,7 +162,7 @@ func resourceFirewallNatRuleCollectionCreateUpdate(d *schema.ResourceData, meta } ruleCollections := *props.NatRuleCollections - natRules, err := expandFirewallNatRules(d.Get("rule").(*schema.Set)) + natRules, err := expandFirewallNatRules(d.Get("rule").(*pluginsdk.Set)) if err != nil { return fmt.Errorf("expanding Firewall NAT Rules: %+v", err) } @@ -248,7 +247,7 @@ func resourceFirewallNatRuleCollectionCreateUpdate(d *schema.ResourceData, meta return resourceFirewallNatRuleCollectionRead(d, meta) } -func 
resourceFirewallNatRuleCollectionRead(d *schema.ResourceData, meta interface{}) error { +func resourceFirewallNatRuleCollectionRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Firewall.AzureFirewallsClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() @@ -321,7 +320,7 @@ func resourceFirewallNatRuleCollectionRead(d *schema.ResourceData, meta interfac return nil } -func resourceFirewallNatRuleCollectionDelete(d *schema.ResourceData, meta interface{}) error { +func resourceFirewallNatRuleCollectionDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Firewall.AzureFirewallsClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() @@ -380,7 +379,7 @@ func resourceFirewallNatRuleCollectionDelete(d *schema.ResourceData, meta interf return nil } -func expandFirewallNatRules(input *schema.Set) (*[]network.AzureFirewallNatRule, error) { +func expandFirewallNatRules(input *pluginsdk.Set) (*[]network.AzureFirewallNatRule, error) { nwRules := input.List() rules := make([]network.AzureFirewallNatRule, 0) @@ -391,12 +390,12 @@ func expandFirewallNatRules(input *schema.Set) (*[]network.AzureFirewallNatRule, description := rule["description"].(string) sourceAddresses := make([]string, 0) - for _, v := range rule["source_addresses"].(*schema.Set).List() { + for _, v := range rule["source_addresses"].(*pluginsdk.Set).List() { sourceAddresses = append(sourceAddresses, v.(string)) } sourceIpGroups := make([]string, 0) - for _, v := range rule["source_ip_groups"].(*schema.Set).List() { + for _, v := range rule["source_ip_groups"].(*pluginsdk.Set).List() { sourceIpGroups = append(sourceIpGroups, v.(string)) } @@ -405,12 +404,12 @@ func expandFirewallNatRules(input *schema.Set) (*[]network.AzureFirewallNatRule, } destinationAddresses := make([]string, 0) - for _, v := range rule["destination_addresses"].(*schema.Set).List() { + for 
_, v := range rule["destination_addresses"].(*pluginsdk.Set).List() { destinationAddresses = append(destinationAddresses, v.(string)) } destinationPorts := make([]string, 0) - for _, v := range rule["destination_ports"].(*schema.Set).List() { + for _, v := range rule["destination_ports"].(*pluginsdk.Set).List() { destinationPorts = append(destinationPorts, v.(string)) } @@ -429,7 +428,7 @@ func expandFirewallNatRules(input *schema.Set) (*[]network.AzureFirewallNatRule, } nrProtocols := make([]network.AzureFirewallNetworkRuleProtocol, 0) - protocols := rule["protocols"].(*schema.Set) + protocols := rule["protocols"].(*pluginsdk.Set) for _, v := range protocols.List() { s := network.AzureFirewallNetworkRuleProtocol(v.(string)) nrProtocols = append(nrProtocols, s) diff --git a/azurerm/internal/services/firewall/firewall_nat_rule_collection_resource_test.go b/azurerm/internal/services/firewall/firewall_nat_rule_collection_resource_test.go index ee7c093be654..50d650f2f385 100644 --- a/azurerm/internal/services/firewall/firewall_nat_rule_collection_resource_test.go +++ b/azurerm/internal/services/firewall/firewall_nat_rule_collection_resource_test.go @@ -6,13 +6,12 @@ import ( "regexp" "testing" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" 
) @@ -23,10 +22,10 @@ func TestAccFirewallNatRuleCollection_basic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_firewall_nat_rule_collection", "test") r := FirewallNatRuleCollectionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -38,10 +37,10 @@ func TestAccFirewallNatRuleCollection_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_firewall_nat_rule_collection", "test") r := FirewallNatRuleCollectionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -56,17 +55,17 @@ func TestAccFirewallNatRuleCollection_updatedName(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_firewall_nat_rule_collection", "test") r := FirewallNatRuleCollectionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.updatedName(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -79,17 +78,17 @@ func TestAccFirewallNatRuleCollection_multipleRuleCollections(t *testing.T) { data2 := acceptance.BuildTestData(t, "azurerm_firewall_nat_rule_collection", "test_add") r := FirewallNatRuleCollectionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: 
acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.multiple(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data2.ResourceName).ExistsInAzure(r), ), @@ -97,7 +96,7 @@ func TestAccFirewallNatRuleCollection_multipleRuleCollections(t *testing.T) { data.ImportStep(), { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -110,10 +109,10 @@ func TestAccFirewallNatRuleCollection_update(t *testing.T) { r := FirewallNatRuleCollectionResource{} secondResourceName := "azurerm_firewall_nat_rule_collection.test_add" - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.multiple(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(secondResourceName).ExistsInAzure(r), ), @@ -121,7 +120,7 @@ func TestAccFirewallNatRuleCollection_update(t *testing.T) { data.ImportStep(), { Config: r.multipleUpdate(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(secondResourceName).ExistsInAzure(r), ), @@ -134,10 +133,10 @@ func TestAccFirewallNatRuleCollection_disappears(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_firewall_nat_rule_collection", "test") r := FirewallNatRuleCollectionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), data.CheckWithClient(r.disappears), ), @@ -150,24 +149,24 @@ func TestAccFirewallNatRuleCollection_multipleRules(t *testing.T) { 
data := acceptance.BuildTestData(t, "azurerm_firewall_nat_rule_collection", "test") r := FirewallNatRuleCollectionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.multipleRules(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -179,17 +178,17 @@ func TestAccFirewallNatRuleCollection_updateFirewallTags(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_firewall_nat_rule_collection", "test") r := FirewallNatRuleCollectionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.updateFirewallTags(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -201,10 +200,10 @@ func TestAccFirewallNatRuleCollection_ipGroup(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_firewall_nat_rule_collection", "test") r := FirewallNatRuleCollectionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.ipGroup(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -216,7 +215,7 @@ func TestAccFirewallNatRuleCollection_noSource(t *testing.T) { data := 
acceptance.BuildTestData(t, "azurerm_firewall_nat_rule_collection", "test") r := FirewallNatRuleCollectionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.noSource(data), ExpectError: regexp.MustCompile(fmt.Sprintf("at least one of %q and %q must be specified", "source_addresses", "source_ip_groups")), @@ -224,7 +223,7 @@ func TestAccFirewallNatRuleCollection_noSource(t *testing.T) { }) } -func (FirewallNatRuleCollectionResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (FirewallNatRuleCollectionResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { var id, err = azure.ParseAzureResourceID(state.ID) if err != nil { return nil, err @@ -254,7 +253,7 @@ func (FirewallNatRuleCollectionResource) Exists(ctx context.Context, clients *cl return utils.Bool(false), nil } -func (t FirewallNatRuleCollectionResource) doesNotExist(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) error { +func (t FirewallNatRuleCollectionResource) doesNotExist(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) error { var id, err = azure.ParseAzureResourceID(state.ID) if err != nil { return err @@ -275,7 +274,7 @@ func (t FirewallNatRuleCollectionResource) doesNotExist(ctx context.Context, cli return nil } -func (t FirewallNatRuleCollectionResource) disappears(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) error { +func (t FirewallNatRuleCollectionResource) disappears(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) error { client := clients.Firewall.AzureFirewallsClient var id, err = azure.ParseAzureResourceID(state.ID) if err != nil { diff --git a/azurerm/internal/services/firewall/firewall_network_rule_collection_resource.go 
b/azurerm/internal/services/firewall/firewall_network_rule_collection_resource.go index 8d13d1284540..3a5b60aac7d3 100644 --- a/azurerm/internal/services/firewall/firewall_network_rule_collection_resource.go +++ b/azurerm/internal/services/firewall/firewall_network_rule_collection_resource.go @@ -5,9 +5,7 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" @@ -16,12 +14,13 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/firewall/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/set" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func resourceFirewallNetworkRuleCollection() *schema.Resource { - return &schema.Resource{ +func resourceFirewallNetworkRuleCollection() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceFirewallNetworkRuleCollectionCreateUpdate, Read: resourceFirewallNetworkRuleCollectionRead, Update: resourceFirewallNetworkRuleCollectionCreateUpdate, @@ -29,23 +28,23 @@ func resourceFirewallNetworkRuleCollection() *schema.Resource { // TODO: replace this with an importer which validates the ID during import Importer: pluginsdk.DefaultImporter(), - Timeouts: 
&schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(30 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.FirewallName, }, "azure_firewall_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.FirewallName, @@ -54,13 +53,13 @@ func resourceFirewallNetworkRuleCollection() *schema.Resource { "resource_group_name": azure.SchemaResourceGroupName(), "priority": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Required: true, ValidateFunc: validation.IntBetween(100, 65000), }, "action": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ string(network.AzureFirewallRCActionTypeAllow), @@ -69,69 +68,69 @@ func resourceFirewallNetworkRuleCollection() *schema.Resource { }, "rule": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Required: true, MinItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringIsNotEmpty, }, "description": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, }, "source_addresses": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, 
+ Elem: &pluginsdk.Schema{Type: pluginsdk.TypeString}, + Set: pluginsdk.HashString, }, "source_ip_groups": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, + Elem: &pluginsdk.Schema{Type: pluginsdk.TypeString}, + Set: pluginsdk.HashString, }, "destination_addresses": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, + Elem: &pluginsdk.Schema{Type: pluginsdk.TypeString}, + Set: pluginsdk.HashString, }, "destination_ip_groups": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, + Elem: &pluginsdk.Schema{Type: pluginsdk.TypeString}, + Set: pluginsdk.HashString, }, "destination_ports": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, + Elem: &pluginsdk.Schema{Type: pluginsdk.TypeString}, + Set: pluginsdk.HashString, }, "destination_fqdns": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, + Elem: &pluginsdk.Schema{Type: pluginsdk.TypeString}, + Set: pluginsdk.HashString, }, "protocols": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Required: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: validation.StringInSlice([]string{ - string(network.Any), - string(network.ICMP), - string(network.TCP), - string(network.UDP), + string(network.AzureFirewallNetworkRuleProtocolAny), + string(network.AzureFirewallNetworkRuleProtocolICMP), + string(network.AzureFirewallNetworkRuleProtocolTCP), + string(network.AzureFirewallNetworkRuleProtocolUDP), }, false), }, - Set: schema.HashString, + Set: pluginsdk.HashString, }, }, }, @@ -140,7 
+139,7 @@ func resourceFirewallNetworkRuleCollection() *schema.Resource { } } -func resourceFirewallNetworkRuleCollectionCreateUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceFirewallNetworkRuleCollectionCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Firewall.AzureFirewallsClient ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -167,7 +166,7 @@ func resourceFirewallNetworkRuleCollectionCreateUpdate(d *schema.ResourceData, m } ruleCollections := *props.NetworkRuleCollections - networkRules, err := expandFirewallNetworkRules(d.Get("rule").(*schema.Set)) + networkRules, err := expandFirewallNetworkRules(d.Get("rule").(*pluginsdk.Set)) if err != nil { return fmt.Errorf("expanding Firewall Network Rules: %+v", err) } @@ -253,7 +252,7 @@ func resourceFirewallNetworkRuleCollectionCreateUpdate(d *schema.ResourceData, m return resourceFirewallNetworkRuleCollectionRead(d, meta) } -func resourceFirewallNetworkRuleCollectionRead(d *schema.ResourceData, meta interface{}) error { +func resourceFirewallNetworkRuleCollectionRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Firewall.AzureFirewallsClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() @@ -326,7 +325,7 @@ func resourceFirewallNetworkRuleCollectionRead(d *schema.ResourceData, meta inte return nil } -func resourceFirewallNetworkRuleCollectionDelete(d *schema.ResourceData, meta interface{}) error { +func resourceFirewallNetworkRuleCollectionDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Firewall.AzureFirewallsClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() @@ -385,7 +384,7 @@ func resourceFirewallNetworkRuleCollectionDelete(d *schema.ResourceData, meta in return nil } -func expandFirewallNetworkRules(input *schema.Set) 
(*[]network.AzureFirewallNetworkRule, error) { +func expandFirewallNetworkRules(input *pluginsdk.Set) (*[]network.AzureFirewallNetworkRule, error) { nwRules := input.List() rules := make([]network.AzureFirewallNetworkRule, 0) @@ -396,12 +395,12 @@ func expandFirewallNetworkRules(input *schema.Set) (*[]network.AzureFirewallNetw description := rule["description"].(string) sourceAddresses := make([]string, 0) - for _, v := range rule["source_addresses"].(*schema.Set).List() { + for _, v := range rule["source_addresses"].(*pluginsdk.Set).List() { sourceAddresses = append(sourceAddresses, v.(string)) } sourceIpGroups := make([]string, 0) - for _, v := range rule["source_ip_groups"].(*schema.Set).List() { + for _, v := range rule["source_ip_groups"].(*pluginsdk.Set).List() { sourceIpGroups = append(sourceIpGroups, v.(string)) } @@ -410,17 +409,17 @@ func expandFirewallNetworkRules(input *schema.Set) (*[]network.AzureFirewallNetw } destinationAddresses := make([]string, 0) - for _, v := range rule["destination_addresses"].(*schema.Set).List() { + for _, v := range rule["destination_addresses"].(*pluginsdk.Set).List() { destinationAddresses = append(destinationAddresses, v.(string)) } destinationIpGroups := make([]string, 0) - for _, v := range rule["destination_ip_groups"].(*schema.Set).List() { + for _, v := range rule["destination_ip_groups"].(*pluginsdk.Set).List() { destinationIpGroups = append(destinationIpGroups, v.(string)) } destinationFqdns := make([]string, 0) - for _, v := range rule["destination_fqdns"].(*schema.Set).List() { + for _, v := range rule["destination_fqdns"].(*pluginsdk.Set).List() { destinationFqdns = append(destinationFqdns, v.(string)) } @@ -429,7 +428,7 @@ func expandFirewallNetworkRules(input *schema.Set) (*[]network.AzureFirewallNetw } destinationPorts := make([]string, 0) - for _, v := range rule["destination_ports"].(*schema.Set).List() { + for _, v := range rule["destination_ports"].(*pluginsdk.Set).List() { destinationPorts = 
append(destinationPorts, v.(string)) } @@ -445,7 +444,7 @@ func expandFirewallNetworkRules(input *schema.Set) (*[]network.AzureFirewallNetw } nrProtocols := make([]network.AzureFirewallNetworkRuleProtocol, 0) - protocols := rule["protocols"].(*schema.Set) + protocols := rule["protocols"].(*pluginsdk.Set) for _, v := range protocols.List() { s := network.AzureFirewallNetworkRuleProtocol(v.(string)) nrProtocols = append(nrProtocols, s) @@ -467,12 +466,12 @@ func flattenFirewallNetworkRuleCollectionRules(rules *[]network.AzureFirewallNet var ( name string description string - sourceAddresses *schema.Set - sourceIPGroups *schema.Set - destAddresses *schema.Set - destIPGroups *schema.Set - destPorts *schema.Set - destFqdns *schema.Set + sourceAddresses *pluginsdk.Set + sourceIPGroups *pluginsdk.Set + destAddresses *pluginsdk.Set + destIPGroups *pluginsdk.Set + destPorts *pluginsdk.Set + destFqdns *pluginsdk.Set ) if rule.Name != nil { diff --git a/azurerm/internal/services/firewall/firewall_network_rule_collection_resource_test.go b/azurerm/internal/services/firewall/firewall_network_rule_collection_resource_test.go index afc8eae84bf3..c2d3e4fc643d 100644 --- a/azurerm/internal/services/firewall/firewall_network_rule_collection_resource_test.go +++ b/azurerm/internal/services/firewall/firewall_network_rule_collection_resource_test.go @@ -6,14 +6,13 @@ import ( "regexp" "testing" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/firewall/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -24,10 +23,10 @@ func TestAccFirewallNetworkRuleCollection_basic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_firewall_network_rule_collection", "test") r := FirewallNetworkRuleCollectionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("name").HasValue("acctestnrc"), check.That(data.ResourceName).Key("priority").HasValue("100"), @@ -43,10 +42,10 @@ func TestAccFirewallNetworkRuleCollection_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_firewall_network_rule_collection", "test") r := FirewallNetworkRuleCollectionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -61,10 +60,10 @@ func TestAccFirewallNetworkRuleCollection_updatedName(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_firewall_network_rule_collection", "test") r := FirewallNetworkRuleCollectionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("name").HasValue("acctestnrc"), 
check.That(data.ResourceName).Key("priority").HasValue("100"), @@ -75,7 +74,7 @@ func TestAccFirewallNetworkRuleCollection_updatedName(t *testing.T) { data.ImportStep(), { Config: r.updatedName(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("name").HasValue("acctestnrc"), check.That(data.ResourceName).Key("priority").HasValue("100"), @@ -92,10 +91,10 @@ func TestAccFirewallNetworkRuleCollection_multipleRuleCollections(t *testing.T) r := FirewallNetworkRuleCollectionResource{} secondRule := "azurerm_firewall_network_rule_collection.test_add" - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("name").HasValue("acctestnrc"), check.That(data.ResourceName).Key("priority").HasValue("100"), @@ -105,22 +104,22 @@ func TestAccFirewallNetworkRuleCollection_multipleRuleCollections(t *testing.T) }, { Config: r.multiple(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("name").HasValue("acctestnrc"), check.That(data.ResourceName).Key("priority").HasValue("100"), check.That(data.ResourceName).Key("action").HasValue("Allow"), check.That(data.ResourceName).Key("rule.#").HasValue("1"), check.That(secondRule).ExistsInAzure(r), - resource.TestCheckResourceAttr(secondRule, "name", "acctestnrc_add"), - resource.TestCheckResourceAttr(secondRule, "priority", "200"), - resource.TestCheckResourceAttr(secondRule, "action", "Deny"), - resource.TestCheckResourceAttr(secondRule, "rule.#", "1"), + acceptance.TestCheckResourceAttr(secondRule, "name", "acctestnrc_add"), + acceptance.TestCheckResourceAttr(secondRule, 
"priority", "200"), + acceptance.TestCheckResourceAttr(secondRule, "action", "Deny"), + acceptance.TestCheckResourceAttr(secondRule, "rule.#", "1"), ), }, { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("name").HasValue("acctestnrc"), check.That(data.ResourceName).Key("priority").HasValue("100"), @@ -137,35 +136,35 @@ func TestAccFirewallNetworkRuleCollection_update(t *testing.T) { r := FirewallNetworkRuleCollectionResource{} secondResourceName := "azurerm_firewall_network_rule_collection.test_add" - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.multiple(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("name").HasValue("acctestnrc"), check.That(data.ResourceName).Key("priority").HasValue("100"), check.That(data.ResourceName).Key("action").HasValue("Allow"), check.That(data.ResourceName).Key("rule.#").HasValue("1"), check.That(secondResourceName).ExistsInAzure(r), - resource.TestCheckResourceAttr(secondResourceName, "name", "acctestnrc_add"), - resource.TestCheckResourceAttr(secondResourceName, "priority", "200"), - resource.TestCheckResourceAttr(secondResourceName, "action", "Deny"), - resource.TestCheckResourceAttr(secondResourceName, "rule.#", "1"), + acceptance.TestCheckResourceAttr(secondResourceName, "name", "acctestnrc_add"), + acceptance.TestCheckResourceAttr(secondResourceName, "priority", "200"), + acceptance.TestCheckResourceAttr(secondResourceName, "action", "Deny"), + acceptance.TestCheckResourceAttr(secondResourceName, "rule.#", "1"), ), }, { Config: r.multipleUpdate(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), 
check.That(data.ResourceName).Key("name").HasValue("acctestnrc"), check.That(data.ResourceName).Key("priority").HasValue("300"), check.That(data.ResourceName).Key("action").HasValue("Deny"), check.That(data.ResourceName).Key("rule.#").HasValue("1"), check.That(secondResourceName).ExistsInAzure(r), - resource.TestCheckResourceAttr(secondResourceName, "name", "acctestnrc_add"), - resource.TestCheckResourceAttr(secondResourceName, "priority", "400"), - resource.TestCheckResourceAttr(secondResourceName, "action", "Allow"), - resource.TestCheckResourceAttr(secondResourceName, "rule.#", "1"), + acceptance.TestCheckResourceAttr(secondResourceName, "name", "acctestnrc_add"), + acceptance.TestCheckResourceAttr(secondResourceName, "priority", "400"), + acceptance.TestCheckResourceAttr(secondResourceName, "action", "Allow"), + acceptance.TestCheckResourceAttr(secondResourceName, "rule.#", "1"), ), }, }) @@ -175,10 +174,10 @@ func TestAccFirewallNetworkRuleCollection_disappears(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_firewall_network_rule_collection", "test") r := FirewallNetworkRuleCollectionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("name").HasValue("acctestnrc"), check.That(data.ResourceName).Key("priority").HasValue("100"), @@ -197,10 +196,10 @@ func TestAccFirewallNetworkRuleCollection_multipleRules(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_firewall_network_rule_collection", "test") r := FirewallNetworkRuleCollectionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), 
check.That(data.ResourceName).Key("name").HasValue("acctestnrc"), check.That(data.ResourceName).Key("priority").HasValue("100"), @@ -210,7 +209,7 @@ func TestAccFirewallNetworkRuleCollection_multipleRules(t *testing.T) { }, { Config: r.multipleRules(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("name").HasValue("acctestnrc"), check.That(data.ResourceName).Key("priority").HasValue("100"), @@ -220,7 +219,7 @@ func TestAccFirewallNetworkRuleCollection_multipleRules(t *testing.T) { }, { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("name").HasValue("acctestnrc"), check.That(data.ResourceName).Key("priority").HasValue("100"), @@ -235,10 +234,10 @@ func TestAccFirewallNetworkRuleCollection_updateFirewallTags(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_firewall_network_rule_collection", "test") r := FirewallNetworkRuleCollectionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("name").HasValue("acctestnrc"), check.That(data.ResourceName).Key("priority").HasValue("100"), @@ -248,7 +247,7 @@ func TestAccFirewallNetworkRuleCollection_updateFirewallTags(t *testing.T) { }, { Config: r.updateFirewallTags(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("name").HasValue("acctestnrc"), check.That(data.ResourceName).Key("priority").HasValue("100"), @@ -263,10 +262,10 @@ func TestAccFirewallNetworkRuleCollection_serviceTag(t *testing.T) { data 
:= acceptance.BuildTestData(t, "azurerm_firewall_network_rule_collection", "test") r := FirewallNetworkRuleCollectionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.serviceTag(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("name").HasValue("acctestnrc"), check.That(data.ResourceName).Key("priority").HasValue("100"), @@ -282,10 +281,10 @@ func TestAccFirewallNetworkRuleCollection_ipGroup(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_firewall_network_rule_collection", "test") r := FirewallNetworkRuleCollectionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.ipGroup(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("rule.#").HasValue("1"), ), @@ -298,10 +297,10 @@ func TestAccFirewallNetworkRuleCollection_fqdns(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_firewall_network_rule_collection", "test") r := FirewallNetworkRuleCollectionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.fqdns(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -313,7 +312,7 @@ func TestAccFirewallNetworkRuleCollection_noSource(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_firewall_network_rule_collection", "test") r := FirewallNetworkRuleCollectionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.noSource(data), ExpectError: regexp.MustCompile(fmt.Sprintf("at least one of %q and %q must be specified", "source_addresses", 
"source_ip_groups")), @@ -325,7 +324,7 @@ func TestAccFirewallNetworkRuleCollection_noDestination(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_firewall_network_rule_collection", "test") r := FirewallNetworkRuleCollectionResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.noDestination(data), ExpectError: regexp.MustCompile(fmt.Sprintf("at least one of %q, %q and %q must be specified", "destination_addresses", "destination_ip_groups", "destination_fqdns")), @@ -333,7 +332,7 @@ func TestAccFirewallNetworkRuleCollection_noDestination(t *testing.T) { }) } -func (FirewallNetworkRuleCollectionResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (FirewallNetworkRuleCollectionResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { var id, err = azure.ParseAzureResourceID(state.ID) if err != nil { return nil, err @@ -364,7 +363,7 @@ func (FirewallNetworkRuleCollectionResource) Exists(ctx context.Context, clients } func (r FirewallNetworkRuleCollectionResource) checkFirewallNetworkRuleCollectionDoesNotExist(collectionName string) acceptance.ClientCheckFunc { - return func(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) error { + return func(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) error { // Ensure we have enough information in state to look up in API id, err := parse.FirewallNetworkRuleCollectionID(state.ID) if err != nil { @@ -389,7 +388,7 @@ func (r FirewallNetworkRuleCollectionResource) checkFirewallNetworkRuleCollectio } } -func (FirewallNetworkRuleCollectionResource) Destroy(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (FirewallNetworkRuleCollectionResource) Destroy(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) 
(*bool, error) { id, err := parse.FirewallNetworkRuleCollectionID(state.ID) if err != nil { return nil, err diff --git a/azurerm/internal/services/firewall/firewall_policy_data_source.go b/azurerm/internal/services/firewall/firewall_policy_data_source.go index 3fb8805ed9c8..1885b1eff508 100644 --- a/azurerm/internal/services/firewall/firewall_policy_data_source.go +++ b/azurerm/internal/services/firewall/firewall_policy_data_source.go @@ -4,27 +4,27 @@ import ( "fmt" "time" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/location" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/firewall/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func FirewallDataSourcePolicy() *schema.Resource { - return &schema.Resource{ +func FirewallDataSourcePolicy() *pluginsdk.Resource { + return &pluginsdk.Resource{ Read: FirewallDataSourcePolicyRead, - Timeouts: &schema.ResourceTimeout{ - Read: schema.DefaultTimeout(5 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Read: pluginsdk.DefaultTimeout(5 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validate.FirewallPolicyName(), }, @@ -34,36 +34,36 @@ func FirewallDataSourcePolicy() *schema.Resource { "location": azure.SchemaLocationForDataSource(), "base_policy_id": { - Type: schema.TypeString, + Type: 
pluginsdk.TypeString, Computed: true, }, "child_policies": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, "dns": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "servers": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, "proxy_enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Computed: true, }, "network_rule_fqdn_enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Computed: true, }, }, @@ -71,43 +71,43 @@ func FirewallDataSourcePolicy() *schema.Resource { }, "firewalls": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, "rule_collection_groups": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, "threat_intelligence_mode": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, "threat_intelligence_allowlist": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "ip_addresses": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, "fqdns": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Schema{ - Type: 
schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, }, @@ -119,7 +119,7 @@ func FirewallDataSourcePolicy() *schema.Resource { } } -func FirewallDataSourcePolicyRead(d *schema.ResourceData, meta interface{}) error { +func FirewallDataSourcePolicyRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Firewall.FirewallPolicyClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() diff --git a/azurerm/internal/services/firewall/firewall_policy_data_source_test.go b/azurerm/internal/services/firewall/firewall_policy_data_source_test.go index b8be61b99f9d..9dbea4aa6a9c 100644 --- a/azurerm/internal/services/firewall/firewall_policy_data_source_test.go +++ b/azurerm/internal/services/firewall/firewall_policy_data_source_test.go @@ -4,13 +4,10 @@ import ( "fmt" "testing" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" - - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/location" - - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/location" ) type FirewallPolicyDataSource struct { @@ -21,15 +18,15 @@ func TestAccFirewallPolicyDataSource_basic(t *testing.T) { r := FirewallPolicyDataSource{} dataParent := acceptance.BuildTestData(t, "data.azurerm_firewall_policy", "test-parent") - data.DataSourceTest(t, []resource.TestStep{ + data.DataSourceTest(t, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("name").Exists(), 
check.That(data.ResourceName).Key("resource_group_name").Exists(), check.That(data.ResourceName).Key("location").HasValue(location.Normalize(data.Locations.Primary)), check.That(data.ResourceName).Key("base_policy_id").Exists(), - resource.TestCheckResourceAttr(dataParent.ResourceName, "child_policies.#", "1"), + acceptance.TestCheckResourceAttr(dataParent.ResourceName, "child_policies.#", "1"), check.That(data.ResourceName).Key("dns.0.proxy_enabled").HasValue("true"), check.That(data.ResourceName).Key("dns.0.servers.#").HasValue("2"), check.That(data.ResourceName).Key("threat_intelligence_mode").HasValue(string(network.AzureFirewallThreatIntelModeAlert)), diff --git a/azurerm/internal/services/firewall/firewall_policy_resource.go b/azurerm/internal/services/firewall/firewall_policy_resource.go index d3d2c3f1ed3d..a7547b37da26 100644 --- a/azurerm/internal/services/firewall/firewall_policy_resource.go +++ b/azurerm/internal/services/firewall/firewall_policy_resource.go @@ -5,10 +5,8 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/hashicorp/go-azure-helpers/response" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" @@ -18,14 +16,15 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/firewall/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) const azureFirewallPolicyResourceName = "azurerm_firewall_policy" -func resourceFirewallPolicy() *schema.Resource { - return &schema.Resource{ +func resourceFirewallPolicy() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceFirewallPolicyCreateUpdate, Read: resourceFirewallPolicyRead, Update: resourceFirewallPolicyCreateUpdate, @@ -36,16 +35,16 @@ func resourceFirewallPolicy() *schema.Resource { return err }), - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(30 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.FirewallPolicyName(), @@ -54,7 +53,7 @@ func resourceFirewallPolicy() *schema.Resource { "resource_group_name": azure.SchemaResourceGroupName(), "sku": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Computed: true, ForceNew: true, @@ -67,34 +66,34 @@ func resourceFirewallPolicy() *schema.Resource { "location": location.Schema(), "base_policy_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ValidateFunc: validate.FirewallPolicyID, }, "dns": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, MaxItems: 1, MinItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: 
map[string]*pluginsdk.Schema{ "servers": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: validation.IsIPv4Address, }, }, "proxy_enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Default: false, }, // TODO 3.0 - remove this property "network_rule_fqdn_enabled": { - Type: schema.TypeBool, + Type: pluginsdk.TypeBool, Optional: true, Computed: true, Deprecated: "This property has been deprecated as the service team has removed it from all API versions and is no longer supported by Azure. It will be removed in v3.0 of the provider.", @@ -104,7 +103,7 @@ func resourceFirewallPolicy() *schema.Resource { }, "threat_intelligence_mode": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Default: string(network.AzureFirewallThreatIntelModeAlert), ValidateFunc: validation.StringInSlice([]string{ @@ -115,26 +114,26 @@ func resourceFirewallPolicy() *schema.Resource { }, "threat_intelligence_allowlist": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, MaxItems: 1, MinItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "ip_addresses": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: validation.Any(validation.IsIPv4Range, validation.IsIPv4Address), }, AtLeastOneOf: []string{"threat_intelligence_allowlist.0.ip_addresses", "threat_intelligence_allowlist.0.fqdns"}, }, "fqdns": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: validation.StringIsNotEmpty, }, AtLeastOneOf: 
[]string{"threat_intelligence_allowlist.0.ip_addresses", "threat_intelligence_allowlist.0.fqdns"}, @@ -144,26 +143,26 @@ func resourceFirewallPolicy() *schema.Resource { }, "child_policies": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, "firewalls": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, "rule_collection_groups": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, }, }, @@ -172,7 +171,7 @@ func resourceFirewallPolicy() *schema.Resource { } } -func resourceFirewallPolicyCreateUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceFirewallPolicyCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Firewall.FirewallPolicyClient ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -231,7 +230,7 @@ func resourceFirewallPolicyCreateUpdate(d *schema.ResourceData, meta interface{} return resourceFirewallPolicyRead(d, meta) } -func resourceFirewallPolicyRead(d *schema.ResourceData, meta interface{}) error { +func resourceFirewallPolicyRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Firewall.FirewallPolicyClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() @@ -293,7 +292,7 @@ func resourceFirewallPolicyRead(d *schema.ResourceData, meta interface{}) error return tags.FlattenAndSet(d, resp.Tags) } -func resourceFirewallPolicyDelete(d *schema.ResourceData, meta interface{}) error { +func resourceFirewallPolicyDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := 
meta.(*clients.Client).Firewall.FirewallPolicyClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() @@ -326,8 +325,8 @@ func expandFirewallPolicyThreatIntelWhitelist(input []interface{}) *network.Fire raw := input[0].(map[string]interface{}) output := &network.FirewallPolicyThreatIntelWhitelist{ - IPAddresses: utils.ExpandStringSlice(raw["ip_addresses"].(*schema.Set).List()), - Fqdns: utils.ExpandStringSlice(raw["fqdns"].(*schema.Set).List()), + IPAddresses: utils.ExpandStringSlice(raw["ip_addresses"].(*pluginsdk.Set).List()), + Fqdns: utils.ExpandStringSlice(raw["fqdns"].(*pluginsdk.Set).List()), } return output @@ -340,7 +339,7 @@ func expandFirewallPolicyDNSSetting(input []interface{}) *network.DNSSettings { raw := input[0].(map[string]interface{}) output := &network.DNSSettings{ - Servers: utils.ExpandStringSlice(raw["servers"].(*schema.Set).List()), + Servers: utils.ExpandStringSlice(raw["servers"].(*pluginsdk.Set).List()), EnableProxy: utils.Bool(raw["proxy_enabled"].(bool)), } diff --git a/azurerm/internal/services/firewall/firewall_policy_resource_test.go b/azurerm/internal/services/firewall/firewall_policy_resource_test.go index bfb1d5c3d588..b4bc2d6a49b2 100644 --- a/azurerm/internal/services/firewall/firewall_policy_resource_test.go +++ b/azurerm/internal/services/firewall/firewall_policy_resource_test.go @@ -5,12 +5,11 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/firewall/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -21,10 +20,10 @@ func TestAccFirewallPolicy_basic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_firewall_policy", "test") r := FirewallPolicyResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -36,10 +35,10 @@ func TestAccFirewallPolicy_basicPremium(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_firewall_policy", "test") r := FirewallPolicyResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basicPremium(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -51,10 +50,10 @@ func TestAccFirewallPolicy_complete(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_firewall_policy", "test") r := FirewallPolicyResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.complete(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -66,24 +65,24 @@ func TestAccFirewallPolicy_update(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_firewall_policy", "test") r := FirewallPolicyResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.complete(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, 
data.ImportStep(), { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -95,10 +94,10 @@ func TestAccFirewallPolicy_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_firewall_policy", "test") r := FirewallPolicyResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -110,10 +109,10 @@ func TestAccFirewallPolicy_inherit(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_firewall_policy", "test") r := FirewallPolicyResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.inherit(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -121,7 +120,7 @@ func TestAccFirewallPolicy_inherit(t *testing.T) { }) } -func (FirewallPolicyResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (FirewallPolicyResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { var id, err = parse.FirewallPolicyID(state.ID) if err != nil { return nil, err diff --git a/azurerm/internal/services/firewall/firewall_policy_rule_collection_group_resource.go b/azurerm/internal/services/firewall/firewall_policy_rule_collection_group_resource.go index 761cf84d7103..5a44531bf2ea 100644 --- a/azurerm/internal/services/firewall/firewall_policy_rule_collection_group_resource.go +++ b/azurerm/internal/services/firewall/firewall_policy_rule_collection_group_resource.go @@ -6,10 +6,8 @@ import ( "strconv" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" 
+ "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/hashicorp/go-azure-helpers/response" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" azValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" @@ -17,12 +15,13 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/firewall/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/firewall/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -func resourceFirewallPolicyRuleCollectionGroup() *schema.Resource { - return &schema.Resource{ +func resourceFirewallPolicyRuleCollectionGroup() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceFirewallPolicyRuleCollectionGroupCreateUpdate, Read: resourceFirewallPolicyRuleCollectionGroupRead, Update: resourceFirewallPolicyRuleCollectionGroupCreateUpdate, @@ -33,52 +32,52 @@ func resourceFirewallPolicyRuleCollectionGroup() *schema.Resource { return err }), - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(30 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * 
time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.FirewallPolicyRuleCollectionGroupName(), }, "firewall_policy_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.FirewallPolicyID, }, "priority": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Required: true, ValidateFunc: validation.IntBetween(100, 65000), }, "application_rule_collection": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, MinItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringIsNotEmpty, }, "priority": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Required: true, ValidateFunc: validation.IntBetween(100, 65000), }, "action": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ string(network.FirewallPolicyFilterRuleCollectionActionTypeAllow), @@ -86,23 +85,23 @@ func resourceFirewallPolicyRuleCollectionGroup() *schema.Resource { }, false), }, "rule": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Required: true, MinItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validate.FirewallPolicyRuleName(), }, "protocols": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "type": { - 
Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ string(network.FirewallPolicyRuleApplicationProtocolTypeHTTP), @@ -110,7 +109,7 @@ func resourceFirewallPolicyRuleCollectionGroup() *schema.Resource { }, false), }, "port": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Required: true, ValidateFunc: validation.IntBetween(0, 64000), }, @@ -118,10 +117,10 @@ func resourceFirewallPolicyRuleCollectionGroup() *schema.Resource { }, }, "source_addresses": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: validation.Any( validation.IsIPAddress, validation.IsCIDR, @@ -130,26 +129,26 @@ func resourceFirewallPolicyRuleCollectionGroup() *schema.Resource { }, }, "source_ip_groups": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: validation.StringIsNotEmpty, }, }, "destination_fqdns": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: validation.StringIsNotEmpty, }, }, "destination_fqdn_tags": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: validation.StringIsNotEmpty, }, }, @@ -161,23 +160,23 @@ func resourceFirewallPolicyRuleCollectionGroup() *schema.Resource { }, "network_rule_collection": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, MinItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: 
pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringIsNotEmpty, }, "priority": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Required: true, ValidateFunc: validation.IntBetween(100, 65000), }, "action": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ string(network.FirewallPolicyFilterRuleCollectionActionTypeAllow), @@ -185,21 +184,21 @@ func resourceFirewallPolicyRuleCollectionGroup() *schema.Resource { }, false), }, "rule": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Required: true, MinItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validate.FirewallPolicyRuleName(), }, "protocols": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Required: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: validation.StringInSlice([]string{ string(network.FirewallPolicyRuleNetworkProtocolAny), string(network.FirewallPolicyRuleNetworkProtocolTCP), @@ -209,10 +208,10 @@ func resourceFirewallPolicyRuleCollectionGroup() *schema.Resource { }, }, "source_addresses": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: validation.Any( validation.IsIPAddress, validation.IsCIDR, @@ -221,43 +220,43 @@ func resourceFirewallPolicyRuleCollectionGroup() *schema.Resource { }, }, "source_ip_groups": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: validation.StringIsNotEmpty, }, }, "destination_addresses": { - Type: schema.TypeSet, + Type: 
pluginsdk.TypeSet, Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, // Can be IP address, CIDR, "*", or service tag ValidateFunc: validation.StringIsNotEmpty, }, }, "destination_ip_groups": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: validation.StringIsNotEmpty, }, }, "destination_fqdns": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: validation.StringIsNotEmpty, }, }, "destination_ports": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Required: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: validation.Any( azValidate.PortOrPortRangeWithin(1, 65535), validation.StringInSlice([]string{`*`}, false), @@ -272,23 +271,23 @@ func resourceFirewallPolicyRuleCollectionGroup() *schema.Resource { }, "nat_rule_collection": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, MinItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringIsNotEmpty, }, "priority": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Required: true, ValidateFunc: validation.IntBetween(100, 65000), }, "action": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ // Hardcode to using `Dnat` instead of the one defined in Swagger (i.e. 
network.DNAT) because of: https://github.com/Azure/azure-rest-api-specs/issues/9986 @@ -299,21 +298,21 @@ func resourceFirewallPolicyRuleCollectionGroup() *schema.Resource { }, false), }, "rule": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Required: true, MinItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validate.FirewallPolicyRuleName(), }, "protocols": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Required: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: validation.StringInSlice([]string{ string(network.FirewallPolicyRuleNetworkProtocolTCP), string(network.FirewallPolicyRuleNetworkProtocolUDP), @@ -321,10 +320,10 @@ func resourceFirewallPolicyRuleCollectionGroup() *schema.Resource { }, }, "source_addresses": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: validation.Any( validation.IsIPAddress, validation.IsCIDR, @@ -333,15 +332,15 @@ func resourceFirewallPolicyRuleCollectionGroup() *schema.Resource { }, }, "source_ip_groups": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: validation.StringIsNotEmpty, }, }, "destination_address": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ValidateFunc: validation.Any( validation.IsIPAddress, @@ -349,20 +348,20 @@ func resourceFirewallPolicyRuleCollectionGroup() *schema.Resource { ), }, "destination_ports": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: 
&pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: azValidate.PortOrPortRangeWithin(1, 64000), }, }, "translated_address": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.IsIPAddress, }, "translated_port": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Required: true, ValidateFunc: validation.IsPortNumber, }, @@ -376,7 +375,7 @@ func resourceFirewallPolicyRuleCollectionGroup() *schema.Resource { } } -func resourceFirewallPolicyRuleCollectionGroupCreateUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceFirewallPolicyRuleCollectionGroupCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Firewall.FirewallPolicyRuleGroupClient ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -409,9 +408,9 @@ func resourceFirewallPolicyRuleCollectionGroupCreateUpdate(d *schema.ResourceDat }, } var rulesCollections []network.BasicFirewallPolicyRuleCollection - rulesCollections = append(rulesCollections, expandFirewallPolicyRuleCollectionApplication(d.Get("application_rule_collection").(*schema.Set).List())...) - rulesCollections = append(rulesCollections, expandFirewallPolicyRuleCollectionNetwork(d.Get("network_rule_collection").(*schema.Set).List())...) - rulesCollections = append(rulesCollections, expandFirewallPolicyRuleCollectionNat(d.Get("nat_rule_collection").(*schema.Set).List())...) + rulesCollections = append(rulesCollections, expandFirewallPolicyRuleCollectionApplication(d.Get("application_rule_collection").(*pluginsdk.Set).List())...) + rulesCollections = append(rulesCollections, expandFirewallPolicyRuleCollectionNetwork(d.Get("network_rule_collection").(*pluginsdk.Set).List())...) + rulesCollections = append(rulesCollections, expandFirewallPolicyRuleCollectionNat(d.Get("nat_rule_collection").(*pluginsdk.Set).List())...) 
param.FirewallPolicyRuleCollectionGroupProperties.RuleCollections = &rulesCollections future, err := client.CreateOrUpdate(ctx, policyId.ResourceGroup, policyId.Name, name, param) @@ -438,7 +437,7 @@ func resourceFirewallPolicyRuleCollectionGroupCreateUpdate(d *schema.ResourceDat return resourceFirewallPolicyRuleCollectionGroupRead(d, meta) } -func resourceFirewallPolicyRuleCollectionGroupRead(d *schema.ResourceData, meta interface{}) error { +func resourceFirewallPolicyRuleCollectionGroupRead(d *pluginsdk.ResourceData, meta interface{}) error { subscriptionId := meta.(*clients.Client).Account.SubscriptionId client := meta.(*clients.Client).Firewall.FirewallPolicyRuleGroupClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) @@ -482,7 +481,7 @@ func resourceFirewallPolicyRuleCollectionGroupRead(d *schema.ResourceData, meta return nil } -func resourceFirewallPolicyRuleCollectionGroupDelete(d *schema.ResourceData, meta interface{}) error { +func resourceFirewallPolicyRuleCollectionGroupDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Firewall.FirewallPolicyRuleGroupClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() @@ -527,7 +526,7 @@ func expandFirewallPolicyRuleCollectionNat(input []interface{}) []network.BasicF Action: &network.FirewallPolicyNatRuleCollectionAction{ Type: network.FirewallPolicyNatRuleCollectionActionType(rule["action"].(string)), }, - Rules: expandFirewallPolicyRuleNat(rule["rule"].(*schema.Set).List()), + Rules: expandFirewallPolicyRuleNat(rule["rule"].(*pluginsdk.Set).List()), } result = append(result, output) } @@ -545,7 +544,7 @@ func expandFirewallPolicyFilterRuleCollection(input []interface{}, f func(input Name: utils.String(rule["name"].(string)), Priority: utils.Int32(int32(rule["priority"].(int))), RuleCollectionType: network.RuleCollectionTypeFirewallPolicyFilterRuleCollection, - Rules: f(rule["rule"].(*schema.Set).List()), + 
Rules: f(rule["rule"].(*pluginsdk.Set).List()), } result = append(result, output) } @@ -557,7 +556,7 @@ func expandFirewallPolicyRuleApplication(input []interface{}) *[]network.BasicFi for _, e := range input { condition := e.(map[string]interface{}) var protocols []network.FirewallPolicyRuleApplicationProtocol - for _, p := range condition["protocols"].(*schema.Set).List() { + for _, p := range condition["protocols"].(*pluginsdk.Set).List() { proto := p.(map[string]interface{}) protocols = append(protocols, network.FirewallPolicyRuleApplicationProtocol{ ProtocolType: network.FirewallPolicyRuleApplicationProtocolType(proto["type"].(string)), @@ -568,10 +567,10 @@ func expandFirewallPolicyRuleApplication(input []interface{}) *[]network.BasicFi Name: utils.String(condition["name"].(string)), RuleType: network.RuleTypeApplicationRule, Protocols: &protocols, - SourceAddresses: utils.ExpandStringSlice(condition["source_addresses"].(*schema.Set).List()), - SourceIPGroups: utils.ExpandStringSlice(condition["source_ip_groups"].(*schema.Set).List()), - TargetFqdns: utils.ExpandStringSlice(condition["destination_fqdns"].(*schema.Set).List()), - FqdnTags: utils.ExpandStringSlice(condition["destination_fqdn_tags"].(*schema.Set).List()), + SourceAddresses: utils.ExpandStringSlice(condition["source_addresses"].(*pluginsdk.Set).List()), + SourceIPGroups: utils.ExpandStringSlice(condition["source_ip_groups"].(*pluginsdk.Set).List()), + TargetFqdns: utils.ExpandStringSlice(condition["destination_fqdns"].(*pluginsdk.Set).List()), + FqdnTags: utils.ExpandStringSlice(condition["destination_fqdn_tags"].(*pluginsdk.Set).List()), } result = append(result, output) } @@ -583,19 +582,19 @@ func expandFirewallPolicyRuleNetwork(input []interface{}) *[]network.BasicFirewa for _, e := range input { condition := e.(map[string]interface{}) var protocols []network.FirewallPolicyRuleNetworkProtocol - for _, p := range condition["protocols"].(*schema.Set).List() { + for _, p := range 
condition["protocols"].(*pluginsdk.Set).List() { protocols = append(protocols, network.FirewallPolicyRuleNetworkProtocol(p.(string))) } output := &network.Rule{ Name: utils.String(condition["name"].(string)), RuleType: network.RuleTypeNetworkRule, IPProtocols: &protocols, - SourceAddresses: utils.ExpandStringSlice(condition["source_addresses"].(*schema.Set).List()), - SourceIPGroups: utils.ExpandStringSlice(condition["source_ip_groups"].(*schema.Set).List()), - DestinationAddresses: utils.ExpandStringSlice(condition["destination_addresses"].(*schema.Set).List()), - DestinationIPGroups: utils.ExpandStringSlice(condition["destination_ip_groups"].(*schema.Set).List()), - DestinationFqdns: utils.ExpandStringSlice(condition["destination_fqdns"].(*schema.Set).List()), - DestinationPorts: utils.ExpandStringSlice(condition["destination_ports"].(*schema.Set).List()), + SourceAddresses: utils.ExpandStringSlice(condition["source_addresses"].(*pluginsdk.Set).List()), + SourceIPGroups: utils.ExpandStringSlice(condition["source_ip_groups"].(*pluginsdk.Set).List()), + DestinationAddresses: utils.ExpandStringSlice(condition["destination_addresses"].(*pluginsdk.Set).List()), + DestinationIPGroups: utils.ExpandStringSlice(condition["destination_ip_groups"].(*pluginsdk.Set).List()), + DestinationFqdns: utils.ExpandStringSlice(condition["destination_fqdns"].(*pluginsdk.Set).List()), + DestinationPorts: utils.ExpandStringSlice(condition["destination_ports"].(*pluginsdk.Set).List()), } result = append(result, output) } @@ -607,7 +606,7 @@ func expandFirewallPolicyRuleNat(input []interface{}) *[]network.BasicFirewallPo for _, e := range input { condition := e.(map[string]interface{}) var protocols []network.FirewallPolicyRuleNetworkProtocol - for _, p := range condition["protocols"].(*schema.Set).List() { + for _, p := range condition["protocols"].(*pluginsdk.Set).List() { protocols = append(protocols, network.FirewallPolicyRuleNetworkProtocol(p.(string))) } destinationAddresses := 
[]string{condition["destination_address"].(string)} @@ -615,10 +614,10 @@ func expandFirewallPolicyRuleNat(input []interface{}) *[]network.BasicFirewallPo Name: utils.String(condition["name"].(string)), RuleType: network.RuleTypeNatRule, IPProtocols: &protocols, - SourceAddresses: utils.ExpandStringSlice(condition["source_addresses"].(*schema.Set).List()), - SourceIPGroups: utils.ExpandStringSlice(condition["source_ip_groups"].(*schema.Set).List()), + SourceAddresses: utils.ExpandStringSlice(condition["source_addresses"].(*pluginsdk.Set).List()), + SourceIPGroups: utils.ExpandStringSlice(condition["source_ip_groups"].(*pluginsdk.Set).List()), DestinationAddresses: &destinationAddresses, - DestinationPorts: utils.ExpandStringSlice(condition["destination_ports"].(*schema.Set).List()), + DestinationPorts: utils.ExpandStringSlice(condition["destination_ports"].(*pluginsdk.Set).List()), TranslatedAddress: utils.String(condition["translated_address"].(string)), TranslatedPort: utils.String(strconv.Itoa(condition["translated_port"].(int))), } diff --git a/azurerm/internal/services/firewall/firewall_policy_rule_collection_group_resource_test.go b/azurerm/internal/services/firewall/firewall_policy_rule_collection_group_resource_test.go index b68df6dd74d1..fd1106ddaf66 100644 --- a/azurerm/internal/services/firewall/firewall_policy_rule_collection_group_resource_test.go +++ b/azurerm/internal/services/firewall/firewall_policy_rule_collection_group_resource_test.go @@ -5,12 +5,11 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/firewall/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -21,10 +20,10 @@ func TestAccFirewallPolicyRuleCollectionGroup_basic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_firewall_policy_rule_collection_group", "test") r := FirewallPolicyRuleCollectionGroupResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -36,10 +35,10 @@ func TestAccFirewallPolicyRuleCollectionGroup_complete(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_firewall_policy_rule_collection_group", "test") r := FirewallPolicyRuleCollectionGroupResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.complete(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -51,24 +50,24 @@ func TestAccFirewallPolicyRuleCollectionGroup_update(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_firewall_policy_rule_collection_group", "test") r := FirewallPolicyRuleCollectionGroupResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.complete(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.update(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.complete(data), - Check: 
resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -80,10 +79,10 @@ func TestAccFirewallPolicyRuleCollectionGroup_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_firewall_policy_rule_collection_group", "test") r := FirewallPolicyRuleCollectionGroupResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -91,7 +90,7 @@ func TestAccFirewallPolicyRuleCollectionGroup_requiresImport(t *testing.T) { }) } -func (FirewallPolicyRuleCollectionGroupResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (FirewallPolicyRuleCollectionGroupResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { var id, err = parse.FirewallPolicyRuleCollectionGroupID(state.ID) if err != nil { return nil, err @@ -184,7 +183,7 @@ resource "azurerm_firewall_policy_rule_collection_group" "test" { port = 443 } source_addresses = ["10.0.0.1"] - destination_fqdns = ["terraform.io"] + destination_fqdns = ["pluginsdk.io"] } rule { name = "app_rule_collection1_rule2" @@ -197,7 +196,7 @@ resource "azurerm_firewall_policy_rule_collection_group" "test" { port = 443 } source_ip_groups = [azurerm_ip_group.test_source.id] - destination_fqdns = ["terraform.io"] + destination_fqdns = ["pluginsdk.io"] } rule { name = "app_rule_collection1_rule3" @@ -329,7 +328,7 @@ resource "azurerm_firewall_policy_rule_collection_group" "test" { port = 443 } source_addresses = ["10.0.0.1", "10.0.0.2"] - destination_fqdns = ["terraform.io"] + destination_fqdns = ["pluginsdk.io"] } rule { name = "app_rule_collection1_rule2" @@ -338,7 +337,7 @@ resource "azurerm_firewall_policy_rule_collection_group" 
"test" { port = 80 } source_ip_groups = [azurerm_ip_group.test_source.id] - destination_fqdns = ["terraform.io"] + destination_fqdns = ["pluginsdk.io"] } rule { name = "app_rule_collection1_rule3" diff --git a/azurerm/internal/services/firewall/firewall_resource.go b/azurerm/internal/services/firewall/firewall_resource.go index cecd1357f9e1..8e2e368c951e 100644 --- a/azurerm/internal/services/firewall/firewall_resource.go +++ b/azurerm/internal/services/firewall/firewall_resource.go @@ -6,9 +6,7 @@ import ( "strings" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" @@ -18,14 +16,15 @@ import ( networkValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) var azureFirewallResourceName = "azurerm_firewall" -func resourceFirewall() *schema.Resource { - return &schema.Resource{ +func resourceFirewall() *pluginsdk.Resource { + return &pluginsdk.Resource{ Create: resourceFirewallCreateUpdate, Read: resourceFirewallRead, Update: resourceFirewallCreateUpdate, @@ -33,16 +32,16 @@ func resourceFirewall() *schema.Resource { 
// TODO: replace this with an importer which validates the ID during import Importer: pluginsdk.DefaultImporter(), - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(90 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(90 * time.Minute), - Delete: schema.DefaultTimeout(90 * time.Minute), + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(90 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(90 * time.Minute), + Delete: pluginsdk.DefaultTimeout(90 * time.Minute), }, - Schema: map[string]*schema.Schema{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.FirewallName, @@ -54,57 +53,57 @@ func resourceFirewall() *schema.Resource { // TODO 3.0: change this to required "sku_name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Computed: true, ForceNew: true, ValidateFunc: validation.StringInSlice([]string{ - string(network.AZFWHub), - string(network.AZFWVNet), + string(network.AzureFirewallSkuNameAZFWHub), + string(network.AzureFirewallSkuNameAZFWVNet), }, false), }, // TODO 3.0: change this to required "sku_tier": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Computed: true, ForceNew: true, ValidateFunc: validation.StringInSlice([]string{ - string(network.Premium), - string(network.Standard), + string(network.AzureFirewallSkuTierPremium), + string(network.AzureFirewallSkuTierStandard), }, false), }, "firewall_policy_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ValidateFunc: validate.FirewallPolicyID, }, "ip_configuration": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "name": { - 
Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringIsNotEmpty, }, "subnet_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, ForceNew: true, ValidateFunc: validate.FirewallSubnetName, }, "public_ip_address_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: networkValidate.PublicIpAddressID, }, "private_ip_address": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, }, @@ -112,30 +111,30 @@ func resourceFirewall() *schema.Resource { }, "management_ip_configuration": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, ForceNew: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "name": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringIsNotEmpty, }, "subnet_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ForceNew: true, ValidateFunc: validate.FirewallManagementSubnetName, }, "public_ip_address_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: networkValidate.PublicIpAddressID, }, "private_ip_address": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, }, @@ -143,7 +142,7 @@ func resourceFirewall() *schema.Resource { }, "threat_intel_mode": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Optional: true, Default: string(network.AzureFirewallThreatIntelModeAlert), ValidateFunc: validation.StringInSlice([]string{ @@ -157,21 +156,21 @@ func resourceFirewall() *schema.Resource { }, "dns_servers": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, MinItems: 1, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: validation.IsIPAddress, }, }, 
"private_ip_ranges": { - Type: schema.TypeSet, + Type: pluginsdk.TypeSet, Optional: true, MinItems: 1, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, ValidateFunc: validation.Any( validation.IsCIDR, validation.StringInSlice([]string{"IANAPrivateRanges"}, false), @@ -180,29 +179,29 @@ func resourceFirewall() *schema.Resource { }, "virtual_hub": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Optional: true, MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ "virtual_hub_id": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Required: true, ValidateFunc: networkValidate.VirtualHubID, }, "public_ip_count": { - Type: schema.TypeInt, + Type: pluginsdk.TypeInt, Optional: true, ValidateFunc: validation.IntAtLeast(1), Default: 1, }, "public_ip_addresses": { - Type: schema.TypeList, + Type: pluginsdk.TypeList, Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, + Elem: &pluginsdk.Schema{Type: pluginsdk.TypeString}, }, "private_ip_address": { - Type: schema.TypeString, + Type: pluginsdk.TypeString, Computed: true, }, }, @@ -216,7 +215,7 @@ func resourceFirewall() *schema.Resource { } } -func resourceFirewallCreateUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceFirewallCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Firewall.AzureFirewallsClient ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -318,7 +317,7 @@ func resourceFirewallCreateUpdate(d *schema.ResourceData, meta interface{}) erro } } - if privateIpRangeSetting := expandFirewallPrivateIpRange(d.Get("private_ip_ranges").(*schema.Set).List()); privateIpRangeSetting != nil { + if privateIpRangeSetting := expandFirewallPrivateIpRange(d.Get("private_ip_ranges").(*pluginsdk.Set).List()); privateIpRangeSetting != nil { for 
k, v := range privateIpRangeSetting { parameters.AdditionalProperties[k] = v } @@ -373,7 +372,7 @@ func resourceFirewallCreateUpdate(d *schema.ResourceData, meta interface{}) erro return resourceFirewallRead(d, meta) } -func resourceFirewallRead(d *schema.ResourceData, meta interface{}) error { +func resourceFirewallRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Firewall.AzureFirewallsClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() @@ -447,7 +446,7 @@ func resourceFirewallRead(d *schema.ResourceData, meta interface{}) error { return tags.FlattenAndSet(d, read.Tags) } -func resourceFirewallDelete(d *schema.ResourceData, meta interface{}) error { +func resourceFirewallDelete(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Firewall.AzureFirewallsClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() diff --git a/azurerm/internal/services/firewall/firewall_resource_test.go b/azurerm/internal/services/firewall/firewall_resource_test.go index 489e656fab23..62464027793d 100644 --- a/azurerm/internal/services/firewall/firewall_resource_test.go +++ b/azurerm/internal/services/firewall/firewall_resource_test.go @@ -6,13 +6,12 @@ import ( "strings" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/firewall/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -23,10 +22,10 @@ func TestAccFirewall_basic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_firewall", "test") r := FirewallResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("ip_configuration.0.name").HasValue("configuration"), check.That(data.ResourceName).Key("ip_configuration.0.private_ip_address").Exists(), @@ -40,31 +39,31 @@ func TestAccFirewall_enableDNS(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_firewall", "test") r := FirewallResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.enableDNS(data, "1.1.1.1", "8.8.8.8"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.enableDNS(data, "1.1.1.1"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -76,10 +75,10 @@ func TestAccFirewall_withManagementIp(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_firewall", "test") r := FirewallResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.withManagementIp(data), - Check: resource.ComposeTestCheckFunc( + Check: 
acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("ip_configuration.0.name").HasValue("configuration"), check.That(data.ResourceName).Key("ip_configuration.0.private_ip_address").Exists(), @@ -95,10 +94,10 @@ func TestAccFirewall_withMultiplePublicIPs(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_firewall", "test") r := FirewallResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.multiplePublicIps(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("ip_configuration.0.name").HasValue("configuration"), check.That(data.ResourceName).Key("ip_configuration.0.private_ip_address").Exists(), @@ -114,10 +113,10 @@ func TestAccFirewall_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_firewall", "test") r := FirewallResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -132,10 +131,10 @@ func TestAccFirewall_withTags(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_firewall", "test") r := FirewallResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.withTags(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("tags.%").HasValue("2"), check.That(data.ResourceName).Key("tags.environment").HasValue("Production"), @@ -144,7 +143,7 @@ func TestAccFirewall_withTags(t *testing.T) { }, { Config: r.withUpdatedTags(data), - Check: resource.ComposeTestCheckFunc( + Check: 
acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("tags.%").HasValue("1"), check.That(data.ResourceName).Key("tags.environment").HasValue("staging"), @@ -160,10 +159,10 @@ func TestAccFirewall_withZones(t *testing.T) { zones := []string{"1"} zonesUpdate := []string{"1", "3"} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.withZones(data, zones), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("zones.#").HasValue("1"), check.That(data.ResourceName).Key("zones.0").HasValue("1"), @@ -171,7 +170,7 @@ func TestAccFirewall_withZones(t *testing.T) { }, { Config: r.withZones(data, zonesUpdate), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("zones.#").HasValue("2"), @@ -186,10 +185,10 @@ func TestAccFirewall_withoutZone(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_firewall", "test") r := FirewallResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.withoutZone(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -201,7 +200,7 @@ func TestAccFirewall_disappears(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_firewall", "test") r := FirewallResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ data.DisappearsStep(acceptance.DisappearsStepData{ Config: r.basic, TestResource: r, @@ -213,17 +212,17 @@ func TestAccFirewall_withFirewallPolicy(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_firewall", "test") r := FirewallResource{} - data.ResourceTest(t, r, []resource.TestStep{ + 
data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.withFirewallPolicy(data, "pol-01"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), { Config: r.withFirewallPolicy(data, "pol-02"), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, @@ -235,10 +234,10 @@ func TestAccFirewall_inVirtualHub(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_firewall", "test") r := FirewallResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.inVirtualHub(data, 1), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("virtual_hub.0.public_ip_addresses.#").HasValue("1"), check.That(data.ResourceName).Key("virtual_hub.0.private_ip_address").Exists(), @@ -247,7 +246,7 @@ func TestAccFirewall_inVirtualHub(t *testing.T) { data.ImportStep(), { Config: r.inVirtualHub(data, 2), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("virtual_hub.0.public_ip_addresses.#").HasValue("2"), check.That(data.ResourceName).Key("virtual_hub.0.private_ip_address").Exists(), @@ -256,7 +255,7 @@ func TestAccFirewall_inVirtualHub(t *testing.T) { data.ImportStep(), { Config: r.inVirtualHub(data, 1), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("virtual_hub.0.public_ip_addresses.#").HasValue("1"), check.That(data.ResourceName).Key("virtual_hub.0.private_ip_address").Exists(), @@ -270,10 +269,10 @@ func TestAccFirewall_privateRanges(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_firewall", 
"test") r := FirewallResource{} - data.ResourceTest(t, r, []resource.TestStep{ + data.ResourceTest(t, r, []acceptance.TestStep{ { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("ip_configuration.0.name").HasValue("configuration"), check.That(data.ResourceName).Key("ip_configuration.0.private_ip_address").Exists(), @@ -282,7 +281,7 @@ func TestAccFirewall_privateRanges(t *testing.T) { data.ImportStep(), { Config: r.privateRanges(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("ip_configuration.0.name").HasValue("configuration"), check.That(data.ResourceName).Key("ip_configuration.0.private_ip_address").Exists(), @@ -291,7 +290,7 @@ func TestAccFirewall_privateRanges(t *testing.T) { data.ImportStep(), { Config: r.basic(data), - Check: resource.ComposeTestCheckFunc( + Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("ip_configuration.0.name").HasValue("configuration"), check.That(data.ResourceName).Key("ip_configuration.0.private_ip_address").Exists(), @@ -301,7 +300,7 @@ func TestAccFirewall_privateRanges(t *testing.T) { }) } -func (FirewallResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { +func (FirewallResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { var id, err = azure.ParseAzureResourceID(state.ID) if err != nil { return nil, err @@ -317,7 +316,7 @@ func (FirewallResource) Exists(ctx context.Context, clients *clients.Client, sta return utils.Bool(resp.AzureFirewallPropertiesFormat != nil), nil } -func (FirewallResource) Destroy(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) 
(*bool, error) { +func (FirewallResource) Destroy(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := parse.FirewallID(state.ID) if err != nil { return nil, err diff --git a/azurerm/internal/services/firewall/registration.go b/azurerm/internal/services/firewall/registration.go index 8c8b1fd134c7..97d396c0ad1c 100644 --- a/azurerm/internal/services/firewall/registration.go +++ b/azurerm/internal/services/firewall/registration.go @@ -1,7 +1,7 @@ package firewall import ( - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" ) type Registration struct{} @@ -19,16 +19,16 @@ func (r Registration) WebsiteCategories() []string { } // SupportedDataSources returns the supported Data Sources supported by this Service -func (r Registration) SupportedDataSources() map[string]*schema.Resource { - return map[string]*schema.Resource{ +func (r Registration) SupportedDataSources() map[string]*pluginsdk.Resource { + return map[string]*pluginsdk.Resource{ "azurerm_firewall": FirewallDataSource(), "azurerm_firewall_policy": FirewallDataSourcePolicy(), } } // SupportedResources returns the supported Resources supported by this Service -func (r Registration) SupportedResources() map[string]*schema.Resource { - return map[string]*schema.Resource{ +func (r Registration) SupportedResources() map[string]*pluginsdk.Resource { + return map[string]*pluginsdk.Resource{ "azurerm_firewall_application_rule_collection": resourceFirewallApplicationRuleCollection(), "azurerm_firewall_policy": resourceFirewallPolicy(), "azurerm_firewall_policy_rule_collection_group": resourceFirewallPolicyRuleCollectionGroup(), diff --git a/azurerm/internal/services/firewall/subresource.go b/azurerm/internal/services/firewall/subresource.go index fcfb8983ae10..0ddc32aab8de 100644 --- a/azurerm/internal/services/firewall/subresource.go +++ 
b/azurerm/internal/services/firewall/subresource.go @@ -1,7 +1,7 @@ package firewall import ( - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" ) func flattenNetworkSubResourceID(input *[]network.SubResource) []interface{} { diff --git a/azurerm/internal/services/firewall/validate/firewall_policy_name.go b/azurerm/internal/services/firewall/validate/firewall_policy_name.go index df9573b3dc43..ec798b56a374 100644 --- a/azurerm/internal/services/firewall/validate/firewall_policy_name.go +++ b/azurerm/internal/services/firewall/validate/firewall_policy_name.go @@ -3,7 +3,7 @@ package validate import ( "regexp" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" ) func FirewallPolicyName() func(i interface{}, k string) (warnings []string, errors []error) { diff --git a/azurerm/internal/services/firewall/validate/firewall_policy_rule_collection_group_name.go b/azurerm/internal/services/firewall/validate/firewall_policy_rule_collection_group_name.go index ed8abec66f12..ff34f86e4c6a 100644 --- a/azurerm/internal/services/firewall/validate/firewall_policy_rule_collection_group_name.go +++ b/azurerm/internal/services/firewall/validate/firewall_policy_rule_collection_group_name.go @@ -3,7 +3,7 @@ package validate import ( "regexp" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" ) func FirewallPolicyRuleCollectionGroupName() func(i interface{}, k string) (warnings []string, errors []error) { diff --git a/azurerm/internal/services/firewall/validate/firewall_policy_rule_name.go b/azurerm/internal/services/firewall/validate/firewall_policy_rule_name.go index 12840cd0296e..2aa398327717 100644 --- 
a/azurerm/internal/services/firewall/validate/firewall_policy_rule_name.go +++ b/azurerm/internal/services/firewall/validate/firewall_policy_rule_name.go @@ -3,7 +3,7 @@ package validate import ( "regexp" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" ) func FirewallPolicyRuleName() func(i interface{}, k string) (warnings []string, errors []error) { diff --git a/azurerm/internal/services/frontdoor/frontdoor_custom_https_configuration_resource.go b/azurerm/internal/services/frontdoor/frontdoor_custom_https_configuration_resource.go index 9dc89b767c40..e0780c97e8e2 100644 --- a/azurerm/internal/services/frontdoor/frontdoor_custom_https_configuration_resource.go +++ b/azurerm/internal/services/frontdoor/frontdoor_custom_https_configuration_resource.go @@ -7,7 +7,6 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/services/frontdoor/mgmt/2020-01-01/frontdoor" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/locks" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/frontdoor/migration" @@ -25,33 +24,32 @@ func resourceFrontDoorCustomHttpsConfiguration() *pluginsdk.Resource { Update: resourceFrontDoorCustomHttpsConfigurationCreateUpdate, Delete: resourceFrontDoorCustomHttpsConfigurationDelete, - Importer: &schema.ResourceImporter{ - State: func(d *pluginsdk.ResourceData, meta interface{}) ([]*pluginsdk.ResourceData, error) { - client := meta.(*clients.Client).Frontdoor.FrontDoorsFrontendClient - ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) - defer cancel() + Importer: pluginsdk.ImporterValidatingResourceIdThen(func(id string) error { + _, err := parse.CustomHttpsConfigurationID(id) + return err + }, func(ctx context.Context, d 
*pluginsdk.ResourceData, meta interface{}) ([]*pluginsdk.ResourceData, error) { + client := meta.(*clients.Client).Frontdoor.FrontDoorsFrontendClient - // validate that the passed ID is a valid custom HTTPS configuration ID - custom, err := parse.CustomHttpsConfigurationID(d.Id()) - if err != nil { - return []*pluginsdk.ResourceData{d}, fmt.Errorf("parsing Custom HTTPS Configuration ID %q for import: %v", d.Id(), err) - } + // validate that the passed ID is a valid custom HTTPS configuration ID + custom, err := parse.CustomHttpsConfigurationID(d.Id()) + if err != nil { + return []*pluginsdk.ResourceData{d}, fmt.Errorf("parsing Custom HTTPS Configuration ID %q for import: %v", d.Id(), err) + } - // convert the passed custom HTTPS configuration ID to a frontend endpoint ID - frontend := parse.NewFrontendEndpointID(custom.SubscriptionId, custom.ResourceGroup, custom.FrontDoorName, custom.CustomHttpsConfigurationName) + // convert the passed custom HTTPS configuration ID to a frontend endpoint ID + frontend := parse.NewFrontendEndpointID(custom.SubscriptionId, custom.ResourceGroup, custom.FrontDoorName, custom.CustomHttpsConfigurationName) - // validate that the frontend endpoint ID exists in the Frontdoor resource - if _, err = client.Get(ctx, custom.ResourceGroup, custom.FrontDoorName, custom.CustomHttpsConfigurationName); err != nil { - return []*pluginsdk.ResourceData{d}, fmt.Errorf("retrieving the Custom HTTPS Configuration(ID: %q) for the frontend endpoint (ID: %q): %s", custom.ID(), frontend.ID(), err) - } + // validate that the frontend endpoint ID exists in the Frontdoor resource + if _, err = client.Get(ctx, custom.ResourceGroup, custom.FrontDoorName, custom.CustomHttpsConfigurationName); err != nil { + return []*pluginsdk.ResourceData{d}, fmt.Errorf("retrieving the Custom HTTPS Configuration(ID: %q) for the frontend endpoint (ID: %q): %s", custom.ID(), frontend.ID(), err) + } - // set the new values for the custom HTTPS configuration resource - d.Set("id", 
custom.ID()) - d.Set("frontend_endpoint_id", frontend.ID()) + // set the new values for the custom HTTPS configuration resource + d.Set("id", custom.ID()) + d.Set("frontend_endpoint_id", frontend.ID()) - return []*pluginsdk.ResourceData{d}, nil - }, - }, + return []*pluginsdk.ResourceData{d}, nil + }), Timeouts: &pluginsdk.ResourceTimeout{ Create: pluginsdk.DefaultTimeout(6 * time.Hour), diff --git a/azurerm/internal/services/hdinsight/common_hdinsight.go b/azurerm/internal/services/hdinsight/common_hdinsight.go index e87fd616ee9d..f0f6e7f5fc9b 100644 --- a/azurerm/internal/services/hdinsight/common_hdinsight.go +++ b/azurerm/internal/services/hdinsight/common_hdinsight.go @@ -119,7 +119,7 @@ func hdinsightClusterUpdate(clusterKind string, readFunc pluginsdk.ReadFunc) plu Timeout: d.Timeout(pluginsdk.TimeoutUpdate), } - if _, err := stateConf.WaitForState(); err != nil { + if _, err := stateConf.WaitForStateContext(ctx); err != nil { return fmt.Errorf("Error waiting for HDInsight Cluster %q (Resource Group %q) to be running: %s", name, resourceGroup, err) } } diff --git a/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go index bab744421567..658310214986 100644 --- a/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go +++ b/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go @@ -299,7 +299,7 @@ func resourceHDInsightHadoopClusterCreate(d *pluginsdk.ResourceData, meta interf Timeout: d.Timeout(pluginsdk.TimeoutCreate), } - if _, err := stateConf.WaitForState(); err != nil { + if _, err := stateConf.WaitForStateContext(ctx); err != nil { return fmt.Errorf("waiting for HDInsight Cluster %q (Resource Group %q) to be running: %s", name, resourceGroup, err) } } diff --git a/azurerm/internal/services/hdinsight/hdinsight_kafka_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_kafka_cluster_resource.go index 
768fc9b576a6..bd607a9f3188 100644 --- a/azurerm/internal/services/hdinsight/hdinsight_kafka_cluster_resource.go +++ b/azurerm/internal/services/hdinsight/hdinsight_kafka_cluster_resource.go @@ -388,6 +388,9 @@ func resourceHDInsightKafkaClusterRead(d *pluginsdk.ResourceData, meta interface } func expandHDInsightKafkaComponentVersion(input []interface{}) map[string]*string { + if len(input) == 0 || input[0] == nil { + return map[string]*string{"kafka": utils.String("")} + } vs := input[0].(map[string]interface{}) return map[string]*string{ "kafka": utils.String(vs["kafka"].(string)), diff --git a/azurerm/internal/services/hpccache/hpc_cache_access_policy_resource_test.go b/azurerm/internal/services/hpccache/hpc_cache_access_policy_resource_test.go index 78d0b9e52ea2..0290dd592a97 100644 --- a/azurerm/internal/services/hpccache/hpc_cache_access_policy_resource_test.go +++ b/azurerm/internal/services/hpccache/hpc_cache_access_policy_resource_test.go @@ -5,12 +5,11 @@ import ( "fmt" "testing" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/hpccache" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/hpccache/parse" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/hpccache" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/hpccache/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) diff --git a/azurerm/internal/services/hpccache/hpc_cache_blob_nfs_target_resource.go b/azurerm/internal/services/hpccache/hpc_cache_blob_nfs_target_resource.go 
new file mode 100644 index 000000000000..796f7e10b4c6 --- /dev/null +++ b/azurerm/internal/services/hpccache/hpc_cache_blob_nfs_target_resource.go @@ -0,0 +1,238 @@ +package hpccache + +import ( + "fmt" + "log" + "time" + + "github.com/Azure/azure-sdk-for-go/services/storagecache/mgmt/2021-03-01/storagecache" + "github.com/hashicorp/go-azure-helpers/response" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/hpccache/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/hpccache/validate" + storageValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/storage/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func resourceHPCCacheBlobNFSTarget() *pluginsdk.Resource { + return &pluginsdk.Resource{ + Create: resourceHPCCacheBlobNFSTargetCreateUpdate, + Read: resourceHPCCacheBlobNFSTargetRead, + Update: resourceHPCCacheBlobNFSTargetCreateUpdate, + Delete: resourceHPCCacheBlobNFSTargetDelete, + + Importer: pluginsdk.ImporterValidatingResourceId(func(id string) error { + _, err := parse.StorageTargetID(id) + return err + }), + + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), + }, + + Schema: 
map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.StorageTargetName, + }, + + "resource_group_name": azure.SchemaResourceGroupName(), + + "cache_name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + + "namespace_path": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validate.CacheNamespacePath, + }, + + "storage_container_id": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: storageValidate.StorageContainerResourceManagerID, + }, + + // TODO: use SDK enums once following issue is addressed + // https://github.com/Azure/azure-rest-api-specs/issues/13839 + "usage_model": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{ + "READ_HEAVY_INFREQ", + "READ_HEAVY_CHECK_180", + "WRITE_WORKLOAD_15", + "WRITE_AROUND", + "WRITE_WORKLOAD_CHECK_30", + "WRITE_WORKLOAD_CHECK_60", + "WRITE_WORKLOAD_CLOUDWS", + }, false), + }, + + "access_policy_name": { + Type: pluginsdk.TypeString, + Optional: true, + Default: "default", + ValidateFunc: validation.StringIsNotEmpty, + }, + }, + } +} + +func resourceHPCCacheBlobNFSTargetCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).HPCCache.StorageTargetsClient + subscriptionId := meta.(*clients.Client).Account.SubscriptionId + ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) + defer cancel() + + name := d.Get("name").(string) + resourceGroup := d.Get("resource_group_name").(string) + cache := d.Get("cache_name").(string) + id := parse.NewStorageTargetID(subscriptionId, resourceGroup, cache, name) + + if d.IsNewResource() { + resp, err := client.Get(ctx, id.ResourceGroup, id.CacheName, id.Name) + if err != nil { + if !utils.ResponseWasNotFound(resp.Response) { + return fmt.Errorf("checking for existing %s: %+v", 
id, err) + } + } + + if !utils.ResponseWasNotFound(resp.Response) { + return tf.ImportAsExistsError("azurerm_hpc_cache_blob_nfs_target", id.ID()) + } + } + + namespacePath := d.Get("namespace_path").(string) + containerId := d.Get("storage_container_id").(string) + + // Construct parameters + namespaceJunction := []storagecache.NamespaceJunction{ + { + NamespacePath: &namespacePath, + TargetPath: utils.String("/"), + NfsExport: utils.String("/"), + NfsAccessPolicy: utils.String(d.Get("access_policy_name").(string)), + }, + } + param := &storagecache.StorageTarget{ + StorageTargetProperties: &storagecache.StorageTargetProperties{ + Junctions: &namespaceJunction, + TargetType: storagecache.StorageTargetTypeBlobNfs, + BlobNfs: &storagecache.BlobNfsTarget{ + Target: utils.String(containerId), + UsageModel: utils.String(d.Get("usage_model").(string)), + }, + }, + } + + future, err := client.CreateOrUpdate(ctx, id.ResourceGroup, id.CacheName, id.Name, param) + if err != nil { + return fmt.Errorf("Error creating %s: %+v", id, err) + } + + if err := future.WaitForCompletionRef(ctx, client.Client); err != nil { + return fmt.Errorf("Error waiting for %s: %+v", id, err) + } + + d.SetId(id.ID()) + + return resourceHPCCacheBlobNFSTargetRead(d, meta) +} + +func resourceHPCCacheBlobNFSTargetRead(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).HPCCache.StorageTargetsClient + ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.StorageTargetID(d.Id()) + if err != nil { + return err + } + + resp, err := client.Get(ctx, id.ResourceGroup, id.CacheName, id.Name) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + log.Printf("[DEBUG] %s was not found - removing from state!", id) + d.SetId("") + return nil + } + + return fmt.Errorf("retrieving %s: %+v", id, err) + } + + d.Set("name", id.Name) + d.Set("resource_group_name", id.ResourceGroup) + d.Set("cache_name", id.CacheName) + 
+ if props := resp.StorageTargetProperties; props != nil { + if props.TargetType != storagecache.StorageTargetTypeBlobNfs { + return fmt.Errorf("The type of this HPC Cache Target %s is not a Blob NFS Target", id) + } + + storageContainerId := "" + usageModel := "" + if b := props.BlobNfs; b != nil { + if b.Target != nil { + storageContainerId = *b.Target + } + if b.UsageModel != nil { + usageModel = *b.UsageModel + } + } + d.Set("storage_container_id", storageContainerId) + d.Set("usage_model", usageModel) + + namespacePath := "" + accessPolicy := "" + // There is only one namespace path allowed for the blob nfs target, + // which maps to the root path of it. + if props.Junctions != nil && len(*props.Junctions) == 1 && (*props.Junctions)[0].NamespacePath != nil { + namespacePath = *(*props.Junctions)[0].NamespacePath + accessPolicy = *(*props.Junctions)[0].NfsAccessPolicy + } + d.Set("namespace_path", namespacePath) + d.Set("access_policy_name", accessPolicy) + } + return nil +} + +func resourceHPCCacheBlobNFSTargetDelete(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).HPCCache.StorageTargetsClient + ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.StorageTargetID(d.Id()) + if err != nil { + return err + } + + future, err := client.Delete(ctx, id.ResourceGroup, id.CacheName, id.Name) + if err != nil { + return fmt.Errorf("deleting %s: %+v", id, err) + } + + if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { + if response.WasNotFound(future.Response()) { + return nil + } + return fmt.Errorf("waiting for deletion of %s: %+v", id, err) + } + + return nil +} diff --git a/azurerm/internal/services/hpccache/hpc_cache_blob_nfs_target_resource_test.go b/azurerm/internal/services/hpccache/hpc_cache_blob_nfs_target_resource_test.go new file mode 100644 index 000000000000..0a6bd17c2a47 --- /dev/null +++ 
b/azurerm/internal/services/hpccache/hpc_cache_blob_nfs_target_resource_test.go @@ -0,0 +1,342 @@ +package hpccache_test + +import ( + "context" + "fmt" + "testing" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/hpccache/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +type HPCCacheBlobNFSTargetResource struct { +} + +func TestAccHPCCacheBlobNFSTarget_basic(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_hpc_cache_blob_nfs_target", "test") + r := HPCCacheBlobNFSTargetResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccHPCCacheBlobNFSTarget_accessPolicy(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_hpc_cache_blob_nfs_target", "test") + r := HPCCacheBlobNFSTargetResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.accessPolicy(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.accessPolicyUpdate(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + 
}, + data.ImportStep(), + }) +} + +func TestAccHPCCacheBlobNFSTarget_update(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_hpc_cache_blob_nfs_target", "test") + r := HPCCacheBlobNFSTargetResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.namespace(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccHPCCacheBlobNFSTarget_requiresImport(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_hpc_cache_blob_nfs_target", "test") + r := HPCCacheBlobNFSTargetResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.RequiresImportErrorStep(r.requiresImport), + }) +} + +func (HPCCacheBlobNFSTargetResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { + id, err := parse.StorageTargetID(state.ID) + if err != nil { + return nil, err + } + + resp, err := clients.HPCCache.StorageTargetsClient.Get(ctx, id.ResourceGroup, id.CacheName, id.Name) + if err != nil { + return nil, fmt.Errorf("retrieving HPC Cache Blob Target (%s): %+v", id.String(), err) + } + + return utils.Bool(resp.ID != nil), nil +} + +func (r HPCCacheBlobNFSTargetResource) basic(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_hpc_cache_blob_nfs_target" "test" { + name = "acctest-HPCCTGT-%s" + resource_group_name = azurerm_resource_group.test.name + cache_name = azurerm_hpc_cache.test.name + storage_container_id = jsondecode(azurerm_resource_group_template_deployment.storage-containers.output_content).id.value + namespace_path = "/p1" + usage_model = "READ_HEAVY_INFREQ" +} +`, 
r.template(data), data.RandomString) +} + +func (r HPCCacheBlobNFSTargetResource) namespace(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_hpc_cache_blob_nfs_target" "test" { + name = "acctest-HPCCTGT-%s" + resource_group_name = azurerm_resource_group.test.name + cache_name = azurerm_hpc_cache.test.name + storage_container_id = jsondecode(azurerm_resource_group_template_deployment.storage-containers.output_content).id.value + namespace_path = "/p2" + usage_model = "READ_HEAVY_INFREQ" +} +`, r.template(data), data.RandomString) +} + +func (r HPCCacheBlobNFSTargetResource) accessPolicy(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_hpc_cache_access_policy" "test" { + name = "p1" + hpc_cache_id = azurerm_hpc_cache.test.id + access_rule { + scope = "default" + access = "rw" + } + + # This is not needed in Terraform v0.13, whilst needed in v0.14. + # Once https://github.com/hashicorp/terraform/issues/28193 is fixed, we can remove this lifecycle block. + lifecycle { + create_before_destroy = true + } +} + +resource "azurerm_hpc_cache_blob_nfs_target" "test" { + name = "acctest-HPCCTGT-%s" + resource_group_name = azurerm_resource_group.test.name + cache_name = azurerm_hpc_cache.test.name + storage_container_id = jsondecode(azurerm_resource_group_template_deployment.storage-containers.output_content).id.value + namespace_path = "/p1" + access_policy_name = azurerm_hpc_cache_access_policy.test.name + usage_model = "READ_HEAVY_INFREQ" +} +`, r.template(data), data.RandomString) +} + +func (r HPCCacheBlobNFSTargetResource) accessPolicyUpdate(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_hpc_cache_access_policy" "test" { + name = "p2" + hpc_cache_id = azurerm_hpc_cache.test.id + access_rule { + scope = "default" + access = "rw" + } + # This is necessary to make the Terraform apply order works correctly. 
+ # Without CBD: azurerm_hpc_cache_access_policy-p1 (destroy) -> azurerm_hpc_cache_blob_nfs_target (update) -> azurerm_hpc_cache_access_policy-p2 (create) + # (the 1st step wil fail as the access policy is under used by the blob target) + # With CBD : azurerm_hpc_cache_access_policy-p2 (create) -> azurerm_hpc_cache_blob_nfs_target (update) -> azurerm_hpc_cache_access_policy-p1 (delete) + lifecycle { + create_before_destroy = true + } +} + +resource "azurerm_hpc_cache_blob_nfs_target" "test" { + name = "acctest-HPCCTGT-%s" + resource_group_name = azurerm_resource_group.test.name + cache_name = azurerm_hpc_cache.test.name + storage_container_id = jsondecode(azurerm_resource_group_template_deployment.storage-containers.output_content).id.value + namespace_path = "/blob_storage1" + access_policy_name = azurerm_hpc_cache_access_policy.test.name + usage_model = "READ_HEAVY_INFREQ" +} +`, r.template(data), data.RandomString) +} + +func (r HPCCacheBlobNFSTargetResource) requiresImport(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_hpc_cache_blob_nfs_target" "import" { + name = azurerm_hpc_cache_blob_nfs_target.test.name + resource_group_name = azurerm_hpc_cache_blob_nfs_target.test.resource_group_name + cache_name = azurerm_hpc_cache_blob_nfs_target.test.cache_name + storage_container_id = azurerm_hpc_cache_blob_nfs_target.test.storage_container_id + namespace_path = azurerm_hpc_cache_blob_nfs_target.test.namespace_path + usage_model = azurerm_hpc_cache_blob_nfs_target.test.usage_model +} +`, r.basic(data)) +} + +func (HPCCacheBlobNFSTargetResource) template(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +provider "azuread" {} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-storage-%[1]d" + location = "%[2]s" +} + +resource "azurerm_virtual_network" "test" { + name = "acctest-VN-%[1]d" + address_space = ["10.0.0.0/16"] + location = azurerm_resource_group.test.location 
+ resource_group_name = azurerm_resource_group.test.name +} + +resource "azurerm_subnet" "test" { + name = "acctestsub-%[1]d" + resource_group_name = azurerm_resource_group.test.name + virtual_network_name = azurerm_virtual_network.test.name + address_prefix = "10.0.2.0/24" + service_endpoints = ["Microsoft.Storage"] +} + +data "azuread_service_principal" "test" { + display_name = "HPC Cache Resource Provider" +} + +resource "azurerm_storage_account" "test" { + name = "accteststorgacc%[3]s" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + account_tier = "Standard" + account_kind = "StorageV2" + account_replication_type = "LRS" + is_hns_enabled = true + nfsv3_enabled = true + enable_https_traffic_only = false + network_rules { + default_action = "Deny" + virtual_network_subnet_ids = [azurerm_subnet.test.id] + } +} + +# Due to https://github.com/terraform-providers/terraform-provider-azurerm/issues/2977 and the fact +# that the NFSv3 enabled storage account can't allow public network access - otherwise the NFSv3 protocol will fail, +# we have to use the ARM template to deploy the storage container as a workaround. +# Once the issue above got resolved, we can instead use the azurerm_storage_container resource. +resource "azurerm_resource_group_template_deployment" "storage-containers" { + name = "acctest-strgctn-deployment-%[1]d" + resource_group_name = azurerm_storage_account.test.resource_group_name + deployment_mode = "Incremental" + + parameters_content = jsonencode({ + name = { + value = "acctest-strgctn-hpc-%[1]d" + } + }) + + template_content = < 0 { + block, rest = pem.Decode(rest) + PEMBlocks = append(PEMBlocks, block) + } } - // note PFX passwords are set to an empty string in Key Vault, this include password protected PFX uploads. 
- pfxKey, pfxCert, err := pkcs12.Decode(pfxBytes, "") - if err != nil { - return fmt.Errorf("decoding certificate (%q): %+v", id.Name, err) + var pemKey []byte + var pemCerts [][]byte + + for _, block := range PEMBlocks { + if strings.Contains(block.Type, "PRIVATE KEY") { + pemKey = block.Bytes + } + + if strings.Contains(block.Type, "CERTIFICATE") { + log.Printf("[DEBUG] Adding Cerrtificate block") + pemCerts = append(pemCerts, block.Bytes) + } } - keyX509, err := x509.MarshalPKCS8PrivateKey(pfxKey) - if err != nil { - return fmt.Errorf("reading key from certificate (%q): %+v", id.Name, err) + var privateKey interface{} + + if *pfx.ContentType == "application/x-pkcs12" { + rsakey, err := x509.ParsePKCS1PrivateKey(pemKey) + if err != nil { + // try to parse as a EC key + eckey, err := x509.ParseECPrivateKey(pemKey) + if err != nil { + return fmt.Errorf("decoding private key: not RSA or ECDSA type (%q): %+v", id.Name, err) + } + privateKey = eckey + } else { + privateKey = rsakey + } + } else { + pkey, err := x509.ParsePKCS8PrivateKey(pemKey) + if err != nil { + return fmt.Errorf("decoding PKCS8 RSA private key (%q): %+v", id.Name, err) + } + privateKey = pkey + } + + var keyX509 []byte + if privateKey != nil { + switch v := privateKey.(type) { + case *ecdsa.PrivateKey: + keyX509, err = x509.MarshalECPrivateKey(privateKey.(*ecdsa.PrivateKey)) + if err != nil { + return fmt.Errorf("marshalling private key type %+v (%q): %+v", v, id.Name, err) + } + case *rsa.PrivateKey: + keyX509 = x509.MarshalPKCS1PrivateKey(privateKey.(*rsa.PrivateKey)) + default: + return fmt.Errorf("marshalling private key type %+v (%q): key type is not supported", v, id.Name) + } } // Encode Key and PEM @@ -173,19 +240,25 @@ func dataSourceArmKeyVaultCertificateDataRead(d *pluginsdk.ResourceData, meta in return fmt.Errorf("encoding Key Vault Certificate Key: %+v", err) } - certBlock := &pem.Block{ - Type: "CERTIFICATE", - Bytes: pfxCert.Raw, - } + certs := "" - var certPEM bytes.Buffer - err = 
pem.Encode(&certPEM, certBlock) - if err != nil { - return fmt.Errorf("encoding Key Vault Certificate PEM: %+v", err) + for _, pemCert := range pemCerts { + certBlock := &pem.Block{ + Type: "CERTIFICATE", + Bytes: pemCert, + } + + var certPEM bytes.Buffer + err = pem.Encode(&certPEM, certBlock) + if err != nil { + return fmt.Errorf("encoding Key Vault Certificate PEM: %+v", err) + } + certs += certPEM.String() } - d.Set("pem", certPEM.String()) + d.Set("pem", certs) d.Set("key", keyPEM.String()) + d.Set("certificates_count", len(pemCerts)) return tags.FlattenAndSet(d, cert.Tags) } diff --git a/azurerm/internal/services/keyvault/key_vault_certificate_data_data_source_test.go b/azurerm/internal/services/keyvault/key_vault_certificate_data_data_source_test.go index 2151133b018d..e098fba1dffd 100644 --- a/azurerm/internal/services/keyvault/key_vault_certificate_data_data_source_test.go +++ b/azurerm/internal/services/keyvault/key_vault_certificate_data_data_source_test.go @@ -28,6 +28,89 @@ func TestAccDataSourceKeyVaultCertificateData_basic(t *testing.T) { }) } +func TestAccDataSourceKeyVaultCertificateData_ecdsa_PFX(t *testing.T) { + data := acceptance.BuildTestData(t, "data.azurerm_key_vault_certificate_data", "test") + r := KeyVaultCertificateDataDataSource{} + + data.DataSourceTest(t, []acceptance.TestStep{ + { + Config: r.ecdsa_PFX(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).Key("hex").Exists(), + check.That(data.ResourceName).Key("pem").Exists(), + check.That(data.ResourceName).Key("key").Exists(), + ), + }, + }) +} + +func TestAccDataSourceKeyVaultCertificateData_ecdsa_PEM(t *testing.T) { + data := acceptance.BuildTestData(t, "data.azurerm_key_vault_certificate_data", "test") + r := KeyVaultCertificateDataDataSource{} + + data.DataSourceTest(t, []acceptance.TestStep{ + { + Config: r.ecdsa_PEM(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).Key("hex").Exists(), + 
check.That(data.ResourceName).Key("pem").Exists(), + check.That(data.ResourceName).Key("key").Exists(), + ), + }, + }) +} + +func TestAccDataSourceKeyVaultCertificateData_rsa_bundle_PEM(t *testing.T) { + data := acceptance.BuildTestData(t, "data.azurerm_key_vault_certificate_data", "test") + r := KeyVaultCertificateDataDataSource{} + + data.DataSourceTest(t, []acceptance.TestStep{ + { + Config: r.rsa_bundle_PEM(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).Key("hex").Exists(), + check.That(data.ResourceName).Key("pem").Exists(), + check.That(data.ResourceName).Key("key").Exists(), + check.That(data.ResourceName).Key("certificates_count").HasValue("2"), + ), + }, + }) +} + +func TestAccDataSourceKeyVaultCertificateData_rsa_single_PEM(t *testing.T) { + data := acceptance.BuildTestData(t, "data.azurerm_key_vault_certificate_data", "test") + r := KeyVaultCertificateDataDataSource{} + + data.DataSourceTest(t, []acceptance.TestStep{ + { + Config: r.rsa_single_PEM(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).Key("hex").Exists(), + check.That(data.ResourceName).Key("pem").Exists(), + check.That(data.ResourceName).Key("key").Exists(), + check.That(data.ResourceName).Key("certificates_count").HasValue("1"), + ), + }, + }) +} + +func TestAccDataSourceKeyVaultCertificateData_rsa_bundle_PFX(t *testing.T) { + data := acceptance.BuildTestData(t, "data.azurerm_key_vault_certificate_data", "test") + r := KeyVaultCertificateDataDataSource{} + + data.DataSourceTest(t, []acceptance.TestStep{ + { + Config: r.rsa_bundle_PFX(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).Key("hex").Exists(), + check.That(data.ResourceName).Key("pem").Exists(), + check.That(data.ResourceName).Key("key").Exists(), + check.That(data.ResourceName).Key("certificates_count").HasValue("2"), + ), + }, + }) +} + func (KeyVaultCertificateDataDataSource) basic(data acceptance.TestData) string { return 
fmt.Sprintf(` %s @@ -39,3 +122,63 @@ data "azurerm_key_vault_certificate_data" "test" { } `, KeyVaultCertificateResource{}.basicImportPFX(data)) } + +func (KeyVaultCertificateDataDataSource) ecdsa_PFX(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +data "azurerm_key_vault_certificate_data" "test" { + name = azurerm_key_vault_certificate.test.name + key_vault_id = azurerm_key_vault.test.id + version = azurerm_key_vault_certificate.test.version +} +`, KeyVaultCertificateResource{}.basicImportPFX_ECDSA(data)) +} + +func (KeyVaultCertificateDataDataSource) ecdsa_PEM(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +data "azurerm_key_vault_certificate_data" "test" { + name = azurerm_key_vault_certificate.test.name + key_vault_id = azurerm_key_vault.test.id + version = azurerm_key_vault_certificate.test.version +} +`, KeyVaultCertificateResource{}.basicImportPEM_ECDSA(data)) +} + +func (KeyVaultCertificateDataDataSource) rsa_bundle_PEM(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +data "azurerm_key_vault_certificate_data" "test" { + name = azurerm_key_vault_certificate.test.name + key_vault_id = azurerm_key_vault.test.id + version = azurerm_key_vault_certificate.test.version +} +`, KeyVaultCertificateResource{}.basicImportPEM_RSA_bundle(data)) +} + +func (KeyVaultCertificateDataDataSource) rsa_single_PEM(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +data "azurerm_key_vault_certificate_data" "test" { + name = azurerm_key_vault_certificate.test.name + key_vault_id = azurerm_key_vault.test.id + version = azurerm_key_vault_certificate.test.version +} +`, KeyVaultCertificateResource{}.basicImportPEM_RSA(data)) +} + +func (KeyVaultCertificateDataDataSource) rsa_bundle_PFX(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +data "azurerm_key_vault_certificate_data" "test" { + name = azurerm_key_vault_certificate.test.name + key_vault_id = azurerm_key_vault.test.id + version = 
azurerm_key_vault_certificate.test.version +} +`, KeyVaultCertificateResource{}.basicImportPFX_RSA_bundle(data)) +} diff --git a/azurerm/internal/services/keyvault/key_vault_certificate_issuer_resource.go b/azurerm/internal/services/keyvault/key_vault_certificate_issuer_resource.go index df75ff9391c8..d1371abbf804 100644 --- a/azurerm/internal/services/keyvault/key_vault_certificate_issuer_resource.go +++ b/azurerm/internal/services/keyvault/key_vault_certificate_issuer_resource.go @@ -6,7 +6,6 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/keyvault/parse" @@ -23,9 +22,10 @@ func resourceKeyVaultCertificateIssuer() *pluginsdk.Resource { Update: resourceKeyVaultCertificateIssuerCreateOrUpdate, Read: resourceKeyVaultCertificateIssuerRead, Delete: resourceKeyVaultCertificateIssuerDelete, - Importer: &schema.ResourceImporter{ - State: nestedItemResourceImporter, - }, + Importer: pluginsdk.ImporterValidatingResourceIdThen(func(id string) error { + _, err := parse.ParseNestedItemID(id) + return err + }, nestedItemResourceImporter), Timeouts: &pluginsdk.ResourceTimeout{ Create: pluginsdk.DefaultTimeout(30 * time.Minute), diff --git a/azurerm/internal/services/keyvault/key_vault_certificate_resource.go b/azurerm/internal/services/keyvault/key_vault_certificate_resource.go index 294ef139e6ed..0da277554344 100644 --- a/azurerm/internal/services/keyvault/key_vault_certificate_resource.go +++ b/azurerm/internal/services/keyvault/key_vault_certificate_resource.go @@ -13,7 +13,6 @@ import ( "github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault" "github.com/Azure/go-autorest/autorest" - 
"github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/keyvault/parse" @@ -33,9 +32,10 @@ func resourceKeyVaultCertificate() *pluginsdk.Resource { Read: resourceKeyVaultCertificateRead, Delete: resourceKeyVaultCertificateDelete, - Importer: &schema.ResourceImporter{ - State: nestedItemResourceImporter, - }, + Importer: pluginsdk.ImporterValidatingResourceIdThen(func(id string) error { + _, err := parse.ParseNestedItemID(id) + return err + }, nestedItemResourceImporter), Timeouts: &pluginsdk.ResourceTimeout{ Create: pluginsdk.DefaultTimeout(60 * time.Minute), @@ -462,7 +462,7 @@ func resourceKeyVaultCertificateCreate(d *pluginsdk.ResourceData, meta interface Timeout: d.Timeout(pluginsdk.TimeoutCreate), } - if _, err := stateConf.WaitForState(); err != nil { + if _, err := stateConf.WaitForStateContext(ctx); err != nil { return fmt.Errorf("Error waiting for Key Vault Secret %q to become available: %s", name, err) } log.Printf("[DEBUG] Secret %q recovered with ID: %q", name, *recoveredCertificate.ID) @@ -488,7 +488,7 @@ func resourceKeyVaultCertificateCreate(d *pluginsdk.ResourceData, meta interface stateConf.NotFoundChecks = int(math.Floor(float64(stateConf.Timeout) / float64(stateConf.PollInterval))) } - if _, err := stateConf.WaitForState(); err != nil { + if _, err := stateConf.WaitForStateContext(ctx); err != nil { return fmt.Errorf("Error waiting for Certificate %q in Vault %q to become available: %s", name, *keyVaultBaseUrl, err) } } diff --git a/azurerm/internal/services/keyvault/key_vault_certificate_resource_test.go b/azurerm/internal/services/keyvault/key_vault_certificate_resource_test.go index 42c387813456..00b7e994f493 100644 --- a/azurerm/internal/services/keyvault/key_vault_certificate_resource_test.go 
+++ b/azurerm/internal/services/keyvault/key_vault_certificate_resource_test.go @@ -410,6 +410,193 @@ resource "azurerm_key_vault_certificate" "test" { `, r.template(data), data.RandomString) } +func (r KeyVaultCertificateResource) basicImportPEM_ECDSA(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +%s + +resource "azurerm_key_vault_certificate" "test" { + name = "acctestcert%s" + key_vault_id = azurerm_key_vault.test.id + + certificate { + contents = filebase64("testdata/ecdsa.pem") + password = "" + } + + certificate_policy { + issuer_parameters { + name = "Self" + } + + key_properties { + curve = "P-256" + exportable = true + key_size = 256 + key_type = "EC" + reuse_key = false + } + + secret_properties { + content_type = "application/x-pem-file" + } + } +} +`, r.template(data), data.RandomString) +} + +func (r KeyVaultCertificateResource) basicImportPFX_ECDSA(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +%s + +resource "azurerm_key_vault_certificate" "test" { + name = "acctestcert%s" + key_vault_id = azurerm_key_vault.test.id + + certificate { + contents = filebase64("testdata/ecdsa.pfx") + password = "" + } + + certificate_policy { + issuer_parameters { + name = "Self" + } + + key_properties { + curve = "P-256" + exportable = true + key_size = 256 + key_type = "EC" + reuse_key = false + } + + secret_properties { + content_type = "application/x-pkcs12" + } + } +} +`, r.template(data), data.RandomString) +} + +func (r KeyVaultCertificateResource) basicImportPFX_RSA_bundle(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +%s + +resource "azurerm_key_vault_certificate" "test" { + name = "acctestcert%s" + key_vault_id = azurerm_key_vault.test.id + + certificate { + contents = filebase64("testdata/rsa_bundle.pfx") + password = "" + } + + certificate_policy { + issuer_parameters { + name = "Self" + } + + 
key_properties { + exportable = true + key_size = 2048 + key_type = "RSA" + reuse_key = false + } + + secret_properties { + content_type = "application/x-pkcs12" + } + } +} +`, r.template(data), data.RandomString) +} + +func (r KeyVaultCertificateResource) basicImportPEM_RSA(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +%s + +resource "azurerm_key_vault_certificate" "test" { + name = "acctestcert%s" + key_vault_id = azurerm_key_vault.test.id + + certificate { + contents = filebase64("testdata/rsa_single.pem") + password = "" + } + + certificate_policy { + issuer_parameters { + name = "Self" + } + + key_properties { + exportable = true + key_size = 4096 + key_type = "RSA" + reuse_key = false + } + + secret_properties { + content_type = "application/x-pem-file" + } + } +} +`, r.template(data), data.RandomString) +} + +func (r KeyVaultCertificateResource) basicImportPEM_RSA_bundle(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +%s + +resource "azurerm_key_vault_certificate" "test" { + name = "acctestcert%s" + key_vault_id = azurerm_key_vault.test.id + + certificate { + contents = filebase64("testdata/rsa_bundle.pem") + password = "" + } + + certificate_policy { + issuer_parameters { + name = "Self" + } + + key_properties { + exportable = true + key_size = 4096 + key_type = "RSA" + reuse_key = false + } + + secret_properties { + content_type = "application/x-pem-file" + } + } +} +`, r.template(data), data.RandomString) +} + func (r KeyVaultCertificateResource) requiresImport(data acceptance.TestData) string { return fmt.Sprintf(` %s diff --git a/azurerm/internal/services/keyvault/key_vault_key_resource.go b/azurerm/internal/services/keyvault/key_vault_key_resource.go index a2a06686eeed..e005b2ab4192 100644 --- a/azurerm/internal/services/keyvault/key_vault_key_resource.go +++ b/azurerm/internal/services/keyvault/key_vault_key_resource.go @@ -10,7 +10,6 @@ import ( 
"github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault" "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/date" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/keyvault/parse" @@ -24,13 +23,11 @@ import ( func resourceKeyVaultKey() *pluginsdk.Resource { return &pluginsdk.Resource{ - Create: resourceKeyVaultKeyCreate, - Read: resourceKeyVaultKeyRead, - Update: resourceKeyVaultKeyUpdate, - Delete: resourceKeyVaultKeyDelete, - Importer: &schema.ResourceImporter{ - State: nestedItemResourceImporter, - }, + Create: resourceKeyVaultKeyCreate, + Read: resourceKeyVaultKeyRead, + Update: resourceKeyVaultKeyUpdate, + Delete: resourceKeyVaultKeyDelete, + Importer: pluginsdk.DefaultImporter(), Timeouts: &pluginsdk.ResourceTimeout{ Create: pluginsdk.DefaultTimeout(30 * time.Minute), @@ -252,7 +249,7 @@ func resourceKeyVaultKeyCreate(d *pluginsdk.ResourceData, meta interface{}) erro Timeout: d.Timeout(pluginsdk.TimeoutCreate), } - if _, err := stateConf.WaitForState(); err != nil { + if _, err := stateConf.WaitForStateContext(ctx); err != nil { return fmt.Errorf("Error waiting for Key Vault Secret %q to become available: %s", name, err) } log.Printf("[DEBUG] Key %q recovered with ID: %q", name, *kid) diff --git a/azurerm/internal/services/keyvault/key_vault_resource.go b/azurerm/internal/services/keyvault/key_vault_resource.go index 9ec2c80fc65e..4e8bfb479540 100644 --- a/azurerm/internal/services/keyvault/key_vault_resource.go +++ b/azurerm/internal/services/keyvault/key_vault_resource.go @@ -382,7 +382,7 @@ func resourceKeyVaultCreate(d *pluginsdk.ResourceData, meta interface{}) error { Timeout: d.Timeout(pluginsdk.TimeoutCreate), } - if _, err := stateConf.WaitForState(); err != 
nil { + if _, err := stateConf.WaitForStateContext(ctx); err != nil { return fmt.Errorf("Error waiting for %s to become available: %s", id, err) } } diff --git a/azurerm/internal/services/keyvault/key_vault_resource_test.go b/azurerm/internal/services/keyvault/key_vault_resource_test.go index f59b85020fc2..85d645a1fb10 100644 --- a/azurerm/internal/services/keyvault/key_vault_resource_test.go +++ b/azurerm/internal/services/keyvault/key_vault_resource_test.go @@ -6,11 +6,10 @@ import ( "regexp" "testing" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/keyvault/parse" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/keyvault/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) diff --git a/azurerm/internal/services/keyvault/key_vault_secret_resource.go b/azurerm/internal/services/keyvault/key_vault_secret_resource.go index 3cf4e3b0620d..08e7b0013ea6 100644 --- a/azurerm/internal/services/keyvault/key_vault_secret_resource.go +++ b/azurerm/internal/services/keyvault/key_vault_secret_resource.go @@ -9,7 +9,6 @@ import ( "github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault" "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/date" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/keyvault/parse" @@ -27,9 +26,10 @@ 
func resourceKeyVaultSecret() *pluginsdk.Resource { Read: resourceKeyVaultSecretRead, Update: resourceKeyVaultSecretUpdate, Delete: resourceKeyVaultSecretDelete, - Importer: &schema.ResourceImporter{ - State: nestedItemResourceImporter, - }, + Importer: pluginsdk.ImporterValidatingResourceIdThen(func(id string) error { + _, err := parse.ParseNestedItemID(id) + return err + }, nestedItemResourceImporter), Timeouts: &pluginsdk.ResourceTimeout{ Create: pluginsdk.DefaultTimeout(30 * time.Minute), @@ -165,7 +165,7 @@ func resourceKeyVaultSecretCreate(d *pluginsdk.ResourceData, meta interface{}) e Timeout: d.Timeout(pluginsdk.TimeoutCreate), } - if _, err := stateConf.WaitForState(); err != nil { + if _, err := stateConf.WaitForStateContext(ctx); err != nil { return fmt.Errorf("Error waiting for Key Vault Secret %q to become available: %s", name, err) } log.Printf("[DEBUG] Secret %q recovered with ID: %q", name, *recoveredSecret.ID) diff --git a/azurerm/internal/services/keyvault/key_vault_secret_resource_test.go b/azurerm/internal/services/keyvault/key_vault_secret_resource_test.go index f4329d5730df..7720e27a9969 100644 --- a/azurerm/internal/services/keyvault/key_vault_secret_resource_test.go +++ b/azurerm/internal/services/keyvault/key_vault_secret_resource_test.go @@ -550,6 +550,7 @@ resource "azurerm_key_vault" "test" { secret_permissions = [ "Get", "Delete", + "List", "Purge", "Recover", "Set", diff --git a/azurerm/internal/services/keyvault/key_vault_secrets_data_source.go b/azurerm/internal/services/keyvault/key_vault_secrets_data_source.go new file mode 100644 index 000000000000..db8b35cea3fc --- /dev/null +++ b/azurerm/internal/services/keyvault/key_vault_secrets_data_source.go @@ -0,0 +1,101 @@ +package keyvault + +import ( + "fmt" + "net/url" + "strings" + "time" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/keyvault/parse" + 
keyVaultValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/keyvault/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func dataSourceKeyVaultSecrets() *pluginsdk.Resource { + return &pluginsdk.Resource{ + Read: dataSourceKeyVaultSecretsRead, + + Timeouts: &pluginsdk.ResourceTimeout{ + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + }, + + Schema: map[string]*pluginsdk.Schema{ + "key_vault_id": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: keyVaultValidate.VaultID, + }, + + "names": { + Type: pluginsdk.TypeList, + Computed: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + }, + }, + }, + } +} + +func dataSourceKeyVaultSecretsRead(d *pluginsdk.ResourceData, meta interface{}) error { + keyVaultsClient := meta.(*clients.Client).KeyVault + client := meta.(*clients.Client).KeyVault.ManagementClient + ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) + defer cancel() + + keyVaultId, err := parse.VaultID(d.Get("key_vault_id").(string)) + if err != nil { + return err + } + + keyVaultBaseUri, err := keyVaultsClient.BaseUriForKeyVault(ctx, *keyVaultId) + if err != nil { + return fmt.Errorf("fetching base vault url from id %q: %+v", *keyVaultId, err) + } + + secretList, err := client.GetSecretsComplete(ctx, *keyVaultBaseUri, utils.Int32(25)) + if err != nil { + return fmt.Errorf("Error making Read request on Azure KeyVault %q: %+v", *keyVaultId, err) + } + + d.SetId(keyVaultId.ID()) + + var names []string + + if secretList.Response().Value != nil { + for secretList.NotDone() { + for _, v := range *secretList.Response().Value { + name, err := parseNameFromSecretUrl(*v.ID) + if err != nil { + return err + } + names = append(names, *name) + err = 
secretList.NextWithContext(ctx) + if err != nil { + return fmt.Errorf("listing secrets on Azure KeyVault %q: %+v", *keyVaultId, err) + } + } + } + } + + d.Set("names", names) + d.Set("key_vault_id", keyVaultId.ID()) + + return nil +} + +func parseNameFromSecretUrl(input string) (*string, error) { + uri, err := url.Parse(input) + if err != nil { + return nil, err + } + // https://favoretti-keyvault.vault.azure.net/secrets/secret-name + segments := strings.Split(uri.Path, "/") + if len(segments) != 3 { + return nil, fmt.Errorf("expected a Path in the format `/secrets/secret-name` but got %q", uri.Path) + } + return &segments[2], nil +} diff --git a/azurerm/internal/services/keyvault/key_vault_secrets_data_source_test.go b/azurerm/internal/services/keyvault/key_vault_secrets_data_source_test.go new file mode 100644 index 000000000000..36611440ea8f --- /dev/null +++ b/azurerm/internal/services/keyvault/key_vault_secrets_data_source_test.go @@ -0,0 +1,45 @@ +package keyvault_test + +import ( + "fmt" + "testing" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" +) + +type KeyVaultSecretsDataSource struct { +} + +func TestAccDataSourceKeyVaultSecrets_basic(t *testing.T) { + data := acceptance.BuildTestData(t, "data.azurerm_key_vault_secrets", "test") + r := KeyVaultSecretsDataSource{} + + data.DataSourceTest(t, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).Key("names.#").HasValue("31"), + ), + }, + }) +} + +func (KeyVaultSecretsDataSource) basic(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_key_vault_secret" "test2" { + count = 30 + name = "secret-${count.index}" + value = "rick-and-morty" + key_vault_id = azurerm_key_vault.test.id +} + +data "azurerm_key_vault_secrets" "test" { + key_vault_id = azurerm_key_vault.test.id 
+ + depends_on = [azurerm_key_vault_secret.test, azurerm_key_vault_secret.test2] +} +`, KeyVaultSecretResource{}.basic(data)) +} diff --git a/azurerm/internal/services/keyvault/registration.go b/azurerm/internal/services/keyvault/registration.go index 395e92fbb8c2..3d4dd88157e5 100644 --- a/azurerm/internal/services/keyvault/registration.go +++ b/azurerm/internal/services/keyvault/registration.go @@ -28,6 +28,7 @@ func (r Registration) SupportedDataSources() map[string]*pluginsdk.Resource { "azurerm_key_vault_key": dataSourceKeyVaultKey(), "azurerm_key_vault_managed_hardware_security_module": dataSourceKeyVaultManagedHardwareSecurityModule(), "azurerm_key_vault_secret": dataSourceKeyVaultSecret(), + "azurerm_key_vault_secrets": dataSourceKeyVaultSecrets(), "azurerm_key_vault": dataSourceKeyVault(), } } diff --git a/azurerm/internal/services/keyvault/testdata/ecdsa.pem b/azurerm/internal/services/keyvault/testdata/ecdsa.pem new file mode 100644 index 000000000000..3f50146b781f --- /dev/null +++ b/azurerm/internal/services/keyvault/testdata/ecdsa.pem @@ -0,0 +1,17 @@ +-----BEGIN PRIVATE KEY----- +MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgQdZ3tkJBm474r1ib +ygA1pH4oqT8HJDMhOIIYl3gi0mGhRANCAAQBd8mEMHPljr2cIfCP1DF2wjJBtWTa +GoerxUTOXsxP0infq4XuaLbefm7giLADh4/NDpMCRr3H+ULTV4VNw4Mm +-----END PRIVATE KEY----- +-----BEGIN CERTIFICATE----- +MIIB2DCCAX4CCQCOZ+rirhZWMDAKBggqhkjOPQQDAjB0MQswCQYDVQQGEwJVUzET +MBEGA1UECAwKV2FzaGluZ3RvbjETMBEGA1UEBwwKV2FzaGluZ3RvbjETMBEGA1UE +CgwKVGVzdCBFQ0RTQTEOMAwGA1UECwwFRUNEU0ExFjAUBgNVBAMMDSouZXhhbXBs +ZS5jb20wHhcNMjEwNTI2MTk0MzI2WhcNMzEwNTI0MTk0MzI2WjB0MQswCQYDVQQG +EwJVUzETMBEGA1UECAwKV2FzaGluZ3RvbjETMBEGA1UEBwwKV2FzaGluZ3RvbjET +MBEGA1UECgwKVGVzdCBFQ0RTQTEOMAwGA1UECwwFRUNEU0ExFjAUBgNVBAMMDSou +ZXhhbXBsZS5jb20wWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQBd8mEMHPljr2c +IfCP1DF2wjJBtWTaGoerxUTOXsxP0infq4XuaLbefm7giLADh4/NDpMCRr3H+ULT +V4VNw4MmMAoGCCqGSM49BAMCA0gAMEUCIQDC0M7l+pd+tnbwsFlESYtb6EhFhWrv 
+6cH9m6BlkutD2gIgI6e/CC/kEqZuP2/GiffAYUre8Z7Fc2jJ2Y3wA1bvuIg= +-----END CERTIFICATE----- diff --git a/azurerm/internal/services/keyvault/testdata/ecdsa.pfx b/azurerm/internal/services/keyvault/testdata/ecdsa.pfx new file mode 100644 index 000000000000..90e4ceac2ce6 Binary files /dev/null and b/azurerm/internal/services/keyvault/testdata/ecdsa.pfx differ diff --git a/azurerm/internal/services/keyvault/testdata/rsa_bundle.pem b/azurerm/internal/services/keyvault/testdata/rsa_bundle.pem new file mode 100644 index 000000000000..5b2caa3040aa --- /dev/null +++ b/azurerm/internal/services/keyvault/testdata/rsa_bundle.pem @@ -0,0 +1,106 @@ +-----BEGIN CERTIFICATE----- +MIIFPDCCAyQCCQDdhjtC7nvdCjANBgkqhkiG9w0BAQsFADBgMQswCQYDVQQGEwJV +UzELMAkGA1UECAwCV3MxCzAJBgNVBAcMAldzMQswCQYDVQQKDAJXczEUMBIGA1UE +CwwLZXhhbXBsZS5jb20xFDASBgNVBAMMC2V4YW1wbGUuY29tMB4XDTIxMDUyNjIw +NTIwMVoXDTMxMDUyNDIwNTIwMVowYDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldz +MQswCQYDVQQHDAJXczELMAkGA1UECgwCV3MxFDASBgNVBAsMC2V4YW1wbGUuY29t +MRQwEgYDVQQDDAtleGFtcGxlLmNvbTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCC +AgoCggIBAMo5Ea1UnCdMworYq/2qJXnOF6KUdhKwftLIqbxdz/M0RpwGhv7zkhXH +60crqT/ch+RV13zzv9NC3r4Y60ThJV8Bi6WwtuOY0gK4Ld3zbejGnckVMQHPyCw9 +99m0B6EWJhFgDEDsV8SkFce69Z8yFEx8tRxv9mG8rgECvyFIlc/wbZYMtEN9dW51 +YxvcB8kJSYQ0ECiKGH9iGP7LSkvdN9wDasze1XaZ4LQbSji832ADW/eIDPfjrkHJ +8iuLYVlbgCmAbB41v0sgGZzG15wKENpZRAbxiUJSlUYjU7LNpAgS2I8rzfusNMZI +Q5Z52MTy9TQ9FBMsuuNa/F7IhG3IWO7Wn1xpSqKdDORwA5TiJ9dLZVapwdu2T1aC +2hXn8fAIJUSlhJSyzOKUwYHaVZYBKS31N8EiUGHJaEkO+YJ/kTIkeh66mooLJ5cs +j4MRixehw1Mqp8cxhTKnV7qeO4r1Bfs59UayTUP7VItTTyeAmfXwfG10iE34wHVN +owz2fdo64ctCXYvt5fGDbI7qV6GnkT+pxbSgj0IVNK8bt1uKCNshWw5HsPutEOdZ +BsmJ2safqhT7bvEeuXxDLV9Dgt8DpO3rpYQwY3MIicKQljZGmvnJHoKtHdpwJiFU +mL68XwklzBpCYPtOwA9CpQ/EtydmkMWYnFku159+nFORifae+dsBAgMBAAEwDQYJ +KoZIhvcNAQELBQADggIBAJNmAUyKyu0jK7Wk1JJoMVUAJ0ZYekM54pvFdamgZZ80 +jKS4jvbHhx40Z9SuGG15dDeHWs5WPn/h2R5Knaepsj48Ilu3Ar1L1gcwTklsW2VQ +JN34uwp1kXjk8//ynmbDePjvzG54b7x8N6pEmDG18xoWWroJ0EhoEYYGSZgAOXjR 
+eJtlBkdtivpFn430ue+Hv8l4TvHN7LbddbhxgPtycBfZ/zpbngVpGWjpWBb2ppoM +0PIG5dHZpxp59Q6RJiVM1J0Kdvet9IqcyLFY864Y2uvAuQ+TUYfLbQ3ZsdGOESRD +qIRr2m1R07zamzL9/RNaDj3kukMXXWCNus+rQaM6p+Lh2wlcXN+qs/c21kas029g +5Iltr9kpnGhHrWndjQEOPNdqu1RbFvCFvV4PVR9pXJTVwF7tvo1h5A98ZZxez6yy +yFgFy9Hj58YjQ1+/0Q2EOpQltW+ItYakPUXJncEoxj0nPi74UIK3gPjw5H47wRV2 +4qWAoAU12Hv0aU6SBoqBUmBnow++s9PNMm60+d7ZvbOoxF/vczfg063UHZUO62WR +bMAmKcfWfhBVbDwQRf9U7qyWCvRUnMNawvs6uZl4XQcUZyQvMG+LAagjys4DnwTN +US0QEeHw8nKI9WpXXqVHcLGDcFDVEHnVvDw83DTKUe8edzvL6NkqQskeF1IzqrqP +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEBTCCAe0CCQCCo3z4hND0RTANBgkqhkiG9w0BAQsFADBCMQswCQYDVQQGEwJV +UzELMAkGA1UECAwCQ0ExFDASBgNVBAoMC015T3JnLCBJbmMuMRAwDgYDVQQDDAdD +QSBUZXN0MB4XDTIxMDUyNjIwNDUwMFoXDTMxMDQwNDIwNDUwMFowRzELMAkGA1UE +BhMCVVMxCzAJBgNVBAgMAkNBMRQwEgYDVQQKDAtNeU9yZywgSW5jLjEVMBMGA1UE +AwwMbXlkb21haW4uY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA +xcat53vju0HeE4O/exh/dfuX9+opjfzAvCqiboCKKIxdxa+Kitq4okMqhqJseu8t +WSo42neJvb4/ZuLu4suWQui//U+9ztoI0Eyf3+ghSH1a1gj3gbDWckgdVqyrIPpq +TQWbsazpRQZbftmvB47gt0WW12VENUaJ7P2Al5n3GTAs23FDOl2DPLYpk8Ys+JXJ +TFOCCS9U3CAGRAvZj7IZnwfPFJhUOZADO+Anvee1/Zwfwf84Fklthp6VmcoDnn9K +yKLoh729iLBo+ER5XF6lCq75B7igASyL4X8lceLKkXZD8nPIHfCtR+eOyLjbw7FQ +OOyU+Q+ipWDq0KEcsAWXCwIDAQABMA0GCSqGSIb3DQEBCwUAA4ICAQAkh8HXKFBL +8+jd6UivdQ+lfJhxAiu0qNzF+GOXShVJaSZmsG28QK0eneFf8c8TKZ0tQH/pzWM3 +Wa9v6A6RgCdUL8a6dIzrS7zX3gT5HwAHngFo47cOY5/EtoLzTwayCXRBA27BUDjM ++TIJa1Fjo3LRwk0kd1NYt5nBKeaLNF8PdGRviqidSAm9pgCccT6R/jH9UR4v1QbT +W0NFosyBJ3v4J5vKAJZ2STiBh15Ncm/SCPQRLshr4rb+gVD8nvDx5XG2wtSY7INU +iLsenK1up3pMZL6//MA3nfRAX/1BjGd5lZMmZX0azUIC842y42bS37eytNT+BhTJ +zzMuvMjy21sNnKstC0qvkpI2cnMqRBsed5bTT2fKcGDebhl0dT/zKYR2SrqnjqeF +sB2oQcxjFqoqPbmnaFeBL+Zp/XGtbbK/xayKg/n5AoqSocUDcCZtnjKa7RFLTaZA +eOEQR6SvsPIocNdkI5/gPnyjsS6gF32lE3dSMeAM/CErHdfXgQYSSy6KKVKeMSw4 +8wFou8ADDlDpvYAyJ/NQ2RKYtaWzMcuVA9QnEi2+jjW5knFcW2rF1k26PKnLO2az +wRHoKgAPuao10LSqZy6UJu8RxblaiyxCvxxqZkBJfZrQY8Q2qNjwXDcfBXQEAGPN 
+/F57zMestr/rmbR6r1tT0lZbgiBxc0urRw== +-----END CERTIFICATE----- +-----BEGIN PRIVATE KEY----- +MIIJRAIBADANBgkqhkiG9w0BAQEFAASCCS4wggkqAgEAAoICAQDKORGtVJwnTMKK +2Kv9qiV5zheilHYSsH7SyKm8Xc/zNEacBob+85IVx+tHK6k/3IfkVdd887/TQt6+ +GOtE4SVfAYulsLbjmNICuC3d823oxp3JFTEBz8gsPffZtAehFiYRYAxA7FfEpBXH +uvWfMhRMfLUcb/ZhvK4BAr8hSJXP8G2WDLRDfXVudWMb3AfJCUmENBAoihh/Yhj+ +y0pL3TfcA2rM3tV2meC0G0o4vN9gA1v3iAz3465ByfIri2FZW4ApgGweNb9LIBmc +xtecChDaWUQG8YlCUpVGI1OyzaQIEtiPK837rDTGSEOWedjE8vU0PRQTLLrjWvxe +yIRtyFju1p9caUqinQzkcAOU4ifXS2VWqcHbtk9WgtoV5/HwCCVEpYSUsszilMGB +2lWWASkt9TfBIlBhyWhJDvmCf5EyJHoeupqKCyeXLI+DEYsXocNTKqfHMYUyp1e6 +njuK9QX7OfVGsk1D+1SLU08ngJn18HxtdIhN+MB1TaMM9n3aOuHLQl2L7eXxg2yO +6lehp5E/qcW0oI9CFTSvG7dbigjbIVsOR7D7rRDnWQbJidrGn6oU+27xHrl8Qy1f +Q4LfA6Tt66WEMGNzCInCkJY2Rpr5yR6CrR3acCYhVJi+vF8JJcwaQmD7TsAPQqUP +xLcnZpDFmJxZLteffpxTkYn2nvnbAQIDAQABAoICAQDIW/TkvHEfCxGxie4WnY8V ++Rk3z0qRR+fQJlruNDDGMaRIELuOPXwfMn4tfNfrWzAVDuw7dDI6OgYBaSUpkL17 +PUfEeasrI43ofd2MDBV4zdklk2aD+f/Fvfwk6hepBg3fpz9c0A5B2aVuyBF2kqLE +CcdUDqaZvdzVSYhfjNBKP1gElD/CorpIH+x7fO0FxbreTss3r2OISoSjZn4NXnhq +98PIinwLT3jwWh2Wy6OyOIHVZiobLa3IpzLW6/kfFOaNuBznba9D2uElkWmsw9hg +zpL1Ijkxyir2XCggf1knWqzQZcFnuv5q+sxkegNJV8OU53NNcEMW4l4r9xYmw5s/ +/bIO0SlWiOvl5cWiTkF4gjc/bSlgZYhBSLsSVt/tvdm5vk9uhnclUQCTq1Pwkn5X +ddlAT4dd2SkLvUv2hJbr+aoTI3PAQkzf7Wbu+prQ9oLbFysxbD+ghjcugejiVZcZ +381h43RuCudk1O3j+xcS/Fja3cDKp+RODl38sBVDj2MAhxparxu51A09FvyKY3Hh +uOkqAAwL6wevVBdCaihXYivxZkUMeeEydwtFJq+4L0lr27+VNYmP/AP7m4aZHmWr +k9MBkc6B9kH+5ZPqjC1JM/26OKjg1a67POqcXjvAZsGkQOU5+ThSuUQ1CCtf5qFQ +XHHFL4jun/mHuRMV3LxLcQKCAQEA6u88XLtLaz8EmxZjW1tFFNxzyYN2UUY812/T +68Ga7H5GMfedPNx9BsNINUM75L3vExtHVMdYdDtDrJE1ZmL+nk8WytVjC/SQ+8uc +D8xLI9pclYqwjI1L9xu5uJ36OBO1eMoD0mLg94InVHArgCCuSCg88oLpF2RPi24Z +2nUFCGS0QM0KNnWg4O6rHiIJY4LDoXZFe39I4uQ8y3dOUHbskOMBXY/bRWZQOfFX +pW1G+nwFFKg4v2DT2OsDfVal6USg3IgBBxc+2f5rNWsbMJ2/CAzWZndZ8xE5VZxk +n9seKCgdNW4WjNLahagPcUb/FH6JvoDDPmittsyJq8Lqi35O1QKCAQEA3Fr17yPj 
+ZcnjoDFzOakY5BPnsCta/Jx7ALHGXxL8XdhxVR0ZH1S5FpseiE37q4hurX/WsIwU +Hw+pVDs1EQWLMRSPM9ukQjK0NWHq0zNegIFBtAii4NfigIfvBm12L+6u1ceNUm/R +0HkAta7ewp2E8UMkI0aQ+47lWNCo0vzSQuw85bYESIWk8EoSmjs+Mel5sQDmP2Tz +ZJE2gpwVf6Q6Herm308xKjcr6VANv5XY0DTNooR/bhmjc/5R42DPUzlqffL+nkiv +H1yfJ7cYtElCdfHJ9i/fkXpiGfQGNgvvvlPQML67BStMDE3lkwyVtbrC0kP7dkMO +PfHkh+ghEBhpfQKCAQEAzb9UutWgeD0JWJUj0Jc/CcUDdmC2VvlPQVGDY/37ItB3 +wzzarLlgmXKeo17NbkdUwAPs1Xd7pNdeLhxBivh5isUy7dIxWwAFlS687yz4RV7w +JFsAwSbhd3kFjoRz5XaFQFKTREYqRTjsHsvZnO2CFFmA/tXdMwo7i52R8Lr4/F/N +aM7o+cbMbRE4NTyTwl173MTKlxxjBanjAEHtCRUNmtnjkT/yRXYUHpqZKpxoQ+gn +Pu8QO7Kd9WPke3lN9duPkXNeiwo1lg51b6PsfQg5i+BKSBSQZwoXjz1W9pScdYik +A+30u8HRdKo5U/hcXmNvWLJFyzs7n1LD4vPiGuY0PQKCAQEAjdC0qt2MAfx21Kg+ +ogNkE5WD7OtaOW1z9DR1mMadSMB93+tEOdfK7vOSKOMXb4P0xxCZWTIt2Pe5YI5i +Trwkose6igN3qWr8c8a63IYY8dc+M+HDAbWu/k191cMo5xxTxR4So9V3URYvEL6k +etfpsWQmo9VMhbmnfQt9O5yerZdgce48v3fTeRoS8tPh6Tl7qiEMDgyoYDbcwB43 +79f/1zVQtLNAzwyG5DczQfPsf8Mb/NYNNQynF4W7qfyqMcOn97slhzT8D9EKVQMP +kSf87+9WrWyOFBCRhmZ4gNwZxYInYNe/gUomPVz8vTDCQR0bOTurYKczSUaeb6FD +dhpf+QKCAQBYXfgKEZDgZEzUbNF3NwWBUGQS9bXNBF8LJKqCwE/5TlGL0E5Q3O4i +T1YJMmEThFnpYSu6qhnllAkymGdpTK8zpYxhrcaMnHrfe5drxUBJJbWAXAYrVGRZ +obOUZpHL6hevp+PHNNkB/ZxWCxRrMpaCOGCok7y09xzxXdqmMNW0ukgNS+t2b8Du +/PVjNMcfUpyfHxLaTRt5K/Mm05YmgcQAbhRIA8Bvxk/eCNRD4mnI9bIR9qb3g0Bd +2GubslVWKcflDW7i9dL2EOKCXBMVzJFV6X7xTSy8nFLKTCoODFfjkWVJQjUc1DiK +2Ba/LREu21ZrbuucY2TXPupiRWe3IVZA +-----END PRIVATE KEY----- diff --git a/azurerm/internal/services/keyvault/testdata/rsa_bundle.pfx b/azurerm/internal/services/keyvault/testdata/rsa_bundle.pfx new file mode 100644 index 000000000000..d663ab7da25c Binary files /dev/null and b/azurerm/internal/services/keyvault/testdata/rsa_bundle.pfx differ diff --git a/azurerm/internal/services/keyvault/testdata/rsa_single.pem b/azurerm/internal/services/keyvault/testdata/rsa_single.pem new file mode 100644 index 000000000000..a148f52ad13d --- /dev/null +++ b/azurerm/internal/services/keyvault/testdata/rsa_single.pem @@ 
-0,0 +1,82 @@ +-----BEGIN CERTIFICATE----- +MIIFPDCCAyQCCQDdhjtC7nvdCjANBgkqhkiG9w0BAQsFADBgMQswCQYDVQQGEwJV +UzELMAkGA1UECAwCV3MxCzAJBgNVBAcMAldzMQswCQYDVQQKDAJXczEUMBIGA1UE +CwwLZXhhbXBsZS5jb20xFDASBgNVBAMMC2V4YW1wbGUuY29tMB4XDTIxMDUyNjIw +NTIwMVoXDTMxMDUyNDIwNTIwMVowYDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldz +MQswCQYDVQQHDAJXczELMAkGA1UECgwCV3MxFDASBgNVBAsMC2V4YW1wbGUuY29t +MRQwEgYDVQQDDAtleGFtcGxlLmNvbTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCC +AgoCggIBAMo5Ea1UnCdMworYq/2qJXnOF6KUdhKwftLIqbxdz/M0RpwGhv7zkhXH +60crqT/ch+RV13zzv9NC3r4Y60ThJV8Bi6WwtuOY0gK4Ld3zbejGnckVMQHPyCw9 +99m0B6EWJhFgDEDsV8SkFce69Z8yFEx8tRxv9mG8rgECvyFIlc/wbZYMtEN9dW51 +YxvcB8kJSYQ0ECiKGH9iGP7LSkvdN9wDasze1XaZ4LQbSji832ADW/eIDPfjrkHJ +8iuLYVlbgCmAbB41v0sgGZzG15wKENpZRAbxiUJSlUYjU7LNpAgS2I8rzfusNMZI +Q5Z52MTy9TQ9FBMsuuNa/F7IhG3IWO7Wn1xpSqKdDORwA5TiJ9dLZVapwdu2T1aC +2hXn8fAIJUSlhJSyzOKUwYHaVZYBKS31N8EiUGHJaEkO+YJ/kTIkeh66mooLJ5cs +j4MRixehw1Mqp8cxhTKnV7qeO4r1Bfs59UayTUP7VItTTyeAmfXwfG10iE34wHVN +owz2fdo64ctCXYvt5fGDbI7qV6GnkT+pxbSgj0IVNK8bt1uKCNshWw5HsPutEOdZ +BsmJ2safqhT7bvEeuXxDLV9Dgt8DpO3rpYQwY3MIicKQljZGmvnJHoKtHdpwJiFU +mL68XwklzBpCYPtOwA9CpQ/EtydmkMWYnFku159+nFORifae+dsBAgMBAAEwDQYJ +KoZIhvcNAQELBQADggIBAJNmAUyKyu0jK7Wk1JJoMVUAJ0ZYekM54pvFdamgZZ80 +jKS4jvbHhx40Z9SuGG15dDeHWs5WPn/h2R5Knaepsj48Ilu3Ar1L1gcwTklsW2VQ +JN34uwp1kXjk8//ynmbDePjvzG54b7x8N6pEmDG18xoWWroJ0EhoEYYGSZgAOXjR +eJtlBkdtivpFn430ue+Hv8l4TvHN7LbddbhxgPtycBfZ/zpbngVpGWjpWBb2ppoM +0PIG5dHZpxp59Q6RJiVM1J0Kdvet9IqcyLFY864Y2uvAuQ+TUYfLbQ3ZsdGOESRD +qIRr2m1R07zamzL9/RNaDj3kukMXXWCNus+rQaM6p+Lh2wlcXN+qs/c21kas029g +5Iltr9kpnGhHrWndjQEOPNdqu1RbFvCFvV4PVR9pXJTVwF7tvo1h5A98ZZxez6yy +yFgFy9Hj58YjQ1+/0Q2EOpQltW+ItYakPUXJncEoxj0nPi74UIK3gPjw5H47wRV2 +4qWAoAU12Hv0aU6SBoqBUmBnow++s9PNMm60+d7ZvbOoxF/vczfg063UHZUO62WR +bMAmKcfWfhBVbDwQRf9U7qyWCvRUnMNawvs6uZl4XQcUZyQvMG+LAagjys4DnwTN +US0QEeHw8nKI9WpXXqVHcLGDcFDVEHnVvDw83DTKUe8edzvL6NkqQskeF1IzqrqP +-----END CERTIFICATE----- +-----BEGIN PRIVATE KEY----- 
+MIIJRAIBADANBgkqhkiG9w0BAQEFAASCCS4wggkqAgEAAoICAQDKORGtVJwnTMKK +2Kv9qiV5zheilHYSsH7SyKm8Xc/zNEacBob+85IVx+tHK6k/3IfkVdd887/TQt6+ +GOtE4SVfAYulsLbjmNICuC3d823oxp3JFTEBz8gsPffZtAehFiYRYAxA7FfEpBXH +uvWfMhRMfLUcb/ZhvK4BAr8hSJXP8G2WDLRDfXVudWMb3AfJCUmENBAoihh/Yhj+ +y0pL3TfcA2rM3tV2meC0G0o4vN9gA1v3iAz3465ByfIri2FZW4ApgGweNb9LIBmc +xtecChDaWUQG8YlCUpVGI1OyzaQIEtiPK837rDTGSEOWedjE8vU0PRQTLLrjWvxe +yIRtyFju1p9caUqinQzkcAOU4ifXS2VWqcHbtk9WgtoV5/HwCCVEpYSUsszilMGB +2lWWASkt9TfBIlBhyWhJDvmCf5EyJHoeupqKCyeXLI+DEYsXocNTKqfHMYUyp1e6 +njuK9QX7OfVGsk1D+1SLU08ngJn18HxtdIhN+MB1TaMM9n3aOuHLQl2L7eXxg2yO +6lehp5E/qcW0oI9CFTSvG7dbigjbIVsOR7D7rRDnWQbJidrGn6oU+27xHrl8Qy1f +Q4LfA6Tt66WEMGNzCInCkJY2Rpr5yR6CrR3acCYhVJi+vF8JJcwaQmD7TsAPQqUP +xLcnZpDFmJxZLteffpxTkYn2nvnbAQIDAQABAoICAQDIW/TkvHEfCxGxie4WnY8V ++Rk3z0qRR+fQJlruNDDGMaRIELuOPXwfMn4tfNfrWzAVDuw7dDI6OgYBaSUpkL17 +PUfEeasrI43ofd2MDBV4zdklk2aD+f/Fvfwk6hepBg3fpz9c0A5B2aVuyBF2kqLE +CcdUDqaZvdzVSYhfjNBKP1gElD/CorpIH+x7fO0FxbreTss3r2OISoSjZn4NXnhq +98PIinwLT3jwWh2Wy6OyOIHVZiobLa3IpzLW6/kfFOaNuBznba9D2uElkWmsw9hg +zpL1Ijkxyir2XCggf1knWqzQZcFnuv5q+sxkegNJV8OU53NNcEMW4l4r9xYmw5s/ +/bIO0SlWiOvl5cWiTkF4gjc/bSlgZYhBSLsSVt/tvdm5vk9uhnclUQCTq1Pwkn5X +ddlAT4dd2SkLvUv2hJbr+aoTI3PAQkzf7Wbu+prQ9oLbFysxbD+ghjcugejiVZcZ +381h43RuCudk1O3j+xcS/Fja3cDKp+RODl38sBVDj2MAhxparxu51A09FvyKY3Hh +uOkqAAwL6wevVBdCaihXYivxZkUMeeEydwtFJq+4L0lr27+VNYmP/AP7m4aZHmWr +k9MBkc6B9kH+5ZPqjC1JM/26OKjg1a67POqcXjvAZsGkQOU5+ThSuUQ1CCtf5qFQ +XHHFL4jun/mHuRMV3LxLcQKCAQEA6u88XLtLaz8EmxZjW1tFFNxzyYN2UUY812/T +68Ga7H5GMfedPNx9BsNINUM75L3vExtHVMdYdDtDrJE1ZmL+nk8WytVjC/SQ+8uc +D8xLI9pclYqwjI1L9xu5uJ36OBO1eMoD0mLg94InVHArgCCuSCg88oLpF2RPi24Z +2nUFCGS0QM0KNnWg4O6rHiIJY4LDoXZFe39I4uQ8y3dOUHbskOMBXY/bRWZQOfFX +pW1G+nwFFKg4v2DT2OsDfVal6USg3IgBBxc+2f5rNWsbMJ2/CAzWZndZ8xE5VZxk +n9seKCgdNW4WjNLahagPcUb/FH6JvoDDPmittsyJq8Lqi35O1QKCAQEA3Fr17yPj +ZcnjoDFzOakY5BPnsCta/Jx7ALHGXxL8XdhxVR0ZH1S5FpseiE37q4hurX/WsIwU +Hw+pVDs1EQWLMRSPM9ukQjK0NWHq0zNegIFBtAii4NfigIfvBm12L+6u1ceNUm/R 
+0HkAta7ewp2E8UMkI0aQ+47lWNCo0vzSQuw85bYESIWk8EoSmjs+Mel5sQDmP2Tz +ZJE2gpwVf6Q6Herm308xKjcr6VANv5XY0DTNooR/bhmjc/5R42DPUzlqffL+nkiv +H1yfJ7cYtElCdfHJ9i/fkXpiGfQGNgvvvlPQML67BStMDE3lkwyVtbrC0kP7dkMO +PfHkh+ghEBhpfQKCAQEAzb9UutWgeD0JWJUj0Jc/CcUDdmC2VvlPQVGDY/37ItB3 +wzzarLlgmXKeo17NbkdUwAPs1Xd7pNdeLhxBivh5isUy7dIxWwAFlS687yz4RV7w +JFsAwSbhd3kFjoRz5XaFQFKTREYqRTjsHsvZnO2CFFmA/tXdMwo7i52R8Lr4/F/N +aM7o+cbMbRE4NTyTwl173MTKlxxjBanjAEHtCRUNmtnjkT/yRXYUHpqZKpxoQ+gn +Pu8QO7Kd9WPke3lN9duPkXNeiwo1lg51b6PsfQg5i+BKSBSQZwoXjz1W9pScdYik +A+30u8HRdKo5U/hcXmNvWLJFyzs7n1LD4vPiGuY0PQKCAQEAjdC0qt2MAfx21Kg+ +ogNkE5WD7OtaOW1z9DR1mMadSMB93+tEOdfK7vOSKOMXb4P0xxCZWTIt2Pe5YI5i +Trwkose6igN3qWr8c8a63IYY8dc+M+HDAbWu/k191cMo5xxTxR4So9V3URYvEL6k +etfpsWQmo9VMhbmnfQt9O5yerZdgce48v3fTeRoS8tPh6Tl7qiEMDgyoYDbcwB43 +79f/1zVQtLNAzwyG5DczQfPsf8Mb/NYNNQynF4W7qfyqMcOn97slhzT8D9EKVQMP +kSf87+9WrWyOFBCRhmZ4gNwZxYInYNe/gUomPVz8vTDCQR0bOTurYKczSUaeb6FD +dhpf+QKCAQBYXfgKEZDgZEzUbNF3NwWBUGQS9bXNBF8LJKqCwE/5TlGL0E5Q3O4i +T1YJMmEThFnpYSu6qhnllAkymGdpTK8zpYxhrcaMnHrfe5drxUBJJbWAXAYrVGRZ +obOUZpHL6hevp+PHNNkB/ZxWCxRrMpaCOGCok7y09xzxXdqmMNW0ukgNS+t2b8Du +/PVjNMcfUpyfHxLaTRt5K/Mm05YmgcQAbhRIA8Bvxk/eCNRD4mnI9bIR9qb3g0Bd +2GubslVWKcflDW7i9dL2EOKCXBMVzJFV6X7xTSy8nFLKTCoODFfjkWVJQjUc1DiK +2Ba/LREu21ZrbuucY2TXPupiRWe3IVZA +-----END PRIVATE KEY----- diff --git a/azurerm/internal/services/kusto/kusto_cluster_customer_managed_key_resource.go b/azurerm/internal/services/kusto/kusto_cluster_customer_managed_key_resource.go index 6babc58476a1..fb74efcc9a8d 100644 --- a/azurerm/internal/services/kusto/kusto_cluster_customer_managed_key_resource.go +++ b/azurerm/internal/services/kusto/kusto_cluster_customer_managed_key_resource.go @@ -13,6 +13,7 @@ import ( keyVaultValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/keyvault/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/kusto/parse" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/kusto/validate" + msiValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/msi/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" @@ -62,6 +63,12 @@ func resourceKustoClusterCustomerManagedKey() *pluginsdk.Resource { Required: true, ValidateFunc: validation.StringIsNotEmpty, }, + + "user_identity": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: msiValidate.UserAssignedIdentityID, + }, }, } } @@ -143,6 +150,10 @@ func resourceKustoClusterCustomerManagedKeyCreateUpdate(d *pluginsdk.ResourceDat }, } + if v, ok := d.GetOk("user_identity"); ok { + props.ClusterProperties.KeyVaultProperties.UserIdentity = utils.String(v.(string)) + } + future, err := clusterClient.Update(ctx, clusterID.ResourceGroup, clusterID.Name, props) if err != nil { return fmt.Errorf("Error updating Customer Managed Key for Kusto Cluster %q (Resource Group %q): %+v", clusterID.Name, clusterID.ResourceGroup, err) @@ -192,6 +203,7 @@ func resourceKustoClusterCustomerManagedKeyRead(d *pluginsdk.ResourceData, meta keyName := "" keyVaultURI := "" keyVersion := "" + userIdentity := "" if props != nil { if props.KeyName != nil { keyName = *props.KeyName @@ -202,6 +214,9 @@ func resourceKustoClusterCustomerManagedKeyRead(d *pluginsdk.ResourceData, meta if props.KeyVersion != nil { keyVersion = *props.KeyVersion } + if props.UserIdentity != nil { + userIdentity = *props.UserIdentity + } } if keyVaultURI == "" { @@ -218,7 +233,7 @@ func resourceKustoClusterCustomerManagedKeyRead(d *pluginsdk.ResourceData, meta d.Set("key_vault_id", keyVaultID) d.Set("key_name", keyName) d.Set("key_version", keyVersion) - + d.Set("user_identity", userIdentity) 
return nil } diff --git a/azurerm/internal/services/kusto/kusto_cluster_customer_managed_key_test.go b/azurerm/internal/services/kusto/kusto_cluster_customer_managed_key_test.go index aa79319febf7..85b9845f5fc8 100644 --- a/azurerm/internal/services/kusto/kusto_cluster_customer_managed_key_test.go +++ b/azurerm/internal/services/kusto/kusto_cluster_customer_managed_key_test.go @@ -86,6 +86,21 @@ func TestAccKustoClusterCustomerManagedKey_updateKey(t *testing.T) { }) } +func TestAccKustoClusterCustomerManagedKey_userIdentity(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_kusto_cluster_customer_managed_key", "test") + r := KustoClusterCustomerManagedKeyResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.userIdentity(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + func (KustoClusterCustomerManagedKeyResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := parse.ClusterID(state.ID) if err != nil { @@ -159,6 +174,101 @@ resource "azurerm_kusto_cluster_customer_managed_key" "test" { `, template) } +func (KustoClusterCustomerManagedKeyResource) userIdentity(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features { + key_vault { + purge_soft_delete_on_destroy = false + } + } +} + +data "azurerm_client_config" "current" {} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_user_assigned_identity" "test" { + name = "acctest%s" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location +} + +resource "azurerm_kusto_cluster" "test" { + name = "acctestkc%s" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + + sku { + name = "Dev(No SLA)_Standard_D11_v2" + capacity = 1 + } + + 
identity { + type = "UserAssigned" + identity_ids = [azurerm_user_assigned_identity.test.id] + } +} + +resource "azurerm_key_vault" "test" { + name = "acctestkv%s" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + tenant_id = data.azurerm_client_config.current.tenant_id + sku_name = "standard" + soft_delete_enabled = true + purge_protection_enabled = true +} + +resource "azurerm_key_vault_access_policy" "cluster" { + key_vault_id = azurerm_key_vault.test.id + tenant_id = data.azurerm_client_config.current.tenant_id + object_id = azurerm_user_assigned_identity.test.principal_id + + key_permissions = ["get", "unwrapkey", "wrapkey"] +} + +resource "azurerm_key_vault_access_policy" "client" { + key_vault_id = azurerm_key_vault.test.id + tenant_id = data.azurerm_client_config.current.tenant_id + object_id = data.azurerm_client_config.current.object_id + + key_permissions = [ + "create", + "delete", + "get", + "list", + "purge", + "recover", + ] +} + +resource "azurerm_key_vault_key" "test" { + name = "test" + key_vault_id = azurerm_key_vault.test.id + key_type = "RSA" + key_size = 2048 + key_opts = ["decrypt", "encrypt", "sign", "unwrapKey", "verify", "wrapKey"] + + depends_on = [ + azurerm_key_vault_access_policy.client, + azurerm_key_vault_access_policy.cluster, + ] +} + +resource "azurerm_kusto_cluster_customer_managed_key" "test" { + cluster_id = azurerm_kusto_cluster.test.id + key_vault_id = azurerm_key_vault.test.id + key_name = azurerm_key_vault_key.test.name + key_version = azurerm_key_vault_key.test.version + user_identity = azurerm_user_assigned_identity.test.id +} +`, data.RandomInteger, data.Locations.Primary, data.RandomString, data.RandomString, data.RandomString) +} + func (KustoClusterCustomerManagedKeyResource) template(data acceptance.TestData) string { return fmt.Sprintf(` provider "azurerm" { diff --git a/azurerm/internal/services/kusto/kusto_eventgrid_data_connection_resource.go 
b/azurerm/internal/services/kusto/kusto_eventgrid_data_connection_resource.go index e20992a43ac1..9912a4705034 100644 --- a/azurerm/internal/services/kusto/kusto_eventgrid_data_connection_resource.go +++ b/azurerm/internal/services/kusto/kusto_eventgrid_data_connection_resource.go @@ -118,17 +118,22 @@ func resourceKustoEventGridDataConnection() *pluginsdk.Resource { Type: pluginsdk.TypeString, Optional: true, ValidateFunc: validation.StringInSlice([]string{ + string(kusto.APACHEAVRO), string(kusto.AVRO), string(kusto.CSV), string(kusto.JSON), string(kusto.MULTIJSON), + string(kusto.ORC), + string(kusto.PARQUET), string(kusto.PSV), string(kusto.RAW), string(kusto.SCSV), string(kusto.SINGLEJSON), string(kusto.SOHSV), string(kusto.TSV), + string(kusto.TSVE), string(kusto.TXT), + string(kusto.W3CLOGFILE), }, false), }, }, diff --git a/azurerm/internal/services/kusto/kusto_eventhub_data_connection_resource.go b/azurerm/internal/services/kusto/kusto_eventhub_data_connection_resource.go index 4750933a4b5a..80979650cc4b 100644 --- a/azurerm/internal/services/kusto/kusto_eventhub_data_connection_resource.go +++ b/azurerm/internal/services/kusto/kusto_eventhub_data_connection_resource.go @@ -93,10 +93,12 @@ func resourceKustoEventHubDataConnection() *pluginsdk.Resource { }, "consumer_group": { - Type: pluginsdk.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: eventhubValidate.ValidateEventHubConsumerName(), + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.Any( + eventhubValidate.ValidateEventHubConsumerName(), + validation.StringInSlice([]string{"$Default"}, false)), }, "table_name": { @@ -115,6 +117,7 @@ func resourceKustoEventHubDataConnection() *pluginsdk.Resource { Type: pluginsdk.TypeString, Optional: true, ValidateFunc: validation.StringInSlice([]string{ + string(kusto.APACHEAVRO), string(kusto.AVRO), string(kusto.CSV), string(kusto.JSON), diff --git 
a/azurerm/internal/services/kusto/kusto_iothub_data_connection_resource.go b/azurerm/internal/services/kusto/kusto_iothub_data_connection_resource.go index 0ae00520e86b..1270da582bdd 100644 --- a/azurerm/internal/services/kusto/kusto_iothub_data_connection_resource.go +++ b/azurerm/internal/services/kusto/kusto_iothub_data_connection_resource.go @@ -83,6 +83,44 @@ func resourceKustoIotHubDataConnection() *pluginsdk.Resource { ValidateFunc: iothubValidate.IotHubSharedAccessPolicyName, }, + "table_name": { + Type: pluginsdk.TypeString, + ForceNew: true, + Optional: true, + ValidateFunc: validate.EntityName, + }, + + "mapping_rule_name": { + Type: pluginsdk.TypeString, + ForceNew: true, + Optional: true, + ValidateFunc: validate.EntityName, + }, + + "data_format": { + Type: pluginsdk.TypeString, + ForceNew: true, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{ + string(kusto.IotHubDataFormatAPACHEAVRO), + string(kusto.IotHubDataFormatAVRO), + string(kusto.IotHubDataFormatCSV), + string(kusto.IotHubDataFormatJSON), + string(kusto.IotHubDataFormatMULTIJSON), + string(kusto.IotHubDataFormatORC), + string(kusto.IotHubDataFormatPARQUET), + string(kusto.IotHubDataFormatPSV), + string(kusto.IotHubDataFormatRAW), + string(kusto.IotHubDataFormatSCSV), + string(kusto.IotHubDataFormatSINGLEJSON), + string(kusto.IotHubDataFormatSOHSV), + string(kusto.IotHubDataFormatTSV), + string(kusto.IotHubDataFormatTSVE), + string(kusto.IotHubDataFormatTXT), + string(kusto.IotHubDataFormatW3CLOGFILE), + }, false), + }, + "event_system_properties": { Type: pluginsdk.TypeSet, Optional: true, @@ -128,17 +166,11 @@ func resourceKustoIotHubDataConnectionCreate(d *pluginsdk.ResourceData, meta int return tf.ImportAsExistsError("azurerm_kusto_iothub_data_connection", id.ID()) } - dataConnection := kusto.IotHubDataConnection{ - Location: utils.String(azure.NormalizeLocation(d.Get("location").(string))), - IotHubConnectionProperties: &kusto.IotHubConnectionProperties{ - 
IotHubResourceID: utils.String(d.Get("iothub_id").(string)), - ConsumerGroup: utils.String(d.Get("consumer_group").(string)), - SharedAccessPolicyName: utils.String(d.Get("shared_access_policy_name").(string)), - }, - } + iotHubDataConnectionProperties := expandKustoIotHubDataConnectionProperties(d) - if eventSystemProperties, ok := d.GetOk("event_system_properties"); ok { - dataConnection.IotHubConnectionProperties.EventSystemProperties = utils.ExpandStringSlice(eventSystemProperties.(*pluginsdk.Set).List()) + dataConnection := kusto.IotHubDataConnection{ + Location: utils.String(azure.NormalizeLocation(d.Get("location").(string))), + IotHubConnectionProperties: iotHubDataConnectionProperties, } future, err := client.CreateOrUpdate(ctx, id.ResourceGroup, id.ClusterName, id.DatabaseName, id.Name, dataConnection) @@ -183,6 +215,9 @@ func resourceKustoIotHubDataConnectionRead(d *pluginsdk.ResourceData, meta inter if props := dataConnection.IotHubConnectionProperties; props != nil { d.Set("iothub_id", props.IotHubResourceID) d.Set("consumer_group", props.ConsumerGroup) + d.Set("table_name", props.TableName) + d.Set("mapping_rule_name", props.MappingRuleName) + d.Set("data_format", props.DataFormat) d.Set("shared_access_policy_name", props.SharedAccessPolicyName) d.Set("event_system_properties", utils.FlattenStringSlice(props.EventSystemProperties)) } @@ -212,3 +247,29 @@ func resourceKustoIotHubDataConnectionDelete(d *pluginsdk.ResourceData, meta int return nil } + +func expandKustoIotHubDataConnectionProperties(d *pluginsdk.ResourceData) *kusto.IotHubConnectionProperties { + iotHubDataConnectionProperties := &kusto.IotHubConnectionProperties{ + IotHubResourceID: utils.String(d.Get("iothub_id").(string)), + ConsumerGroup: utils.String(d.Get("consumer_group").(string)), + SharedAccessPolicyName: utils.String(d.Get("shared_access_policy_name").(string)), + } + + if tableName, ok := d.GetOk("table_name"); ok { + iotHubDataConnectionProperties.TableName = 
utils.String(tableName.(string)) + } + + if mappingRuleName, ok := d.GetOk("mapping_rule_name"); ok { + iotHubDataConnectionProperties.MappingRuleName = utils.String(mappingRuleName.(string)) + } + + if df, ok := d.GetOk("data_format"); ok { + iotHubDataConnectionProperties.DataFormat = kusto.IotHubDataFormat(df.(string)) + } + + if eventSystemProperties, ok := d.GetOk("event_system_properties"); ok { + iotHubDataConnectionProperties.EventSystemProperties = utils.ExpandStringSlice(eventSystemProperties.(*pluginsdk.Set).List()) + } + + return iotHubDataConnectionProperties +} diff --git a/azurerm/internal/services/kusto/kusto_iothub_data_connection_resource_test.go b/azurerm/internal/services/kusto/kusto_iothub_data_connection_resource_test.go index c805a7722dc3..cc59af8c01fd 100644 --- a/azurerm/internal/services/kusto/kusto_iothub_data_connection_resource_test.go +++ b/azurerm/internal/services/kusto/kusto_iothub_data_connection_resource_test.go @@ -5,13 +5,12 @@ import ( "fmt" "testing" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/kusto/parse" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" - - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) type KustoIotHubDataConnectionResource struct { @@ -32,7 +31,80 @@ func TestAccKustoIotHubDataConnection_basic(t *testing.T) { }) } -func (KustoIotHubDataConnectionResource) basic(data acceptance.TestData) string { +func TestAccKustoIotHubDataConnection_complete(t *testing.T) { + data := 
acceptance.BuildTestData(t, "azurerm_kusto_iothub_data_connection", "test") + r := KustoIotHubDataConnectionResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.complete(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func (KustoIotHubDataConnectionResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { + id, err := parse.DataConnectionID(state.ID) + if err != nil { + return nil, err + } + + resp, err := clients.Kusto.DataConnectionsClient.Get(ctx, id.ResourceGroup, id.ClusterName, id.DatabaseName, id.Name) + if err != nil { + return nil, fmt.Errorf("retrieving %s: %v", id.String(), err) + } + + value, ok := resp.Value.AsIotHubDataConnection() + if !ok { + return nil, fmt.Errorf("%s is not an IotHub Data Connection", id.String()) + } + + return utils.Bool(value.IotHubConnectionProperties != nil), nil +} + +func (r KustoIotHubDataConnectionResource) basic(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_kusto_iothub_data_connection" "test" { + name = "acctestkedc-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + cluster_name = azurerm_kusto_cluster.test.name + database_name = azurerm_kusto_database.test.name + + iothub_id = azurerm_iothub.test.id + consumer_group = azurerm_iothub_consumer_group.test.name + shared_access_policy_name = azurerm_iothub_shared_access_policy.test.name +} +`, r.template(data), data.RandomInteger) +} + +func (r KustoIotHubDataConnectionResource) complete(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_kusto_iothub_data_connection" "test" { + name = "acctestkedc-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + cluster_name = azurerm_kusto_cluster.test.name + database_name = 
azurerm_kusto_database.test.name + + iothub_id = azurerm_iothub.test.id + consumer_group = azurerm_iothub_consumer_group.test.name + shared_access_policy_name = azurerm_iothub_shared_access_policy.test.name + event_system_properties = ["message-id", "sequence-number", "to"] + mapping_rule_name = "Json_Mapping" + data_format = "MULTIJSON" +} +`, r.template(data), data.RandomInteger) +} + +func (KustoIotHubDataConnectionResource) template(data acceptance.TestData) string { return fmt.Sprintf(` provider "azurerm" { features {} @@ -90,37 +162,5 @@ resource "azurerm_iothub_consumer_group" "test" { eventhub_endpoint_name = "events" resource_group_name = azurerm_resource_group.test.name } - -resource "azurerm_kusto_iothub_data_connection" "test" { - name = "acctestkedc-%d" - resource_group_name = azurerm_resource_group.test.name - location = azurerm_resource_group.test.location - cluster_name = azurerm_kusto_cluster.test.name - database_name = azurerm_kusto_database.test.name - - iothub_id = azurerm_iothub.test.id - consumer_group = azurerm_iothub_consumer_group.test.name - shared_access_policy_name = azurerm_iothub_shared_access_policy.test.name - event_system_properties = ["message-id", "sequence-number", "to"] -} -`, data.RandomInteger, data.Locations.Primary, data.RandomString, data.RandomInteger, data.RandomInteger, data.RandomInteger) -} - -func (KustoIotHubDataConnectionResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { - id, err := parse.DataConnectionID(state.ID) - if err != nil { - return nil, err - } - - resp, err := clients.Kusto.DataConnectionsClient.Get(ctx, id.ResourceGroup, id.ClusterName, id.DatabaseName, id.Name) - if err != nil { - return nil, fmt.Errorf("retrieving %s: %v", id.String(), err) - } - - value, ok := resp.Value.AsIotHubDataConnection() - if !ok { - return nil, fmt.Errorf("%s is not an IotHub Data Connection", id.String()) - } - - return utils.Bool(value.IotHubConnectionProperties 
!= nil), nil +`, data.RandomInteger, data.Locations.Primary, data.RandomString, data.RandomInteger, data.RandomInteger) } diff --git a/azurerm/internal/services/lighthouse/lighthouse_assignment_resource.go b/azurerm/internal/services/lighthouse/lighthouse_assignment_resource.go index e35eee2e6280..828a78dccd23 100644 --- a/azurerm/internal/services/lighthouse/lighthouse_assignment_resource.go +++ b/azurerm/internal/services/lighthouse/lighthouse_assignment_resource.go @@ -165,7 +165,7 @@ func resourceLighthouseAssignmentDelete(d *pluginsdk.ResourceData, meta interfac Timeout: d.Timeout(pluginsdk.TimeoutDelete), } - if _, err := stateConf.WaitForState(); err != nil { + if _, err := stateConf.WaitForStateContext(ctx); err != nil { return fmt.Errorf("Error waiting for Lighthouse Assignment %q (Scope %q) to be deleted: %s", id.Name, id.Scope, err) } diff --git a/azurerm/internal/services/lighthouse/lighthouse_definition_resource.go b/azurerm/internal/services/lighthouse/lighthouse_definition_resource.go index 4b899ae0dd4a..d933fe059ed3 100644 --- a/azurerm/internal/services/lighthouse/lighthouse_definition_resource.go +++ b/azurerm/internal/services/lighthouse/lighthouse_definition_resource.go @@ -38,6 +38,7 @@ func resourceLighthouseDefinition() *pluginsdk.Resource { "name": { Type: pluginsdk.TypeString, Required: true, + ForceNew: true, ValidateFunc: validation.StringIsNotEmpty, }, @@ -103,6 +104,39 @@ func resourceLighthouseDefinition() *pluginsdk.Resource { ForceNew: true, ValidateFunc: validation.IsUUID, }, + + "plan": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + + "publisher": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + + "product": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: 
validation.StringIsNotEmpty, + }, + + "version": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + }, + }, + }, }, } } @@ -146,6 +180,7 @@ func resourceLighthouseDefinitionCreateUpdate(d *pluginsdk.ResourceData, meta in return err } parameters := managedservices.RegistrationDefinition{ + Plan: expandLighthouseDefinitionPlan(d.Get("plan").([]interface{})), Properties: &managedservices.RegistrationDefinitionProperties{ Description: utils.String(d.Get("description").(string)), Authorizations: authorizations, @@ -196,6 +231,10 @@ func resourceLighthouseDefinitionRead(d *pluginsdk.ResourceData, meta interface{ d.Set("lighthouse_definition_id", resp.Name) d.Set("scope", id.Scope) + if err := d.Set("plan", flattenLighthouseDefinitionPlan(resp.Plan)); err != nil { + return fmt.Errorf("setting `plan`: %+v", err) + } + if props := resp.Properties; props != nil { if err := d.Set("authorization", flattenLighthouseDefinitionAuthorization(props.Authorizations)); err != nil { return fmt.Errorf("setting `authorization`: %+v", err) @@ -299,3 +338,43 @@ func expandLighthouseDefinitionAuthorizationDelegatedRoleDefinitionIds(input []i } return &result, nil } + +func expandLighthouseDefinitionPlan(input []interface{}) *managedservices.Plan { + if len(input) == 0 || input[0] == nil { + return nil + } + raw := input[0].(map[string]interface{}) + return &managedservices.Plan{ + Name: utils.String(raw["name"].(string)), + Publisher: utils.String(raw["publisher"].(string)), + Product: utils.String(raw["product"].(string)), + Version: utils.String(raw["version"].(string)), + } +} + +func flattenLighthouseDefinitionPlan(input *managedservices.Plan) []interface{} { + if input == nil { + return []interface{}{} + } + var name, publisher, product, version string + if input.Name != nil { + name = *input.Name + } + if input.Publisher != nil { + publisher = *input.Publisher + } + if input.Product != nil { + product = *input.Product + } + if 
input.Version != nil { + version = *input.Version + } + return []interface{}{ + map[string]interface{}{ + "name": name, + "publisher": publisher, + "product": product, + "version": version, + }, + } +} diff --git a/azurerm/internal/services/lighthouse/lighthouse_definition_resource_test.go b/azurerm/internal/services/lighthouse/lighthouse_definition_resource_test.go index 2cf1e7a0ab7a..ed3e2099ffc7 100644 --- a/azurerm/internal/services/lighthouse/lighthouse_definition_resource_test.go +++ b/azurerm/internal/services/lighthouse/lighthouse_definition_resource_test.go @@ -169,6 +169,32 @@ func TestAccLighthouseDefinition_emptyID(t *testing.T) { }) } +func TestAccLighthouseDefinition_plan(t *testing.T) { + secondTenantID := os.Getenv("ARM_TENANT_ID_ALT") + principalID := os.Getenv("ARM_PRINCIPAL_ID_ALT_TENANT") + planName := os.Getenv("ARM_PLAN_NAME") + planPublisher := os.Getenv("ARM_PLAN_PUBLISHER") + planProduct := os.Getenv("ARM_PLAN_PRODUCT") + planVersion := os.Getenv("ARM_PLAN_VERSION") + if secondTenantID == "" || principalID == "" || planName == "" || planPublisher == "" || planProduct == "" || planVersion == "" { + t.Skip("Skipping as ARM_TENANT_ID_ALT, ARM_PRINCIPAL_ID_ALT_TENANT, ARM_PLAN_NAME, ARM_PLAN_PUBLISHER, ARM_PLAN_PRODUCT or ARM_PLAN_VERSION are not specified") + } + + data := acceptance.BuildTestData(t, "azurerm_lighthouse_definition", "test") + r := LighthouseDefinitionResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.plan(data, secondTenantID, principalID, planName, planPublisher, planProduct, planVersion), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("id").Exists(), + check.That(data.ResourceName).Key("lighthouse_definition_id").Exists(), + ), + }, + }) +} + func (LighthouseDefinitionResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := 
parse.LighthouseDefinitionID(state.ID) if err != nil { @@ -325,3 +351,37 @@ resource "azurerm_lighthouse_definition" "test" { } `, data.RandomInteger, secondTenantID, principalID) } + +func (LighthouseDefinitionResource) plan(data acceptance.TestData, secondTenantID, principalID, planName, planPublisher, planProduct, planVersion string) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +data "azurerm_role_definition" "reader" { + role_definition_id = "acdd72a7-3385-48ef-bd42-f606fba81ae7" +} + +data "azurerm_subscription" "test" {} + +resource "azurerm_lighthouse_definition" "test" { + name = "acctest-LD-%d" + description = "Acceptance Test Lighthouse Definition" + managing_tenant_id = "%s" + scope = data.azurerm_subscription.test.id + + authorization { + principal_id = "%s" + role_definition_id = data.azurerm_role_definition.reader.role_definition_id + principal_display_name = "Reader" + } + + plan { + name = "%s" + publisher = "%s" + product = "%s" + version = "%s" + } +} +`, data.RandomInteger, secondTenantID, principalID, planName, planPublisher, planProduct, planVersion) +} diff --git a/azurerm/internal/services/loadbalancer/backend_address_pool_address_resource.go b/azurerm/internal/services/loadbalancer/backend_address_pool_address_resource.go index 944e588335c6..7512dc0516c4 100644 --- a/azurerm/internal/services/loadbalancer/backend_address_pool_address_resource.go +++ b/azurerm/internal/services/loadbalancer/backend_address_pool_address_resource.go @@ -5,7 +5,7 @@ import ( "fmt" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-05-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/locks" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/sdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/loadbalancer/parse" diff --git 
a/azurerm/internal/services/loadbalancer/backend_address_pool_address_resource_test.go b/azurerm/internal/services/loadbalancer/backend_address_pool_address_resource_test.go index 8ab3921aeea5..1a106363f874 100644 --- a/azurerm/internal/services/loadbalancer/backend_address_pool_address_resource_test.go +++ b/azurerm/internal/services/loadbalancer/backend_address_pool_address_resource_test.go @@ -5,7 +5,7 @@ import ( "fmt" "testing" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-05-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/types" diff --git a/azurerm/internal/services/loadbalancer/backend_address_pool_data_source.go b/azurerm/internal/services/loadbalancer/backend_address_pool_data_source.go index 9a250a5123eb..186863582810 100644 --- a/azurerm/internal/services/loadbalancer/backend_address_pool_data_source.go +++ b/azurerm/internal/services/loadbalancer/backend_address_pool_data_source.go @@ -4,15 +4,14 @@ import ( "fmt" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-05-01/network" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" - + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/loadbalancer/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/loadbalancer/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) func dataSourceArmLoadBalancerBackendAddressPool() *pluginsdk.Resource { diff --git a/azurerm/internal/services/loadbalancer/backend_address_pool_resource.go b/azurerm/internal/services/loadbalancer/backend_address_pool_resource.go index 42853a0fa4a6..981a75dbe7aa 100644 --- a/azurerm/internal/services/loadbalancer/backend_address_pool_resource.go +++ b/azurerm/internal/services/loadbalancer/backend_address_pool_resource.go @@ -5,7 +5,7 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-05-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" diff --git a/azurerm/internal/services/loadbalancer/backend_address_pool_resource_test.go b/azurerm/internal/services/loadbalancer/backend_address_pool_resource_test.go index 59ae81c5f9ab..20cfaf57b40f 100644 --- a/azurerm/internal/services/loadbalancer/backend_address_pool_resource_test.go +++ b/azurerm/internal/services/loadbalancer/backend_address_pool_resource_test.go @@ -5,7 +5,7 @@ import ( "fmt" "testing" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-05-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" diff --git a/azurerm/internal/services/loadbalancer/client/client.go 
b/azurerm/internal/services/loadbalancer/client/client.go index 33c4928ed899..46f620460aee 100644 --- a/azurerm/internal/services/loadbalancer/client/client.go +++ b/azurerm/internal/services/loadbalancer/client/client.go @@ -1,7 +1,7 @@ package client import ( - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-05-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/common" ) diff --git a/azurerm/internal/services/loadbalancer/loadbalancer.go b/azurerm/internal/services/loadbalancer/loadbalancer.go index 519a35060ebd..aeb7864f7082 100644 --- a/azurerm/internal/services/loadbalancer/loadbalancer.go +++ b/azurerm/internal/services/loadbalancer/loadbalancer.go @@ -1,8 +1,10 @@ package loadbalancer import ( - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-05-01/network" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "context" + + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/loadbalancer/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" ) @@ -108,15 +110,16 @@ func FindLoadBalancerProbeByName(lb *network.LoadBalancer, name string) (*networ } func loadBalancerSubResourceImporter(parser func(input string) (*parse.LoadBalancerId, error)) *schema.ResourceImporter { - return &schema.ResourceImporter{ - State: func(d *pluginsdk.ResourceData, meta interface{}) ([]*pluginsdk.ResourceData, error) { - lbId, err := parser(d.Id()) - if err != nil { - return nil, err - } - - d.Set("loadbalancer_id", lbId.ID()) - return []*pluginsdk.ResourceData{d}, nil - }, - } + return pluginsdk.ImporterValidatingResourceIdThen(func(id string) error { + _, err := parser(id) + return err + }, func(ctx context.Context, d 
*pluginsdk.ResourceData, meta interface{}) ([]*pluginsdk.ResourceData, error) { + lbId, err := parser(d.Id()) + if err != nil { + return nil, err + } + + d.Set("loadbalancer_id", lbId.ID()) + return []*pluginsdk.ResourceData{d}, nil + }) } diff --git a/azurerm/internal/services/loadbalancer/loadbalancer_data_source.go b/azurerm/internal/services/loadbalancer/loadbalancer_data_source.go index c6a3227703ce..c494fdbba072 100644 --- a/azurerm/internal/services/loadbalancer/loadbalancer_data_source.go +++ b/azurerm/internal/services/loadbalancer/loadbalancer_data_source.go @@ -4,7 +4,7 @@ import ( "fmt" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-05-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" diff --git a/azurerm/internal/services/loadbalancer/loadbalancer_nat_pool_resource_test.go b/azurerm/internal/services/loadbalancer/loadbalancer_nat_pool_resource_test.go index a148e9488095..22b1df336e17 100644 --- a/azurerm/internal/services/loadbalancer/loadbalancer_nat_pool_resource_test.go +++ b/azurerm/internal/services/loadbalancer/loadbalancer_nat_pool_resource_test.go @@ -5,10 +5,9 @@ import ( "fmt" "testing" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-05-01/network" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" - + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/loadbalancer/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" diff --git a/azurerm/internal/services/loadbalancer/loadbalancer_nat_rule_resource_test.go b/azurerm/internal/services/loadbalancer/loadbalancer_nat_rule_resource_test.go index bba6581662c0..b4dbb01726ce 100644 --- a/azurerm/internal/services/loadbalancer/loadbalancer_nat_rule_resource_test.go +++ b/azurerm/internal/services/loadbalancer/loadbalancer_nat_rule_resource_test.go @@ -5,7 +5,7 @@ import ( "fmt" "testing" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-05-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" diff --git a/azurerm/internal/services/loadbalancer/loadbalancer_outbound_rule_resource_test.go b/azurerm/internal/services/loadbalancer/loadbalancer_outbound_rule_resource_test.go index 9f961410ea6f..bb477ccbeece 100644 --- a/azurerm/internal/services/loadbalancer/loadbalancer_outbound_rule_resource_test.go +++ b/azurerm/internal/services/loadbalancer/loadbalancer_outbound_rule_resource_test.go @@ -5,7 +5,7 @@ import ( "fmt" "testing" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-05-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" diff --git a/azurerm/internal/services/loadbalancer/loadbalancer_probe_resource_test.go 
b/azurerm/internal/services/loadbalancer/loadbalancer_probe_resource_test.go index c48e565a7fbc..05bd9bf12239 100644 --- a/azurerm/internal/services/loadbalancer/loadbalancer_probe_resource_test.go +++ b/azurerm/internal/services/loadbalancer/loadbalancer_probe_resource_test.go @@ -5,10 +5,9 @@ import ( "fmt" "testing" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-05-01/network" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" - + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/loadbalancer/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" diff --git a/azurerm/internal/services/loadbalancer/loadbalancer_resource.go b/azurerm/internal/services/loadbalancer/loadbalancer_resource.go index 2f9e76cc7919..31cf30cbd219 100644 --- a/azurerm/internal/services/loadbalancer/loadbalancer_resource.go +++ b/azurerm/internal/services/loadbalancer/loadbalancer_resource.go @@ -3,9 +3,10 @@ package loadbalancer import ( "fmt" "log" + "strings" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-05-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" @@ -73,6 +74,21 @@ func resourceArmLoadBalancer() *pluginsdk.Resource { ValidateFunc: validation.StringIsNotEmpty, }, + "availability_zone": { + Type: 
pluginsdk.TypeString, + Optional: true, + //Default: "Zone-Redundant", + Computed: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{ + "No-Zone", + "1", + "2", + "3", + "Zone-Redundant", + }, false), + }, + "subnet_id": { Type: pluginsdk.TypeString, Optional: true, @@ -93,10 +109,10 @@ func resourceArmLoadBalancer() *pluginsdk.Resource { "private_ip_address_version": { Type: pluginsdk.TypeString, Optional: true, - Default: string(network.IPv4), + Computed: true, ValidateFunc: validation.StringInSlice([]string{ - string(network.IPv4), - string(network.IPv6), + string(network.IPVersionIPv4), + string(network.IPVersionIPv6), }, false), }, @@ -119,8 +135,8 @@ func resourceArmLoadBalancer() *pluginsdk.Resource { Optional: true, Computed: true, ValidateFunc: validation.StringInSlice([]string{ - string(network.Dynamic), - string(network.Static), + string(network.IPAllocationMethodDynamic), + string(network.IPAllocationMethodStatic), }, true), StateFunc: state.IgnoreCase, DiffSuppressFunc: suppress.CaseDifference, @@ -156,7 +172,19 @@ func resourceArmLoadBalancer() *pluginsdk.Resource { Set: pluginsdk.HashString, }, - "zones": azure.SchemaSingleZone(), + // TODO - 3.0 make Computed only + "zones": { + Type: pluginsdk.TypeList, + Optional: true, + Computed: true, + ForceNew: true, + Deprecated: "This property has been deprecated in favour of `availability_zone` due to a breaking behavioural change in Azure: https://azure.microsoft.com/en-us/updates/zone-behavior-change/", + MaxItems: 1, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + ValidateFunc: validation.StringIsNotEmpty, + }, + }, "id": { Type: pluginsdk.TypeString, @@ -216,7 +244,11 @@ func resourceArmLoadBalancerCreateUpdate(d *pluginsdk.ResourceData, meta interfa properties := network.LoadBalancerPropertiesFormat{} if _, ok := d.GetOk("frontend_ip_configuration"); ok { - properties.FrontendIPConfigurations = expandAzureRmLoadBalancerFrontendIpConfigurations(d) + 
frontendIPConfigurations, err := expandAzureRmLoadBalancerFrontendIpConfigurations(d) + if err != nil { + return err + } + properties.FrontendIPConfigurations = frontendIPConfigurations } loadBalancer := network.LoadBalancer{ @@ -321,11 +353,12 @@ func resourceArmLoadBalancerDelete(d *pluginsdk.ResourceData, meta interface{}) return nil } -func expandAzureRmLoadBalancerFrontendIpConfigurations(d *pluginsdk.ResourceData) *[]network.FrontendIPConfiguration { +func expandAzureRmLoadBalancerFrontendIpConfigurations(d *pluginsdk.ResourceData) (*[]network.FrontendIPConfiguration, error) { configs := d.Get("frontend_ip_configuration").([]interface{}) frontEndConfigs := make([]network.FrontendIPConfiguration, 0, len(configs)) + sku := d.Get("sku").(string) - for _, configRaw := range configs { + for index, configRaw := range configs { data := configRaw.(map[string]interface{}) privateIpAllocationMethod := data["private_ip_address_allocation"].(string) @@ -337,8 +370,7 @@ func expandAzureRmLoadBalancerFrontendIpConfigurations(d *pluginsdk.ResourceData properties.PrivateIPAddress = &v } - properties.PrivateIPAddressVersion = network.IPVersion(data["private_ip_address_version"].(string)) - + subnetSet := false if v := data["public_ip_address_id"].(string); v != "" { properties.PublicIPAddress = &network.PublicIPAddress{ ID: &v, @@ -352,13 +384,51 @@ func expandAzureRmLoadBalancerFrontendIpConfigurations(d *pluginsdk.ResourceData } if v := data["subnet_id"].(string); v != "" { + subnetSet = true + properties.PrivateIPAddressVersion = network.IPVersionIPv4 + if v := data["private_ip_address_version"].(string); v != "" { + properties.PrivateIPAddressVersion = network.IPVersion(v) + } properties.Subnet = &network.Subnet{ ID: &v, } } name := data["name"].(string) - zones := azure.ExpandZones(data["zones"].([]interface{})) + // TODO - get zone list for each location by Resource API, instead of hardcode + zones := &[]string{"1", "2"} + zonesSet := false + // TODO - Remove in 3.0 + 
if deprecatedZonesRaw, ok := d.GetOk(fmt.Sprintf("frontend_ip_configuration.%d.zones", index)); ok { + zonesSet = true + deprecatedZones := azure.ExpandZones(deprecatedZonesRaw.([]interface{})) + if deprecatedZones != nil { + zones = deprecatedZones + } + } + + if availabilityZones, ok := d.GetOk(fmt.Sprintf("frontend_ip_configuration.%d.availability_zone", index)); ok { + zonesSet = true + switch availabilityZones.(string) { + case "1", "2", "3": + zones = &[]string{availabilityZones.(string)} + case "Zone-Redundant": + zones = &[]string{"1", "2"} + case "No-Zone": + zones = &[]string{} + } + } + if !strings.EqualFold(sku, string(network.LoadBalancerSkuNameStandard)) { + if zonesSet && len(*zones) > 0 { + return nil, fmt.Errorf("Availability Zones are not available on the `Basic` SKU") + } + zones = &[]string{} + } else if !subnetSet { + if zonesSet && len(*zones) > 0 { + return nil, fmt.Errorf("Networking supports zones only for frontendIpconfigurations which reference a subnet.") + } + zones = &[]string{} + } frontEndConfig := network.FrontendIPConfiguration{ Name: &name, FrontendIPConfigurationPropertiesFormat: &properties, @@ -368,7 +438,7 @@ func expandAzureRmLoadBalancerFrontendIpConfigurations(d *pluginsdk.ResourceData frontEndConfigs = append(frontEndConfigs, frontEndConfig) } - return &frontEndConfigs + return &frontEndConfigs, nil } func flattenLoadBalancerFrontendIpConfiguration(ipConfigs *[]network.FrontendIPConfiguration) []interface{} { @@ -388,11 +458,20 @@ func flattenLoadBalancerFrontendIpConfiguration(ipConfigs *[]network.FrontendIPC ipConfig["id"] = *config.ID } - zones := make([]string, 0) - if zs := config.Zones; zs != nil { - zones = *zs + availabilityZones := "No-Zone" + zonesDeprecated := make([]string, 0) + if config.Zones != nil { + if len(*config.Zones) > 1 { + availabilityZones = "Zone-Redundant" + } + if len(*config.Zones) == 1 { + zones := *config.Zones + availabilityZones = zones[0] + zonesDeprecated = zones + } } - ipConfig["zones"] 
= zones + ipConfig["availability_zone"] = availabilityZones + ipConfig["zones"] = zonesDeprecated if props := config.FrontendIPConfigurationPropertiesFormat; props != nil { ipConfig["private_ip_address_allocation"] = string(props.PrivateIPAllocationMethod) diff --git a/azurerm/internal/services/loadbalancer/loadbalancer_resource_test.go b/azurerm/internal/services/loadbalancer/loadbalancer_resource_test.go index 2564109ad1db..20742792980f 100644 --- a/azurerm/internal/services/loadbalancer/loadbalancer_resource_test.go +++ b/azurerm/internal/services/loadbalancer/loadbalancer_resource_test.go @@ -165,6 +165,51 @@ func TestAccAzureRMLoadBalancer_privateIP(t *testing.T) { }) } +func TestAccAzureRMLoadBalancer_ZoneRedundant(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_lb", "test") + r := LoadBalancer{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.availability_zone(data, "Zone-Redundant"), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccAzureRMLoadBalancer_NoZone(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_lb", "test") + r := LoadBalancer{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.availability_zone(data, "No-Zone"), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccAzureRMLoadBalancer_SingleZone(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_lb", "test") + r := LoadBalancer{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.availability_zone(data, "1"), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + func (r LoadBalancer) Exists(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { loadBalancerName := state.Attributes["name"] resourceGroup 
:= state.Attributes["resource_group_name"] @@ -497,3 +542,46 @@ resource "azurerm_lb" "test" { } `, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger) } + +func (r LoadBalancer) availability_zone(data acceptance.TestData, zone string) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-lb-%d" + location = "%s" +} + +resource "azurerm_virtual_network" "test" { + name = "acctvn-%d" + address_space = ["10.0.0.0/16"] + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name +} + +resource "azurerm_subnet" "test" { + name = "acctsub-%d" + resource_group_name = azurerm_resource_group.test.name + virtual_network_name = azurerm_virtual_network.test.name + address_prefix = "10.0.2.0/24" +} + +resource "azurerm_lb" "test" { + name = "acctestlb-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + sku = "Standard" + + frontend_ip_configuration { + name = "Internal" + private_ip_address_allocation = "Static" + private_ip_address_version = "IPv4" + private_ip_address = "10.0.2.7" + subnet_id = azurerm_subnet.test.id + availability_zone = "%s" + } +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger, zone) +} diff --git a/azurerm/internal/services/loadbalancer/loadbalancer_rule_resource_test.go b/azurerm/internal/services/loadbalancer/loadbalancer_rule_resource_test.go index 3e355c55cd05..1c7ab958acdc 100644 --- a/azurerm/internal/services/loadbalancer/loadbalancer_rule_resource_test.go +++ b/azurerm/internal/services/loadbalancer/loadbalancer_rule_resource_test.go @@ -5,7 +5,7 @@ import ( "fmt" "testing" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-05-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" @@ -156,7 +156,7 @@ func TestAccAzureRMLoadBalancerRule_updateMultipleRules(t *testing.T) { func TestAccAzureRMLoadBalancerRule_vmssBackendPoolUpdateRemoveLBRule(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_lb_rule", "test") - lbRuleName := fmt.Sprintf("LbRule-%s", acceptance.RandStringFromCharSet(8, acceptance.CharSetAlpha)) + lbRuleName := fmt.Sprintf("LbRule-%s", data.RandomString) r := LoadBalancerRule{} data.ResourceTest(t, r, []acceptance.TestStep{ diff --git a/azurerm/internal/services/loadbalancer/nat_pool_resource.go b/azurerm/internal/services/loadbalancer/nat_pool_resource.go index 776e9ee44f80..0eb3798b34e7 100644 --- a/azurerm/internal/services/loadbalancer/nat_pool_resource.go +++ b/azurerm/internal/services/loadbalancer/nat_pool_resource.go @@ -5,7 +5,7 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-05-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" diff --git a/azurerm/internal/services/loadbalancer/nat_rule_resource.go b/azurerm/internal/services/loadbalancer/nat_rule_resource.go index d6bf6ef51b58..9c76194738d5 100644 --- a/azurerm/internal/services/loadbalancer/nat_rule_resource.go +++ b/azurerm/internal/services/loadbalancer/nat_rule_resource.go @@ -5,7 +5,7 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-05-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" diff --git a/azurerm/internal/services/loadbalancer/outbound_rule_resource.go b/azurerm/internal/services/loadbalancer/outbound_rule_resource.go index 31c13a5480bd..213ec6a636e7 100644 --- a/azurerm/internal/services/loadbalancer/outbound_rule_resource.go +++ b/azurerm/internal/services/loadbalancer/outbound_rule_resource.go @@ -5,7 +5,7 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-05-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" diff --git a/azurerm/internal/services/loadbalancer/probe_resource.go b/azurerm/internal/services/loadbalancer/probe_resource.go index ba3be1d92a0d..94dcb2895b9d 100644 --- a/azurerm/internal/services/loadbalancer/probe_resource.go +++ b/azurerm/internal/services/loadbalancer/probe_resource.go @@ -5,7 +5,7 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-05-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" diff --git a/azurerm/internal/services/loadbalancer/registration.go b/azurerm/internal/services/loadbalancer/registration.go index 98862829f576..51b7a883879a 100644 --- a/azurerm/internal/services/loadbalancer/registration.go +++ 
b/azurerm/internal/services/loadbalancer/registration.go @@ -49,11 +49,6 @@ func (r Registration) SupportedResources() map[string]*pluginsdk.Resource { } } -// PackagePath is the relative path to this package -func (r Registration) PackagePath() string { - return "TODO: do we need this?" -} - // Resources returns a list of Resources supported by this Service func (r Registration) Resources() []sdk.Resource { return []sdk.Resource{ diff --git a/azurerm/internal/services/loadbalancer/rule_resource.go b/azurerm/internal/services/loadbalancer/rule_resource.go index 6c57ac373677..ead5946604fd 100644 --- a/azurerm/internal/services/loadbalancer/rule_resource.go +++ b/azurerm/internal/services/loadbalancer/rule_resource.go @@ -5,7 +5,7 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-05-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" diff --git a/azurerm/internal/services/loganalytics/log_analytics_cluster_customer_managed_key_resource.go b/azurerm/internal/services/loganalytics/log_analytics_cluster_customer_managed_key_resource.go index ae5c7be5f2b7..d3114505cdcb 100644 --- a/azurerm/internal/services/loganalytics/log_analytics_cluster_customer_managed_key_resource.go +++ b/azurerm/internal/services/loganalytics/log_analytics_cluster_customer_managed_key_resource.go @@ -112,7 +112,7 @@ func resourceLogAnalyticsClusterCustomerManagedKeyUpdate(d *pluginsdk.ResourceDa updateWait := logAnalyticsClusterWaitForState(ctx, meta, d.Timeout(pluginsdk.TimeoutUpdate), clusterId.ResourceGroup, clusterId.ClusterName) - if _, err := updateWait.WaitForState(); err != nil { + if _, err := updateWait.WaitForStateContext(ctx); err != nil { return fmt.Errorf("waiting 
for Log Analytics Cluster to finish updating %q (Resource Group %q): %v", clusterId.ClusterName, clusterId.ResourceGroup, err) } @@ -196,7 +196,7 @@ func resourceLogAnalyticsClusterCustomerManagedKeyDelete(d *pluginsdk.ResourceDa deleteWait := logAnalyticsClusterWaitForState(ctx, meta, d.Timeout(pluginsdk.TimeoutDelete), clusterId.ResourceGroup, clusterId.ClusterName) - if _, err := deleteWait.WaitForState(); err != nil { + if _, err := deleteWait.WaitForStateContext(ctx); err != nil { return fmt.Errorf("waiting for Log Analytics Cluster to finish updating %q (Resource Group %q): %v", clusterId.ClusterName, clusterId.ResourceGroup, err) } diff --git a/azurerm/internal/services/loganalytics/log_analytics_cluster_resource.go b/azurerm/internal/services/loganalytics/log_analytics_cluster_resource.go index 94c30a8d4f1d..6b1ff71aa090 100644 --- a/azurerm/internal/services/loganalytics/log_analytics_cluster_resource.go +++ b/azurerm/internal/services/loganalytics/log_analytics_cluster_resource.go @@ -148,7 +148,7 @@ func resourceLogAnalyticsClusterCreate(d *pluginsdk.ResourceData, meta interface createWait := logAnalyticsClusterWaitForState(ctx, meta, d.Timeout(pluginsdk.TimeoutCreate), id.ResourceGroup, id.ClusterName) - if _, err := createWait.WaitForState(); err != nil { + if _, err := createWait.WaitForStateContext(ctx); err != nil { return fmt.Errorf("waiting for Log Analytics Cluster to finish updating %q (Resource Group %q): %v", id.ClusterName, id.ResourceGroup, err) } @@ -229,7 +229,7 @@ func resourceLogAnalyticsClusterUpdate(d *pluginsdk.ResourceData, meta interface updateWait := logAnalyticsClusterWaitForState(ctx, meta, d.Timeout(pluginsdk.TimeoutUpdate), id.ResourceGroup, id.ClusterName) - if _, err := updateWait.WaitForState(); err != nil { + if _, err := updateWait.WaitForStateContext(ctx); err != nil { return fmt.Errorf("waiting for Log Analytics Cluster to finish updating %q (Resource Group %q): %v", id.ClusterName, id.ResourceGroup, err) } diff --git 
a/azurerm/internal/services/loganalytics/log_analytics_data_export_resource_test.go b/azurerm/internal/services/loganalytics/log_analytics_data_export_resource_test.go index 10a8c99e676b..2e2c505d6662 100644 --- a/azurerm/internal/services/loganalytics/log_analytics_data_export_resource_test.go +++ b/azurerm/internal/services/loganalytics/log_analytics_data_export_resource_test.go @@ -5,13 +5,12 @@ import ( "fmt" "testing" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/loganalytics/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) type LogAnalyticsDataExportRuleResource struct { diff --git a/azurerm/internal/services/loganalytics/log_analytics_datasource_windows_event_resource.go b/azurerm/internal/services/loganalytics/log_analytics_datasource_windows_event_resource.go index 65a0227609d5..a3f5f1a744f3 100644 --- a/azurerm/internal/services/loganalytics/log_analytics_datasource_windows_event_resource.go +++ b/azurerm/internal/services/loganalytics/log_analytics_datasource_windows_event_resource.go @@ -7,7 +7,6 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/services/operationalinsights/mgmt/2020-08-01/operationalinsights" - "github.com/hashicorp/terraform-plugin-sdk/helper/structure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" @@ -164,7 +163,7 @@ func 
resourceLogAnalyticsDataSourceWindowsEventRead(d *pluginsdk.ResourceData, m d.Set("resource_group_name", id.ResourceGroup) d.Set("workspace_name", id.Workspace) if props := resp.Properties; props != nil { - propStr, err := structure.FlattenJsonToString(props.(map[string]interface{})) + propStr, err := pluginsdk.FlattenJsonToString(props.(map[string]interface{})) if err != nil { return fmt.Errorf("failed to flatten properties map to json for Log Analytics DataSource Windows Event %q (Resource Group %q / Workspace: %q): %+v", id.Name, id.ResourceGroup, id.Workspace, err) } diff --git a/azurerm/internal/services/loganalytics/log_analytics_datasource_windows_performance_counter_resource.go b/azurerm/internal/services/loganalytics/log_analytics_datasource_windows_performance_counter_resource.go index fef64e45bd61..07b10a57c98a 100644 --- a/azurerm/internal/services/loganalytics/log_analytics_datasource_windows_performance_counter_resource.go +++ b/azurerm/internal/services/loganalytics/log_analytics_datasource_windows_performance_counter_resource.go @@ -8,7 +8,6 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/services/operationalinsights/mgmt/2020-08-01/operationalinsights" - "github.com/hashicorp/terraform-plugin-sdk/helper/structure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" @@ -170,7 +169,7 @@ func resourceLogAnalyticsDataSourceWindowsPerformanceCounterRead(d *pluginsdk.Re d.Set("resource_group_name", id.ResourceGroup) d.Set("workspace_name", id.Workspace) if props := resp.Properties; props != nil { - propStr, err := structure.FlattenJsonToString(props.(map[string]interface{})) + propStr, err := pluginsdk.FlattenJsonToString(props.(map[string]interface{})) if err != nil { return fmt.Errorf("failed to flatten properties map to json for Log Analytics DataSource 
Windows Performance Counter %q (Resource Group %q / Workspace: %q): %+v", id.Name, id.ResourceGroup, id.Workspace, err) } diff --git a/azurerm/internal/services/loganalytics/log_analytics_linked_service_resource.go b/azurerm/internal/services/loganalytics/log_analytics_linked_service_resource.go index 60b2b07ad5cd..fcc0fa7e8e64 100644 --- a/azurerm/internal/services/loganalytics/log_analytics_linked_service_resource.go +++ b/azurerm/internal/services/loganalytics/log_analytics_linked_service_resource.go @@ -361,7 +361,7 @@ func resourceLogAnalyticsLinkedServiceDelete(d *pluginsdk.ResourceData, meta int // so we must wait for the state to change before we return from the delete function deleteWait := logAnalyticsLinkedServiceDeleteWaitForState(ctx, meta, d.Timeout(pluginsdk.TimeoutDelete), resourceGroup, workspaceName, serviceType) - if _, err := deleteWait.WaitForState(); err != nil { + if _, err := deleteWait.WaitForStateContext(ctx); err != nil { return fmt.Errorf("waiting for Log Analytics Cluster to finish deleting '%s/%s' (Resource Group %q): %+v", workspaceName, serviceType, resourceGroup, err) } diff --git a/azurerm/internal/services/loganalytics/log_analytics_linked_service_resource_test.go b/azurerm/internal/services/loganalytics/log_analytics_linked_service_resource_test.go index c08deca1f3a2..7de714bea2b0 100644 --- a/azurerm/internal/services/loganalytics/log_analytics_linked_service_resource_test.go +++ b/azurerm/internal/services/loganalytics/log_analytics_linked_service_resource_test.go @@ -7,12 +7,11 @@ import ( "testing" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) type LogAnalyticsLinkedServiceResource struct { diff --git a/azurerm/internal/services/loganalytics/log_analytics_saved_search_resource_test.go b/azurerm/internal/services/loganalytics/log_analytics_saved_search_resource_test.go index 9b6131ec052f..97da03a90112 100644 --- a/azurerm/internal/services/loganalytics/log_analytics_saved_search_resource_test.go +++ b/azurerm/internal/services/loganalytics/log_analytics_saved_search_resource_test.go @@ -6,13 +6,12 @@ import ( "strings" "testing" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/loganalytics/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) type LogAnalyticsSavedSearchResource struct { diff --git a/azurerm/internal/services/loganalytics/log_analytics_solution_resource.go b/azurerm/internal/services/loganalytics/log_analytics_solution_resource.go index 13e5998ea9d6..6f060b988d44 100644 --- a/azurerm/internal/services/loganalytics/log_analytics_solution_resource.go +++ b/azurerm/internal/services/loganalytics/log_analytics_solution_resource.go @@ -6,15 +6,14 @@ import ( "strings" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/loganalytics/validate" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" - 
"github.com/Azure/azure-sdk-for-go/services/preview/operationsmanagement/mgmt/2015-11-01-preview/operationsmanagement" "github.com/hashicorp/go-azure-helpers/response" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" loganalyticsParse "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/loganalytics/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/loganalytics/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/suppress" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" diff --git a/azurerm/internal/services/loganalytics/log_analytics_solution_resource_test.go b/azurerm/internal/services/loganalytics/log_analytics_solution_resource_test.go index 0c6e4f0dbcbd..e51ee0470c28 100644 --- a/azurerm/internal/services/loganalytics/log_analytics_solution_resource_test.go +++ b/azurerm/internal/services/loganalytics/log_analytics_solution_resource_test.go @@ -6,13 +6,11 @@ import ( "testing" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" - - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) type LogAnalyticsSolutionResource struct { diff --git a/azurerm/internal/services/loganalytics/log_analytics_storage_insights_import.go b/azurerm/internal/services/loganalytics/log_analytics_storage_insights_import.go deleted file mode 100644 index 9c63e47dd632..000000000000 --- a/azurerm/internal/services/loganalytics/log_analytics_storage_insights_import.go +++ /dev/null @@ -1,18 +0,0 @@ -package loganalytics - -import ( - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/loganalytics/parse" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" -) - -func logAnalyticsStorageInsightsImporter(d *pluginsdk.ResourceData, meta interface{}) ([]*pluginsdk.ResourceData, error) { - if _, err := parse.LogAnalyticsStorageInsightsID(d.Id()); err != nil { - return []*pluginsdk.ResourceData{d}, err - } - - if v, ok := d.GetOk("storage_account_key"); ok && v.(string) != "" { - d.Set("storage_account_key", v) - } - - return []*pluginsdk.ResourceData{d}, nil -} diff --git a/azurerm/internal/services/loganalytics/log_analytics_storage_insights_resource.go b/azurerm/internal/services/loganalytics/log_analytics_storage_insights_resource.go index 0b03a45da5d7..2c7bfaf96bda 100644 --- a/azurerm/internal/services/loganalytics/log_analytics_storage_insights_resource.go +++ b/azurerm/internal/services/loganalytics/log_analytics_storage_insights_resource.go @@ -1,14 +1,15 @@ package loganalytics import ( + "context" "fmt" "log" "time" "github.com/Azure/azure-sdk-for-go/services/operationalinsights/mgmt/2020-08-01/operationalinsights" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + azValidate 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/loganalytics/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/loganalytics/validate" @@ -34,9 +35,16 @@ func resourceLogAnalyticsStorageInsights() *pluginsdk.Resource { Delete: pluginsdk.DefaultTimeout(30 * time.Minute), }, - Importer: &schema.ResourceImporter{ - State: logAnalyticsStorageInsightsImporter, - }, + Importer: pluginsdk.ImporterValidatingResourceIdThen(func(id string) error { + _, err := parse.LogAnalyticsStorageInsightsID(id) + return err + }, func(ctx context.Context, d *pluginsdk.ResourceData, meta interface{}) ([]*pluginsdk.ResourceData, error) { + if v, ok := d.GetOk("storage_account_key"); ok && v.(string) != "" { + d.Set("storage_account_key", v) + } + + return []*pluginsdk.ResourceData{d}, nil + }), Schema: map[string]*pluginsdk.Schema{ "name": { @@ -62,13 +70,10 @@ func resourceLogAnalyticsStorageInsights() *pluginsdk.Resource { }, "storage_account_key": { - Type: pluginsdk.TypeString, - Required: true, - Sensitive: true, - ValidateFunc: validation.All( - validation.StringIsNotEmpty, - validate.IsBase64Encoded, - ), + Type: pluginsdk.TypeString, + Required: true, + Sensitive: true, + ValidateFunc: azValidate.Base64EncodedString, }, "blob_container_names": { diff --git a/azurerm/internal/services/loganalytics/log_analytics_workspace_data_source.go b/azurerm/internal/services/loganalytics/log_analytics_workspace_data_source.go index 640803c441ad..035351cef6c9 100644 --- a/azurerm/internal/services/loganalytics/log_analytics_workspace_data_source.go +++ b/azurerm/internal/services/loganalytics/log_analytics_workspace_data_source.go @@ -5,10 +5,9 @@ import ( "log" "time" - 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/loganalytics/parse" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/loganalytics/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" diff --git a/azurerm/internal/services/loganalytics/log_analytics_workspace_resource_test.go b/azurerm/internal/services/loganalytics/log_analytics_workspace_resource_test.go index 00bab8d34e8f..8f24af08b0f9 100644 --- a/azurerm/internal/services/loganalytics/log_analytics_workspace_resource_test.go +++ b/azurerm/internal/services/loganalytics/log_analytics_workspace_resource_test.go @@ -5,13 +5,12 @@ import ( "fmt" "testing" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/loganalytics/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) type LogAnalyticsWorkspaceResource struct { diff --git a/azurerm/internal/services/loganalytics/validate/base64_encoded.go b/azurerm/internal/services/loganalytics/validate/base64_encoded.go deleted file mode 100644 index eb14253d5f63..000000000000 --- 
a/azurerm/internal/services/loganalytics/validate/base64_encoded.go +++ /dev/null @@ -1,27 +0,0 @@ -package validate - -import ( - "encoding/base64" - "fmt" - "strings" -) - -func IsBase64Encoded(i interface{}, k string) (warnings []string, errors []error) { - v, ok := i.(string) - if !ok { - errors = append(errors, fmt.Errorf("expected %q to be a string", k)) - return - } - - if len(strings.TrimSpace(v)) < 1 { - errors = append(errors, fmt.Errorf("%q must not be an empty string", k)) - return - } - - if _, err := base64.StdEncoding.DecodeString(v); err != nil { - errors = append(errors, fmt.Errorf("expected %q to be a base64 encoded string", k)) - return - } - - return -} diff --git a/azurerm/internal/services/logic/integration_service_environment.go b/azurerm/internal/services/logic/integration_service_environment.go index 165c32f9ecc3..b894f247e1d0 100644 --- a/azurerm/internal/services/logic/integration_service_environment.go +++ b/azurerm/internal/services/logic/integration_service_environment.go @@ -9,7 +9,7 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/services/logic/mgmt/2019-05-01/logic" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" @@ -314,7 +314,7 @@ func resourceIntegrationServiceEnvironmentDelete(d *pluginsdk.ResourceData, meta NotFoundChecks: 1, } - if _, err := stateConf.WaitForState(); err != nil { + if _, err := stateConf.WaitForStateContext(ctx); err != nil { return fmt.Errorf("waiting for deletion of Integration Service Environment %q (Resource Group %q): %+v", name, resourceGroup, err) } diff --git a/azurerm/internal/services/logic/logic_app_action_custom_resource.go 
b/azurerm/internal/services/logic/logic_app_action_custom_resource.go index 8af10e704370..5bf3ce0f89ed 100644 --- a/azurerm/internal/services/logic/logic_app_action_custom_resource.go +++ b/azurerm/internal/services/logic/logic_app_action_custom_resource.go @@ -6,7 +6,6 @@ import ( "log" "time" - "github.com/hashicorp/terraform-plugin-sdk/helper/structure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" @@ -46,7 +45,7 @@ func resourceLogicAppActionCustom() *pluginsdk.Resource { Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringIsJSON, - DiffSuppressFunc: structure.SuppressJsonDiff, + DiffSuppressFunc: pluginsdk.SuppressJsonDiff, }, }, } diff --git a/azurerm/internal/services/logic/logic_app_action_http_resource.go b/azurerm/internal/services/logic/logic_app_action_http_resource.go index d9fd4777d5f3..4fdc363c129a 100644 --- a/azurerm/internal/services/logic/logic_app_action_http_resource.go +++ b/azurerm/internal/services/logic/logic_app_action_http_resource.go @@ -1,6 +1,7 @@ package logic import ( + "encoding/json" "fmt" "log" "net/http" @@ -62,8 +63,10 @@ func resourceLogicAppActionHTTP() *pluginsdk.Resource { }, "body": { - Type: pluginsdk.TypeString, - Optional: true, + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringIsJSON, + DiffSuppressFunc: pluginsdk.SuppressJsonDiff, }, "headers": { @@ -101,6 +104,9 @@ func resourceLogicAppActionHTTP() *pluginsdk.Resource { } func resourceLogicAppActionHTTPCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { + logicAppId := d.Get("logic_app_id").(string) + name := d.Get("name").(string) + headersRaw := d.Get("headers").(map[string]interface{}) headers, err := expandLogicAppActionHttpHeaders(headersRaw) if err != nil { @@ -113,8 +119,13 @@ func 
resourceLogicAppActionHTTPCreateUpdate(d *pluginsdk.ResourceData, meta inte "headers": headers, } - if v, ok := d.GetOk("body"); ok { - inputs["body"] = v.(string) + // storing action's body in json object to keep consistent with azure portal + if bodyRaw, ok := d.GetOk("body"); ok { + var body map[string]interface{} + if err := json.Unmarshal([]byte(bodyRaw.(string)), &body); err != nil { + return fmt.Errorf("error unmarshalling JSON for Action %q: %+v", name, err) + } + inputs["body"] = body } action := map[string]interface{}{ @@ -126,8 +137,6 @@ func resourceLogicAppActionHTTPCreateUpdate(d *pluginsdk.ResourceData, meta inte action["runAfter"] = expandLogicAppActionRunAfter(v.(*pluginsdk.Set).List()) } - logicAppId := d.Get("logic_app_id").(string) - name := d.Get("name").(string) err = resourceLogicAppActionUpdate(d, meta, logicAppId, name, action, "azurerm_logic_app_action_http") if err != nil { return err @@ -186,7 +195,17 @@ func resourceLogicAppActionHTTPRead(d *pluginsdk.ResourceData, meta interface{}) } if body := inputs["body"]; body != nil { - d.Set("body", body.(string)) + // TODO: remove in 3.0, this is preserved for backward compatibility + if v, ok := body.(string); ok { + d.Set("body", v) + } else { + // if user edit workflow in portal, the body becomes json object + v, err := json.Marshal(body) + if err != nil { + return fmt.Errorf("error serializing `body` for Action %q: %+v", name, err) + } + d.Set("body", string(v)) + } } if headers := inputs["headers"]; headers != nil { diff --git a/azurerm/internal/services/logic/logic_app_trigger_custom_resource.go b/azurerm/internal/services/logic/logic_app_trigger_custom_resource.go index 862ba21ee56a..b6e2a09b0865 100644 --- a/azurerm/internal/services/logic/logic_app_trigger_custom_resource.go +++ b/azurerm/internal/services/logic/logic_app_trigger_custom_resource.go @@ -6,7 +6,6 @@ import ( "log" "time" - "github.com/hashicorp/terraform-plugin-sdk/helper/structure" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" @@ -46,7 +45,7 @@ func resourceLogicAppTriggerCustom() *pluginsdk.Resource { Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringIsJSON, - DiffSuppressFunc: structure.SuppressJsonDiff, + DiffSuppressFunc: pluginsdk.SuppressJsonDiff, }, }, } diff --git a/azurerm/internal/services/logic/logic_app_trigger_http_request_resource.go b/azurerm/internal/services/logic/logic_app_trigger_http_request_resource.go index 14b1004a379d..b9005dedfed4 100644 --- a/azurerm/internal/services/logic/logic_app_trigger_http_request_resource.go +++ b/azurerm/internal/services/logic/logic_app_trigger_http_request_resource.go @@ -8,7 +8,6 @@ import ( "net/http" "time" - "github.com/hashicorp/terraform-plugin-sdk/helper/structure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/logic/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" @@ -61,7 +60,7 @@ func resourceLogicAppTriggerHttpRequest() *pluginsdk.Resource { Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringIsJSON, - DiffSuppressFunc: structure.SuppressJsonDiff, + DiffSuppressFunc: pluginsdk.SuppressJsonDiff, }, "method": { diff --git a/azurerm/internal/services/logic/logic_app_workflow_resource.go b/azurerm/internal/services/logic/logic_app_workflow_resource.go index 314cc9a17a5a..dc131b449e0a 100644 --- a/azurerm/internal/services/logic/logic_app_workflow_resource.go +++ b/azurerm/internal/services/logic/logic_app_workflow_resource.go @@ -81,7 +81,7 @@ func resourceLogicAppWorkflow() *pluginsdk.Resource { Type: pluginsdk.TypeString, Optional: true, ForceNew: true, 
- Default: "https://pluginsdk.management.azure.com/providers/Microsoft.Logic/schemas/2016-06-01/workflowdefinition.json#", + Default: "https://schema.management.azure.com/providers/Microsoft.Logic/schemas/2016-06-01/workflowdefinition.json#", }, "workflow_version": { @@ -306,8 +306,12 @@ func resourceLogicAppWorkflowRead(d *pluginsdk.ResourceData, meta interface{}) e } if definition := props.Definition; definition != nil { if v, ok := definition.(map[string]interface{}); ok { - d.Set("workflow_schema", v["$schema"].(string)) - d.Set("workflow_version", v["contentVersion"].(string)) + if v["$schema"] != nil { + d.Set("workflow_schema", v["$schema"].(string)) + } + if v["contentVersion"] != nil { + d.Set("workflow_version", v["contentVersion"].(string)) + } } } diff --git a/azurerm/internal/services/machinelearning/machine_learning_compute_cluster_resource.go b/azurerm/internal/services/machinelearning/machine_learning_compute_cluster_resource.go new file mode 100644 index 000000000000..44de7ed68355 --- /dev/null +++ b/azurerm/internal/services/machinelearning/machine_learning_compute_cluster_resource.go @@ -0,0 +1,348 @@ +package machinelearning + +import ( + "fmt" + "time" + + "github.com/Azure/azure-sdk-for-go/services/machinelearningservices/mgmt/2020-04-01/machinelearningservices" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/machinelearning/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func resourceComputeCluster() *pluginsdk.Resource { + return &pluginsdk.Resource{ + Create: resourceComputeClusterCreate, + Read: resourceComputeClusterRead, + Delete: resourceComputeClusterDelete, + + Importer: pluginsdk.ImporterValidatingResourceId(func(id string) error { + _, err := parse.ComputeClusterID(id) + return err + }), + + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), + }, + + Schema: map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + }, + + "machine_learning_workspace_id": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + }, + + "location": azure.SchemaLocation(), + + "vm_size": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + }, + + "vm_priority": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{string(machinelearningservices.Dedicated), string(machinelearningservices.LowPriority)}, false), + }, + + "identity": { + Type: pluginsdk.TypeList, + Required: true, + ForceNew: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "type": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{ + string(machinelearningservices.SystemAssigned), + }, false), + }, + "principal_id": { + Type: pluginsdk.TypeString, + Computed: true, + }, + "tenant_id": { + Type: pluginsdk.TypeString, + Computed: true, + }, + }, + }, + }, + + "scale_settings": { + Type: pluginsdk.TypeList, + Required: true, + ForceNew: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: 
map[string]*pluginsdk.Schema{ + "max_node_count": { + Type: pluginsdk.TypeInt, + Required: true, + ForceNew: true, + }, + "min_node_count": { + Type: pluginsdk.TypeInt, + Required: true, + ForceNew: true, + }, + "scale_down_nodes_after_idle_duration": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + }, + }, + }, + }, + + "description": { + Type: pluginsdk.TypeString, + Optional: true, + ForceNew: true, + }, + + "subnet_resource_id": { + Type: pluginsdk.TypeString, + Optional: true, + ForceNew: true, + }, + + "tags": tags.ForceNewSchema(), + }, + } +} + +func resourceComputeClusterCreate(d *pluginsdk.ResourceData, meta interface{}) error { + mlWorkspacesClient := meta.(*clients.Client).MachineLearning.WorkspacesClient + mlComputeClient := meta.(*clients.Client).MachineLearning.MachineLearningComputeClient + ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) + defer cancel() + + name := d.Get("name").(string) + + // Get Machine Learning Workspace Name and Resource Group from ID + workspaceID, err := parse.WorkspaceID(d.Get("machine_learning_workspace_id").(string)) + if err != nil { + return err + } + + existing, err := mlComputeClient.Get(ctx, workspaceID.ResourceGroup, workspaceID.Name, name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("error checking for existing Compute Cluster %q in Workspace %q (Resource Group %q): %s", + name, workspaceID.Name, workspaceID.ResourceGroup, err) + } + } + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_machine_learning_compute_cluster", *existing.ID) + } + + computeClusterAmlComputeProperties := machinelearningservices.AmlComputeProperties{ + VMSize: utils.String(d.Get("vm_size").(string)), + VMPriority: machinelearningservices.VMPriority(d.Get("vm_priority").(string)), + ScaleSettings: expandScaleSettings(d.Get("scale_settings").([]interface{})), + } + + if subnetId, ok := 
d.GetOk("subnet_resource_id"); ok && subnetId.(string) != "" { + computeClusterAmlComputeProperties.Subnet = &machinelearningservices.ResourceID{ID: utils.String(subnetId.(string))} + } + + computeClusterProperties := machinelearningservices.AmlCompute{ + Properties: &computeClusterAmlComputeProperties, + ComputeLocation: utils.String(d.Get("location").(string)), + Description: utils.String(d.Get("description").(string)), + } + + amlComputeProperties, isAmlCompute := (machinelearningservices.BasicCompute).AsAmlCompute(computeClusterProperties) + if !isAmlCompute { + return fmt.Errorf("no compute cluster") + } + + // Get SKU from Workspace + workspace, err := mlWorkspacesClient.Get(ctx, workspaceID.ResourceGroup, workspaceID.Name) + if err != nil { + return err + } + + computeClusterParameters := machinelearningservices.ComputeResource{ + Properties: amlComputeProperties, + Identity: expandComputeClusterIdentity(d.Get("identity").([]interface{})), + Location: computeClusterProperties.ComputeLocation, + Tags: tags.Expand(d.Get("tags").(map[string]interface{})), + Sku: workspace.Sku, + } + + future, err := mlComputeClient.CreateOrUpdate(ctx, workspaceID.ResourceGroup, workspaceID.Name, name, computeClusterParameters) + if err != nil { + return fmt.Errorf("creating Compute Cluster %q in workspace %q (Resource Group %q): %+v", + name, workspaceID.Name, workspaceID.ResourceGroup, err) + } + if err := future.WaitForCompletionRef(ctx, mlComputeClient.Client); err != nil { + return fmt.Errorf("waiting for creation of Compute Cluster %q in workspace %q (Resource Group %q): %+v", + name, workspaceID.Name, workspaceID.ResourceGroup, err) + } + + subscriptionId := meta.(*clients.Client).Account.SubscriptionId + id := parse.NewComputeClusterID(subscriptionId, workspaceID.ResourceGroup, workspaceID.Name, name) + d.SetId(id.ID()) + + return resourceComputeClusterRead(d, meta) +} + +func resourceComputeClusterRead(d *pluginsdk.ResourceData, meta interface{}) error { + 
mlComputeClient := meta.(*clients.Client).MachineLearning.MachineLearningComputeClient + ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.ComputeClusterID(d.Id()) + if err != nil { + return fmt.Errorf("parsing Compute Cluster ID `%q`: %+v", d.Id(), err) + } + + computeResource, err := mlComputeClient.Get(ctx, id.ResourceGroup, id.WorkspaceName, id.ComputeName) + if err != nil { + if utils.ResponseWasNotFound(computeResource.Response) { + d.SetId("") + return nil + } + return fmt.Errorf("making Read request on Compute Cluster %q in Workspace %q (Resource Group %q): %+v", + id.ComputeName, id.WorkspaceName, id.ResourceGroup, err) + } + + d.Set("name", id.ComputeName) + + subscriptionId := meta.(*clients.Client).Account.SubscriptionId + workspaceId := parse.NewWorkspaceID(subscriptionId, id.ResourceGroup, id.WorkspaceName) + d.Set("machine_learning_workspace_id", workspaceId.ID()) + + // use ComputeResource to get to AKS Cluster ID and other properties + computeCluster, isComputeCluster := (machinelearningservices.BasicCompute).AsAmlCompute(computeResource.Properties) + if !isComputeCluster { + return fmt.Errorf("compute resource %s is not an Aml Compute cluster", id.ComputeName) + } + + if props := computeCluster.Properties; props != nil { + d.Set("vm_size", props.VMSize) + d.Set("vm_priority", props.VMPriority) + d.Set("scale_settings", flattenScaleSettings(props.ScaleSettings)) + if props.Subnet != nil { + d.Set("subnet_resource_id", props.Subnet.ID) + } + } + + if location := computeResource.Location; location != nil { + d.Set("location", azure.NormalizeLocation(*location)) + } + + if err := d.Set("identity", flattenComputeClusterIdentity(computeResource.Identity)); err != nil { + return fmt.Errorf("flattening identity on Workspace %q (Resource Group %q): %+v", + id.ComputeName, id.ResourceGroup, err) + } + + return tags.FlattenAndSet(d, computeResource.Tags) +} + +func resourceComputeClusterDelete(d 
*pluginsdk.ResourceData, meta interface{}) error { + mlComputeClient := meta.(*clients.Client).MachineLearning.MachineLearningComputeClient + ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) + defer cancel() + id, err := parse.ComputeClusterID(d.Id()) + if err != nil { + return fmt.Errorf("parsing Compute Cluster ID `%q`: %+v", d.Id(), err) + } + future, err := mlComputeClient.Delete(ctx, id.ResourceGroup, id.WorkspaceName, id.ComputeName, machinelearningservices.Detach) + if err != nil { + return fmt.Errorf("deleting Compute Cluster %q in workspace %q (Resource Group %q): %+v", id.ComputeName, id.WorkspaceName, id.ResourceGroup, err) + } + if err := future.WaitForCompletionRef(ctx, mlComputeClient.Client); err != nil { + return fmt.Errorf("waiting for deletion of Compute Cluster %q in workspace %q (Resource Group %q): %+v", id.ComputeName, id.WorkspaceName, id.ResourceGroup, err) + } + return nil +} + +func expandScaleSettings(input []interface{}) *machinelearningservices.ScaleSettings { + if len(input) == 0 { + return nil + } + + v := input[0].(map[string]interface{}) + + max_node_count := int32(v["max_node_count"].(int)) + min_node_count := int32(v["min_node_count"].(int)) + scale_down_nodes_after_idle_duration := v["scale_down_nodes_after_idle_duration"].(string) + + return &machinelearningservices.ScaleSettings{ + MaxNodeCount: &max_node_count, + MinNodeCount: &min_node_count, + NodeIdleTimeBeforeScaleDown: &scale_down_nodes_after_idle_duration, + } +} + +func flattenScaleSettings(scaleSettings *machinelearningservices.ScaleSettings) []interface{} { + if scaleSettings == nil { + return []interface{}{} + } + + return []interface{}{ + map[string]interface{}{ + "max_node_count": scaleSettings.MaxNodeCount, + "min_node_count": scaleSettings.MinNodeCount, + "scale_down_nodes_after_idle_duration": scaleSettings.NodeIdleTimeBeforeScaleDown, + }, + } +} + +func expandComputeClusterIdentity(input []interface{}) *machinelearningservices.Identity 
{ + if len(input) == 0 { + return nil + } + + v := input[0].(map[string]interface{}) + + return &machinelearningservices.Identity{ + Type: machinelearningservices.ResourceIdentityType(v["type"].(string)), + } +} + +func flattenComputeClusterIdentity(identity *machinelearningservices.Identity) []interface{} { + if identity == nil { + return []interface{}{} + } + + principalID := "" + if identity.PrincipalID != nil { + principalID = *identity.PrincipalID + } + + tenantID := "" + if identity.TenantID != nil { + tenantID = *identity.TenantID + } + + return []interface{}{ + map[string]interface{}{ + "type": string(identity.Type), + "principal_id": principalID, + "tenant_id": tenantID, + }, + } +} diff --git a/azurerm/internal/services/machinelearning/machine_learning_compute_cluster_resource_test.go b/azurerm/internal/services/machinelearning/machine_learning_compute_cluster_resource_test.go new file mode 100644 index 000000000000..f825fb2a54cb --- /dev/null +++ b/azurerm/internal/services/machinelearning/machine_learning_compute_cluster_resource_test.go @@ -0,0 +1,312 @@ +package machinelearning_test + +import ( + "context" + "fmt" + "testing" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/machinelearning/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +type ComputeClusterResource struct{} + +func TestAccComputeCluster_basic(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_machine_learning_compute_cluster", "test") + r := ComputeClusterResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: 
r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("identity.#").HasValue("1"), + check.That(data.ResourceName).Key("identity.0.type").HasValue("SystemAssigned"), + check.That(data.ResourceName).Key("identity.0.principal_id").Exists(), + check.That(data.ResourceName).Key("identity.0.tenant_id").Exists(), + check.That(data.ResourceName).Key("scale_settings.#").HasValue("1"), + check.That(data.ResourceName).Key("scale_settings.0.max_node_count").Exists(), + check.That(data.ResourceName).Key("scale_settings.0.min_node_count").Exists(), + check.That(data.ResourceName).Key("scale_settings.0.scale_down_nodes_after_idle_duration").Exists(), + ), + }, + data.ImportStep(), + }) +} + +func TestAccComputeCluster_complete(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_machine_learning_compute_cluster", "test") + r := ComputeClusterResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.complete(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("identity.#").HasValue("1"), + check.That(data.ResourceName).Key("identity.0.type").HasValue("SystemAssigned"), + check.That(data.ResourceName).Key("identity.0.principal_id").Exists(), + check.That(data.ResourceName).Key("identity.0.tenant_id").Exists(), + check.That(data.ResourceName).Key("scale_settings.#").HasValue("1"), + check.That(data.ResourceName).Key("scale_settings.0.max_node_count").Exists(), + check.That(data.ResourceName).Key("scale_settings.0.min_node_count").Exists(), + check.That(data.ResourceName).Key("scale_settings.0.scale_down_nodes_after_idle_duration").Exists(), + ), + }, + data.ImportStep(), + }) +} + +func TestAccComputeCluster_requiresImport(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_machine_learning_compute_cluster", "test") + r := ComputeClusterResource{} + + data.ResourceTest(t, 
r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("identity.#").HasValue("1"), + check.That(data.ResourceName).Key("identity.0.type").HasValue("SystemAssigned"), + check.That(data.ResourceName).Key("identity.0.principal_id").Exists(), + check.That(data.ResourceName).Key("identity.0.tenant_id").Exists(), + check.That(data.ResourceName).Key("scale_settings.#").HasValue("1"), + check.That(data.ResourceName).Key("scale_settings.0.max_node_count").Exists(), + check.That(data.ResourceName).Key("scale_settings.0.min_node_count").Exists(), + check.That(data.ResourceName).Key("scale_settings.0.scale_down_nodes_after_idle_duration").Exists(), + ), + }, + data.RequiresImportErrorStep(r.requiresImport), + }) +} + +func (r ComputeClusterResource) Exists(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { + computeClusterClient := client.MachineLearning.MachineLearningComputeClient + id, err := parse.ComputeClusterID(state.ID) + + if err != nil { + return nil, err + } + + computeResource, err := computeClusterClient.Get(ctx, id.ResourceGroup, id.WorkspaceName, id.ComputeName) + if err != nil { + if utils.ResponseWasNotFound(computeResource.Response) { + return utils.Bool(false), nil + } + return nil, fmt.Errorf("retrieving Machine Learning Compute Cluster %q: %+v", state.ID, err) + } + return utils.Bool(computeResource.Properties != nil), nil +} + +func (r ComputeClusterResource) basic(data acceptance.TestData) string { + template := r.template_basic(data) + return fmt.Sprintf(` +%s + +resource "azurerm_machine_learning_compute_cluster" "test" { + name = "CC-%d" + location = azurerm_resource_group.test.location + vm_priority = "LowPriority" + vm_size = "STANDARD_DS2_V2" + machine_learning_workspace_id = azurerm_machine_learning_workspace.test.id + + scale_settings { + min_node_count = 0 + max_node_count = 1 
+ scale_down_nodes_after_idle_duration = "PT30S" # 30 seconds + } + + identity { + type = "SystemAssigned" + } +} +`, template, data.RandomIntOfLength(8)) +} + +func (r ComputeClusterResource) complete(data acceptance.TestData) string { + template := r.template_complete(data) + return fmt.Sprintf(` +%s + +resource "azurerm_machine_learning_compute_cluster" "test" { + name = "CC-%d" + location = azurerm_resource_group.test.location + vm_priority = "LowPriority" + vm_size = "STANDARD_DS2_V2" + machine_learning_workspace_id = azurerm_machine_learning_workspace.test.id + subnet_resource_id = azurerm_subnet.test.id + + scale_settings { + min_node_count = 0 + max_node_count = 1 + scale_down_nodes_after_idle_duration = "PT30S" # 30 seconds + } + + identity { + type = "SystemAssigned" + } +} +`, template, data.RandomIntOfLength(8)) +} + +func (r ComputeClusterResource) requiresImport(data acceptance.TestData) string { + template := r.basic(data) + return fmt.Sprintf(` +%s + +resource "azurerm_machine_learning_compute_cluster" "import" { + name = azurerm_machine_learning_compute_cluster.test.name + location = azurerm_machine_learning_compute_cluster.test.location + vm_priority = azurerm_machine_learning_compute_cluster.test.vm_priority + vm_size = azurerm_machine_learning_compute_cluster.test.vm_size + machine_learning_workspace_id = azurerm_machine_learning_compute_cluster.test.machine_learning_workspace_id + + scale_settings { + min_node_count = 0 + max_node_count = 1 + scale_down_nodes_after_idle_duration = "PT2M" # 120 seconds + } + + identity { + type = "SystemAssigned" + } +} + +`, template) +} + +func (r ComputeClusterResource) template_basic(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +data "azurerm_client_config" "current" {} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-ml-%[1]d" + location = "%[2]s" + tags = { + "stage" = "test" + } +} + +resource "azurerm_application_insights" "test" { + 
name = "acctestai-%[1]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + application_type = "web" +} + +resource "azurerm_key_vault" "test" { + name = "acctestvault%[3]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + tenant_id = data.azurerm_client_config.current.tenant_id + + sku_name = "standard" + + purge_protection_enabled = true +} + +resource "azurerm_storage_account" "test" { + name = "acctestsa%[4]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + account_tier = "Standard" + account_replication_type = "LRS" +} + +resource "azurerm_machine_learning_workspace" "test" { + name = "acctest-MLW%[5]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + application_insights_id = azurerm_application_insights.test.id + key_vault_id = azurerm_key_vault.test.id + storage_account_id = azurerm_storage_account.test.id + + identity { + type = "SystemAssigned" + } +} +`, data.RandomInteger, data.Locations.Primary, + data.RandomIntOfLength(12), data.RandomIntOfLength(15), data.RandomIntOfLength(16), + data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger) +} + +func (r ComputeClusterResource) template_complete(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +data "azurerm_client_config" "current" {} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-ml-%[1]d" + location = "%[2]s" + tags = { + "stage" = "test" + } +} + +resource "azurerm_application_insights" "test" { + name = "acctestai-%[1]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + application_type = "web" +} + +resource "azurerm_key_vault" "test" { + name = "acctestvault%[3]d" + location = azurerm_resource_group.test.location + 
resource_group_name = azurerm_resource_group.test.name + tenant_id = data.azurerm_client_config.current.tenant_id + + sku_name = "standard" + + purge_protection_enabled = true +} + +resource "azurerm_storage_account" "test" { + name = "acctestsa%[4]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + account_tier = "Standard" + account_replication_type = "LRS" +} + +resource "azurerm_machine_learning_workspace" "test" { + name = "acctest-MLW%[5]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + application_insights_id = azurerm_application_insights.test.id + key_vault_id = azurerm_key_vault.test.id + storage_account_id = azurerm_storage_account.test.id + + identity { + type = "SystemAssigned" + } +} + +resource "azurerm_virtual_network" "test" { + name = "acctestvirtnet%[6]d" + address_space = ["10.1.0.0/16"] + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name +} + +resource "azurerm_subnet" "test" { + name = "acctestsubnet%[7]d" + resource_group_name = azurerm_resource_group.test.name + virtual_network_name = azurerm_virtual_network.test.name + address_prefix = "10.1.0.0/24" +} +`, data.RandomInteger, data.Locations.Primary, + data.RandomIntOfLength(12), data.RandomIntOfLength(15), data.RandomIntOfLength(16), + data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger) +} diff --git a/azurerm/internal/services/machinelearning/machine_learning_inference_cluster_resource.go b/azurerm/internal/services/machinelearning/machine_learning_inference_cluster_resource.go index 20f9302ca67c..2f95c617f31b 100644 --- a/azurerm/internal/services/machinelearning/machine_learning_inference_cluster_resource.go +++ b/azurerm/internal/services/machinelearning/machine_learning_inference_cluster_resource.go @@ -6,19 +6,15 @@ import ( 
"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2021-03-01/containerservice" "github.com/Azure/azure-sdk-for-go/services/machinelearningservices/mgmt/2020-04-01/machinelearningservices" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/machinelearning/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/machinelearning/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" - + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/suppress" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -91,22 +87,39 @@ func resourceAksInferenceCluster() *pluginsdk.Resource { Elem: &pluginsdk.Resource{ Schema: map[string]*pluginsdk.Schema{ "cert": { - Type: pluginsdk.TypeString, - Optional: true, - ForceNew: true, - Default: "", + Type: pluginsdk.TypeString, + Optional: true, + ForceNew: true, + Default: "", + ConflictsWith: []string{"ssl.0.leaf_domain_label", "ssl.0.overwrite_existing_domain"}, }, "key": { - Type: pluginsdk.TypeString, - Optional: true, - ForceNew: true, - Default: "", + Type: pluginsdk.TypeString, + Optional: true, + ForceNew: true, + Default: "", + ConflictsWith: 
[]string{"ssl.0.leaf_domain_label", "ssl.0.overwrite_existing_domain"}, }, "cname": { - Type: pluginsdk.TypeString, - Optional: true, - ForceNew: true, - Default: "", + Type: pluginsdk.TypeString, + Optional: true, + ForceNew: true, + Default: "", + ConflictsWith: []string{"ssl.0.leaf_domain_label", "ssl.0.overwrite_existing_domain"}, + }, + "leaf_domain_label": { + Type: pluginsdk.TypeString, + Optional: true, + ForceNew: true, + Default: "", + ConflictsWith: []string{"ssl.0.cert", "ssl.0.key", "ssl.0.cname"}, + }, + "overwrite_existing_domain": { + Type: pluginsdk.TypeBool, + Optional: true, + ForceNew: true, + Default: "", + ConflictsWith: []string{"ssl.0.cert", "ssl.0.key", "ssl.0.cname"}, }, }, }, @@ -276,10 +289,17 @@ func expandSSLConfig(input []interface{}) *machinelearningservices.SslConfigurat sslStatus = "Enabled" } + if !(v["leaf_domain_label"].(string) == "") { + sslStatus = "Auto" + v["cname"] = "" + } + return &machinelearningservices.SslConfiguration{ - Status: machinelearningservices.Status1(sslStatus), - Cert: utils.String(v["cert"].(string)), - Key: utils.String(v["key"].(string)), - Cname: utils.String(v["cname"].(string)), + Status: machinelearningservices.Status1(sslStatus), + Cert: utils.String(v["cert"].(string)), + Key: utils.String(v["key"].(string)), + Cname: utils.String(v["cname"].(string)), + LeafDomainLabel: utils.String(v["leaf_domain_label"].(string)), + OverwriteExistingDomain: utils.Bool(v["overwrite_existing_domain"].(bool)), } } diff --git a/azurerm/internal/services/machinelearning/machine_learning_inference_cluster_resource_test.go b/azurerm/internal/services/machinelearning/machine_learning_inference_cluster_resource_test.go index fee92d8e671c..a05b1b408273 100644 --- a/azurerm/internal/services/machinelearning/machine_learning_inference_cluster_resource_test.go +++ b/azurerm/internal/services/machinelearning/machine_learning_inference_cluster_resource_test.go @@ -45,13 +45,28 @@ func TestAccInferenceCluster_requiresImport(t 
*testing.T) { }) } -func TestAccInferenceCluster_complete(t *testing.T) { +func TestAccInferenceCluster_completeCustomSSL(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_machine_learning_inference_cluster", "test") r := InferenceClusterResource{} data.ResourceTest(t, r, []acceptance.TestStep{ { - Config: r.complete(data), + Config: r.completeCustomSSL(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep("ssl"), + }) +} + +func TestAccInferenceCluster_completeMicrosoftSSL(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_machine_learning_inference_cluster", "test") + r := InferenceClusterResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.completeMicrosoftSSL(data), Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), @@ -112,7 +127,7 @@ resource "azurerm_machine_learning_inference_cluster" "test" { `, r.templateDevTest(data), data.RandomIntOfLength(8)) } -func (r InferenceClusterResource) complete(data acceptance.TestData) string { +func (r InferenceClusterResource) completeCustomSSL(data acceptance.TestData) string { return fmt.Sprintf(` %s @@ -131,7 +146,28 @@ resource "azurerm_machine_learning_inference_cluster" "test" { tags = { ENV = "Test" } +} +`, r.templateDevTest(data), data.RandomIntOfLength(8)) +} + +func (r InferenceClusterResource) completeMicrosoftSSL(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_machine_learning_inference_cluster" "test" { + name = "AIC-%d" + machine_learning_workspace_id = azurerm_machine_learning_workspace.test.id + location = azurerm_resource_group.test.location + kubernetes_cluster_id = azurerm_kubernetes_cluster.test.id + cluster_purpose = "DevTest" + ssl { + leaf_domain_label = "contoso" + overwrite_existing_domain = true + } + tags = { + ENV = "Test" + } } `, r.templateDevTest(data), data.RandomIntOfLength(8)) } @@ -155,7 +191,6 
@@ resource "azurerm_machine_learning_inference_cluster" "test" { tags = { ENV = "Production" } - } `, r.templateFastProd(data), data.RandomIntOfLength(8)) } diff --git a/azurerm/internal/services/machinelearning/parse/compute_cluster.go b/azurerm/internal/services/machinelearning/parse/compute_cluster.go new file mode 100644 index 000000000000..4dcc2dd18144 --- /dev/null +++ b/azurerm/internal/services/machinelearning/parse/compute_cluster.go @@ -0,0 +1,75 @@ +package parse + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "fmt" + "strings" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" +) + +type ComputeClusterId struct { + SubscriptionId string + ResourceGroup string + WorkspaceName string + ComputeName string +} + +func NewComputeClusterID(subscriptionId, resourceGroup, workspaceName, computeName string) ComputeClusterId { + return ComputeClusterId{ + SubscriptionId: subscriptionId, + ResourceGroup: resourceGroup, + WorkspaceName: workspaceName, + ComputeName: computeName, + } +} + +func (id ComputeClusterId) String() string { + segments := []string{ + fmt.Sprintf("Compute Name %q", id.ComputeName), + fmt.Sprintf("Workspace Name %q", id.WorkspaceName), + fmt.Sprintf("Resource Group %q", id.ResourceGroup), + } + segmentsStr := strings.Join(segments, " / ") + return fmt.Sprintf("%s: (%s)", "Compute Cluster", segmentsStr) +} + +func (id ComputeClusterId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.MachineLearningServices/workspaces/%s/computes/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.WorkspaceName, id.ComputeName) +} + +// ComputeClusterID parses a ComputeCluster ID into an ComputeClusterId struct +func ComputeClusterID(input string) (*ComputeClusterId, error) { + id, err := azure.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := ComputeClusterId{ + 
SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + if resourceId.WorkspaceName, err = id.PopSegment("workspaces"); err != nil { + return nil, err + } + if resourceId.ComputeName, err = id.PopSegment("computes"); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} diff --git a/azurerm/internal/services/machinelearning/parse/compute_cluster_test.go b/azurerm/internal/services/machinelearning/parse/compute_cluster_test.go new file mode 100644 index 000000000000..de390027ca48 --- /dev/null +++ b/azurerm/internal/services/machinelearning/parse/compute_cluster_test.go @@ -0,0 +1,128 @@ +package parse + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "testing" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/resourceid" +) + +var _ resourceid.Formatter = ComputeClusterId{} + +func TestComputeClusterIDFormatter(t *testing.T) { + actual := NewComputeClusterID("00000000-0000-0000-0000-000000000000", "resGroup1", "workspace1", "cluster1").ID() + expected := "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/resGroup1/providers/Microsoft.MachineLearningServices/workspaces/workspace1/computes/cluster1" + if actual != expected { + t.Fatalf("Expected %q but got %q", expected, actual) + } +} + +func TestComputeClusterID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ComputeClusterId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + 
{ + // missing ResourceGroup + Input: "/subscriptions/00000000-0000-0000-0000-000000000000/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/", + Error: true, + }, + + { + // missing WorkspaceName + Input: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/resGroup1/providers/Microsoft.MachineLearningServices/", + Error: true, + }, + + { + // missing value for WorkspaceName + Input: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/resGroup1/providers/Microsoft.MachineLearningServices/workspaces/", + Error: true, + }, + + { + // missing ComputeName + Input: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/resGroup1/providers/Microsoft.MachineLearningServices/workspaces/workspace1/", + Error: true, + }, + + { + // missing value for ComputeName + Input: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/resGroup1/providers/Microsoft.MachineLearningServices/workspaces/workspace1/computes/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/resGroup1/providers/Microsoft.MachineLearningServices/workspaces/workspace1/computes/cluster1", + Expected: &ComputeClusterId{ + SubscriptionId: "00000000-0000-0000-0000-000000000000", + ResourceGroup: "resGroup1", + WorkspaceName: "workspace1", + ComputeName: "cluster1", + }, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/00000000-0000-0000-0000-000000000000/RESOURCEGROUPS/RESGROUP1/PROVIDERS/MICROSOFT.MACHINELEARNINGSERVICES/WORKSPACES/WORKSPACE1/COMPUTES/CLUSTER1", + Error: true, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ComputeClusterID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if 
actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.WorkspaceName != v.Expected.WorkspaceName { + t.Fatalf("Expected %q but got %q for WorkspaceName", v.Expected.WorkspaceName, actual.WorkspaceName) + } + if actual.ComputeName != v.Expected.ComputeName { + t.Fatalf("Expected %q but got %q for ComputeName", v.Expected.ComputeName, actual.ComputeName) + } + } +} diff --git a/azurerm/internal/services/machinelearning/registration.go b/azurerm/internal/services/machinelearning/registration.go index e8fcd46fdefb..5d2caba3af04 100644 --- a/azurerm/internal/services/machinelearning/registration.go +++ b/azurerm/internal/services/machinelearning/registration.go @@ -29,5 +29,6 @@ func (r Registration) SupportedResources() map[string]*pluginsdk.Resource { return map[string]*pluginsdk.Resource{ "azurerm_machine_learning_workspace": resourceMachineLearningWorkspace(), "azurerm_machine_learning_inference_cluster": resourceAksInferenceCluster(), + "azurerm_machine_learning_compute_cluster": resourceComputeCluster(), } } diff --git a/azurerm/internal/services/machinelearning/resourceids.go b/azurerm/internal/services/machinelearning/resourceids.go index 18ebbab752ba..05f1cce453cb 100644 --- a/azurerm/internal/services/machinelearning/resourceids.go +++ b/azurerm/internal/services/machinelearning/resourceids.go @@ -1,5 +1,6 @@ package machinelearning +//go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=ComputeCluster -id=/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/resGroup1/providers/Microsoft.MachineLearningServices/workspaces/workspace1/computes/cluster1 //go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=InferenceCluster 
-id=/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/resGroup1/providers/Microsoft.MachineLearningServices/workspaces/workspace1/computes/cluster1 //go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=KubernetesCluster -id=/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/resGroup1/providers/Microsoft.ContainerService/managedClusters/cluster1 //go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=Workspace -id=/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/resGroup1/providers/Microsoft.MachineLearningServices/workspaces/workspace1 diff --git a/azurerm/internal/services/machinelearning/validate/compute_cluster_id.go b/azurerm/internal/services/machinelearning/validate/compute_cluster_id.go new file mode 100644 index 000000000000..45d84480db6d --- /dev/null +++ b/azurerm/internal/services/machinelearning/validate/compute_cluster_id.go @@ -0,0 +1,23 @@ +package validate + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "fmt" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/machinelearning/parse" +) + +func ComputeClusterID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := parse.ComputeClusterID(v); err != nil { + errors = append(errors, err) + } + + return +} diff --git a/azurerm/internal/services/machinelearning/validate/compute_cluster_id_test.go b/azurerm/internal/services/machinelearning/validate/compute_cluster_id_test.go new file mode 100644 index 000000000000..540d30dd9d62 --- /dev/null +++ b/azurerm/internal/services/machinelearning/validate/compute_cluster_id_test.go @@ -0,0 +1,88 @@ +package validate + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import "testing" + 
+func TestComputeClusterID(t *testing.T) { + cases := []struct { + Input string + Valid bool + }{ + + { + // empty + Input: "", + Valid: false, + }, + + { + // missing SubscriptionId + Input: "/", + Valid: false, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Valid: false, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/00000000-0000-0000-0000-000000000000/", + Valid: false, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/", + Valid: false, + }, + + { + // missing WorkspaceName + Input: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/resGroup1/providers/Microsoft.MachineLearningServices/", + Valid: false, + }, + + { + // missing value for WorkspaceName + Input: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/resGroup1/providers/Microsoft.MachineLearningServices/workspaces/", + Valid: false, + }, + + { + // missing ComputeName + Input: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/resGroup1/providers/Microsoft.MachineLearningServices/workspaces/workspace1/", + Valid: false, + }, + + { + // missing value for ComputeName + Input: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/resGroup1/providers/Microsoft.MachineLearningServices/workspaces/workspace1/computes/", + Valid: false, + }, + + { + // valid + Input: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/resGroup1/providers/Microsoft.MachineLearningServices/workspaces/workspace1/computes/cluster1", + Valid: true, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/00000000-0000-0000-0000-000000000000/RESOURCEGROUPS/RESGROUP1/PROVIDERS/MICROSOFT.MACHINELEARNINGSERVICES/WORKSPACES/WORKSPACE1/COMPUTES/CLUSTER1", + Valid: false, + }, + } + for _, tc := range cases { + t.Logf("[DEBUG] Testing Value %s", tc.Input) + _, errors := ComputeClusterID(tc.Input, "test") + valid := len(errors) == 0 + + if 
tc.Valid != valid { + t.Fatalf("Expected %t but got %t", tc.Valid, valid) + } + } +} diff --git a/azurerm/internal/services/maintenance/client/client.go b/azurerm/internal/services/maintenance/client/client.go index 4205000e7e4e..51f89d2ea3f3 100644 --- a/azurerm/internal/services/maintenance/client/client.go +++ b/azurerm/internal/services/maintenance/client/client.go @@ -1,7 +1,7 @@ package client import ( - "github.com/Azure/azure-sdk-for-go/services/preview/maintenance/mgmt/2018-06-01-preview/maintenance" + "github.com/Azure/azure-sdk-for-go/services/maintenance/mgmt/2021-05-01/maintenance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/common" ) diff --git a/azurerm/internal/services/maintenance/maintenance_assignment_dedicated_host_resource.go b/azurerm/internal/services/maintenance/maintenance_assignment_dedicated_host_resource.go index 3ebe4e7bcf01..987257fe7a85 100644 --- a/azurerm/internal/services/maintenance/maintenance_assignment_dedicated_host_resource.go +++ b/azurerm/internal/services/maintenance/maintenance_assignment_dedicated_host_resource.go @@ -6,7 +6,7 @@ import ( "strings" "time" - "github.com/Azure/azure-sdk-for-go/services/preview/maintenance/mgmt/2018-06-01-preview/maintenance" + "github.com/Azure/azure-sdk-for-go/services/maintenance/mgmt/2021-05-01/maintenance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" @@ -67,12 +67,15 @@ func resourceArmMaintenanceAssignmentDedicatedHostCreate(d *pluginsdk.ResourceDa dedicatedHostIdRaw := d.Get("dedicated_host_id").(string) dedicatedHostId, _ := parseCompute.DedicatedHostID(dedicatedHostIdRaw) - existing, err := getMaintenanceAssignmentDedicatedHost(ctx, client, dedicatedHostId, dedicatedHostIdRaw) + existingList, err := getMaintenanceAssignmentDedicatedHost(ctx, client, 
dedicatedHostId, dedicatedHostIdRaw) if err != nil { return err } - if existing.ID != nil && *existing.ID != "" { - return tf.ImportAsExistsError("azurerm_maintenance_assignment_dedicated_host", *existing.ID) + if existingList != nil && len(*existingList) > 0 { + existing := (*existingList)[0] + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_maintenance_assignment_dedicated_host", *existing.ID) + } } maintenanceConfigurationID := d.Get("maintenance_configuration_id").(string) @@ -80,7 +83,7 @@ func resourceArmMaintenanceAssignmentDedicatedHostCreate(d *pluginsdk.ResourceDa // set assignment name to configuration name assignmentName := configurationId.Name - assignment := maintenance.ConfigurationAssignment{ + configurationAssignment := maintenance.ConfigurationAssignment{ Name: utils.String(assignmentName), Location: utils.String(location.Normalize(d.Get("location").(string))), ConfigurationAssignmentProperties: &maintenance.ConfigurationAssignmentProperties{ @@ -91,7 +94,7 @@ func resourceArmMaintenanceAssignmentDedicatedHostCreate(d *pluginsdk.ResourceDa // It may take a few minutes after starting a VM for it to become available to assign to a configuration err = pluginsdk.Retry(d.Timeout(pluginsdk.TimeoutCreate), func() *pluginsdk.RetryError { - if _, err := client.CreateOrUpdateParent(ctx, dedicatedHostId.ResourceGroup, "Microsoft.Compute", "hostGroups", dedicatedHostId.HostGroupName, "hosts", dedicatedHostId.HostName, assignmentName, assignment); err != nil { + if _, err := client.CreateOrUpdateParent(ctx, dedicatedHostId.ResourceGroup, "Microsoft.Compute", "hostGroups", dedicatedHostId.HostGroupName, "hosts", dedicatedHostId.HostName, assignmentName, configurationAssignment); err != nil { if strings.Contains(err.Error(), "It may take a few minutes after starting a VM for it to become available to assign to a configuration") { return pluginsdk.RetryableError(fmt.Errorf("expected VM is available to assign to a 
configuration but was in pending state, retrying")) } @@ -99,7 +102,7 @@ func resourceArmMaintenanceAssignmentDedicatedHostCreate(d *pluginsdk.ResourceDa } return nil - }) + }) //lintignore:R006 if err != nil { return err } @@ -108,11 +111,15 @@ func resourceArmMaintenanceAssignmentDedicatedHostCreate(d *pluginsdk.ResourceDa if err != nil { return err } - if resp.ID == nil || *resp.ID == "" { + if resp == nil || len(*resp) == 0 { + return fmt.Errorf("could not find Maintenance assignment (virtual machine scale set ID: %q)", dedicatedHostIdRaw) + } + assignment := (*resp)[0] + if assignment.ID == nil || *assignment.ID == "" { return fmt.Errorf("empty or nil ID of Maintenance Assignment (Dedicated Host ID %q)", dedicatedHostIdRaw) } - d.SetId(*resp.ID) + d.SetId(*assignment.ID) return resourceArmMaintenanceAssignmentDedicatedHostRead(d, meta) } @@ -126,10 +133,15 @@ func resourceArmMaintenanceAssignmentDedicatedHostRead(d *pluginsdk.ResourceData return err } - assignment, err := getMaintenanceAssignmentDedicatedHost(ctx, client, id.DedicatedHostId, id.DedicatedHostIdRaw) + resp, err := getMaintenanceAssignmentDedicatedHost(ctx, client, id.DedicatedHostId, id.DedicatedHostIdRaw) if err != nil { return err } + if resp == nil || len(*resp) == 0 { + d.SetId("") + return nil + } + assignment := (*resp)[0] if assignment.ID == nil || *assignment.ID == "" { return fmt.Errorf("empty or nil ID of Maintenance Assignment (Dedicated Host ID: %q", id.DedicatedHostIdRaw) } @@ -163,19 +175,13 @@ func resourceArmMaintenanceAssignmentDedicatedHostDelete(d *pluginsdk.ResourceDa return nil } -func getMaintenanceAssignmentDedicatedHost(ctx context.Context, client *maintenance.ConfigurationAssignmentsClient, id *parseCompute.DedicatedHostId, dedicatedHostId string) (result maintenance.ConfigurationAssignment, err error) { +func getMaintenanceAssignmentDedicatedHost(ctx context.Context, client *maintenance.ConfigurationAssignmentsClient, id *parseCompute.DedicatedHostId, dedicatedHostId 
string) (result *[]maintenance.ConfigurationAssignment, err error) { resp, err := client.ListParent(ctx, id.ResourceGroup, "Microsoft.Compute", "hostGroups", id.HostGroupName, "hosts", id.HostName) if err != nil { if !utils.ResponseWasNotFound(resp.Response) { err = fmt.Errorf("checking for presence of existing Maintenance assignment (Dedicated Host ID %q): %+v", dedicatedHostId, err) return } - return result, nil - } - if resp.Value == nil || len(*resp.Value) == 0 { - err = fmt.Errorf("could not find Maintenance assignment (Dedicated Host ID %q)", dedicatedHostId) - return } - - return (*resp.Value)[0], nil + return resp.Value, nil } diff --git a/azurerm/internal/services/maintenance/maintenance_assignment_virtual_machine_resource.go b/azurerm/internal/services/maintenance/maintenance_assignment_virtual_machine_resource.go index 0d76622b35a5..8b5dc77d76e5 100644 --- a/azurerm/internal/services/maintenance/maintenance_assignment_virtual_machine_resource.go +++ b/azurerm/internal/services/maintenance/maintenance_assignment_virtual_machine_resource.go @@ -6,7 +6,7 @@ import ( "strings" "time" - "github.com/Azure/azure-sdk-for-go/services/preview/maintenance/mgmt/2018-06-01-preview/maintenance" + "github.com/Azure/azure-sdk-for-go/services/maintenance/mgmt/2021-05-01/maintenance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" @@ -67,12 +67,15 @@ func resourceArmMaintenanceAssignmentVirtualMachineCreate(d *pluginsdk.ResourceD virtualMachineIdRaw := d.Get("virtual_machine_id").(string) virtualMachineId, _ := parseCompute.VirtualMachineID(virtualMachineIdRaw) - existing, err := getMaintenanceAssignmentVirtualMachine(ctx, client, virtualMachineId, virtualMachineIdRaw) + existingList, err := getMaintenanceAssignmentVirtualMachine(ctx, client, virtualMachineId, 
virtualMachineIdRaw) if err != nil { return err } - if existing.ID != nil && *existing.ID != "" { - return tf.ImportAsExistsError("azurerm_maintenance_assignment_virtual_machine", *existing.ID) + if existingList != nil && len(*existingList) > 0 { + existing := (*existingList)[0] + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_maintenance_assignment_virtual_machine", *existing.ID) + } } maintenanceConfigurationID := d.Get("maintenance_configuration_id").(string) @@ -80,7 +83,7 @@ func resourceArmMaintenanceAssignmentVirtualMachineCreate(d *pluginsdk.ResourceD // set assignment name to configuration name assignmentName := configurationId.Name - assignment := maintenance.ConfigurationAssignment{ + configurationAssignment := maintenance.ConfigurationAssignment{ Name: utils.String(assignmentName), Location: utils.String(location.Normalize(d.Get("location").(string))), ConfigurationAssignmentProperties: &maintenance.ConfigurationAssignmentProperties{ @@ -91,7 +94,7 @@ func resourceArmMaintenanceAssignmentVirtualMachineCreate(d *pluginsdk.ResourceD // It may take a few minutes after starting a VM for it to become available to assign to a configuration err = pluginsdk.Retry(d.Timeout(pluginsdk.TimeoutCreate), func() *pluginsdk.RetryError { - if _, err := client.CreateOrUpdate(ctx, virtualMachineId.ResourceGroup, "Microsoft.Compute", "virtualMachines", virtualMachineId.Name, assignmentName, assignment); err != nil { + if _, err := client.CreateOrUpdate(ctx, virtualMachineId.ResourceGroup, "Microsoft.Compute", "virtualMachines", virtualMachineId.Name, assignmentName, configurationAssignment); err != nil { if strings.Contains(err.Error(), "It may take a few minutes after starting a VM for it to become available to assign to a configuration") { return pluginsdk.RetryableError(fmt.Errorf("expected VM is available to assign to a configuration but was in pending state, retrying")) } @@ -99,7 +102,7 @@ func 
resourceArmMaintenanceAssignmentVirtualMachineCreate(d *pluginsdk.ResourceD } return nil - }) + }) //lintignore:R006 if err != nil { return err } @@ -108,11 +111,15 @@ func resourceArmMaintenanceAssignmentVirtualMachineCreate(d *pluginsdk.ResourceD if err != nil { return err } - if resp.ID == nil || *resp.ID == "" { + if resp == nil || len(*resp) == 0 { + return fmt.Errorf("could not find Maintenance assignment (virtual machine ID: %q)", virtualMachineIdRaw) + } + assignment := (*resp)[0] + if assignment.ID == nil || *assignment.ID == "" { return fmt.Errorf("empty or nil ID of Maintenance Assignment (virtual machine ID %q)", virtualMachineIdRaw) } - d.SetId(*resp.ID) + d.SetId(*assignment.ID) return resourceArmMaintenanceAssignmentVirtualMachineRead(d, meta) } @@ -126,10 +133,15 @@ func resourceArmMaintenanceAssignmentVirtualMachineRead(d *pluginsdk.ResourceDat return err } - assignment, err := getMaintenanceAssignmentVirtualMachine(ctx, client, id.VirtualMachineId, id.VirtualMachineIdRaw) + resp, err := getMaintenanceAssignmentVirtualMachine(ctx, client, id.VirtualMachineId, id.VirtualMachineIdRaw) if err != nil { return err } + if resp == nil || len(*resp) == 0 { + d.SetId("") + return nil + } + assignment := (*resp)[0] if assignment.ID == nil || *assignment.ID == "" { return fmt.Errorf("empty or nil ID of Maintenance Assignment (virtual machine ID id: %q", id.VirtualMachineIdRaw) } @@ -163,19 +175,13 @@ func resourceArmMaintenanceAssignmentVirtualMachineDelete(d *pluginsdk.ResourceD return nil } -func getMaintenanceAssignmentVirtualMachine(ctx context.Context, client *maintenance.ConfigurationAssignmentsClient, id *parseCompute.VirtualMachineId, virtualMachineId string) (result maintenance.ConfigurationAssignment, err error) { +func getMaintenanceAssignmentVirtualMachine(ctx context.Context, client *maintenance.ConfigurationAssignmentsClient, id *parseCompute.VirtualMachineId, virtualMachineId string) (result *[]maintenance.ConfigurationAssignment, err error) { 
resp, err := client.List(ctx, id.ResourceGroup, "Microsoft.Compute", "virtualMachines", id.Name) if err != nil { if !utils.ResponseWasNotFound(resp.Response) { err = fmt.Errorf("checking for presence of existing Maintenance assignment (virtual machine ID: %q): %+v", virtualMachineId, err) return } - return result, nil - } - if resp.Value == nil || len(*resp.Value) == 0 { - err = fmt.Errorf("could not find Maintenance assignment (virtual machine ID: %q)", virtualMachineId) - return } - - return (*resp.Value)[0], nil + return resp.Value, nil } diff --git a/azurerm/internal/services/maintenance/maintenance_assignment_virtual_machine_scale_set_resource.go b/azurerm/internal/services/maintenance/maintenance_assignment_virtual_machine_scale_set_resource.go new file mode 100644 index 000000000000..9c8e8f8a89d9 --- /dev/null +++ b/azurerm/internal/services/maintenance/maintenance_assignment_virtual_machine_scale_set_resource.go @@ -0,0 +1,176 @@ +package maintenance + +import ( + "context" + "fmt" + "time" + + "github.com/Azure/azure-sdk-for-go/services/maintenance/mgmt/2021-05-01/maintenance" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/location" + parseCompute "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/parse" + validateCompute "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/maintenance/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/maintenance/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/suppress" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func resourceArmMaintenanceAssignmentVirtualMachineScaleSet() *pluginsdk.Resource { + return &pluginsdk.Resource{ + Create: resourceArmMaintenanceAssignmentVirtualMachineScaleSetCreate, + Read: resourceArmMaintenanceAssignmentVirtualMachineScaleSetRead, + Delete: resourceArmMaintenanceAssignmentVirtualMachineScaleSetDelete, + + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), + }, + + Importer: pluginsdk.ImporterValidatingResourceId(func(id string) error { + _, err := parse.MaintenanceAssignmentVirtualMachineScaleSetID(id) + return err + }), + + Schema: map[string]*pluginsdk.Schema{ + "location": azure.SchemaLocation(), + + "maintenance_configuration_id": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.MaintenanceConfigurationID, + }, + + "virtual_machine_scale_set_id": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateCompute.VirtualMachineScaleSetID, + DiffSuppressFunc: suppress.CaseDifference, + }, + }, + } +} + +func resourceArmMaintenanceAssignmentVirtualMachineScaleSetCreate(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).Maintenance.ConfigurationAssignmentsClient + ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) + defer cancel() + + virtualMachineScaleSetIdRaw := d.Get("virtual_machine_scale_set_id").(string) + virtualMachineScaleSetId, _ := parseCompute.VirtualMachineScaleSetID(virtualMachineScaleSetIdRaw) + + existingList, err := getMaintenanceAssignmentVirtualMachineScaleSet(ctx, client, 
virtualMachineScaleSetId, virtualMachineScaleSetIdRaw) + if err != nil { + return err + } + if existingList != nil && len(*existingList) > 0 { + existing := (*existingList)[0] + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_maintenance_assignment_virtual_machine_scale_set", *existing.ID) + } + } + + maintenanceConfigurationID := d.Get("maintenance_configuration_id").(string) + configurationId, _ := parse.MaintenanceConfigurationIDInsensitively(maintenanceConfigurationID) + + // set assignment name to configuration name + assignmentName := configurationId.Name + configurationAssignment := maintenance.ConfigurationAssignment{ + Name: utils.String(assignmentName), + Location: utils.String(location.Normalize(d.Get("location").(string))), + ConfigurationAssignmentProperties: &maintenance.ConfigurationAssignmentProperties{ + MaintenanceConfigurationID: utils.String(maintenanceConfigurationID), + ResourceID: utils.String(virtualMachineScaleSetIdRaw), + }, + } + + _, err = client.CreateOrUpdate(ctx, virtualMachineScaleSetId.ResourceGroup, "Microsoft.Compute", "virtualMachineScaleSets", virtualMachineScaleSetId.Name, assignmentName, configurationAssignment) + if err != nil { + return err + } + + resp, err := getMaintenanceAssignmentVirtualMachineScaleSet(ctx, client, virtualMachineScaleSetId, virtualMachineScaleSetIdRaw) + if err != nil { + return err + } + if resp == nil || len(*resp) == 0 { + return fmt.Errorf("could not find Maintenance assignment (virtual machine scale set ID: %q)", virtualMachineScaleSetIdRaw) + } + assignment := (*resp)[0] + if assignment.ID == nil || *assignment.ID == "" { + return fmt.Errorf("empty or nil ID of Maintenance Assignment (virtual machine scale set ID %q)", virtualMachineScaleSetIdRaw) + } + + d.SetId(*assignment.ID) + return resourceArmMaintenanceAssignmentVirtualMachineScaleSetRead(d, meta) +} + +func resourceArmMaintenanceAssignmentVirtualMachineScaleSetRead(d *pluginsdk.ResourceData, meta 
interface{}) error { + client := meta.(*clients.Client).Maintenance.ConfigurationAssignmentsClient + ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.MaintenanceAssignmentVirtualMachineScaleSetID(d.Id()) + if err != nil { + return err + } + + resp, err := getMaintenanceAssignmentVirtualMachineScaleSet(ctx, client, id.VirtualMachineScaleSetId, id.VirtualMachineScaleSetIdRaw) + if err != nil { + return err + } + if resp == nil || len(*resp) == 0 { + d.SetId("") + return nil + } + assignment := (*resp)[0] + if assignment.ID == nil || *assignment.ID == "" { + return fmt.Errorf("empty or nil ID of Maintenance Assignment (virtual machine scale set ID id: %q", id.VirtualMachineScaleSetIdRaw) + } + + // in list api, `ResourceID` returned is always nil + virtualMachineScaleSetId := "" + if id.VirtualMachineScaleSetId != nil { + virtualMachineScaleSetId = id.VirtualMachineScaleSetId.ID() + } + d.Set("virtual_machine_scale_set_id", virtualMachineScaleSetId) + if props := assignment.ConfigurationAssignmentProperties; props != nil { + d.Set("maintenance_configuration_id", props.MaintenanceConfigurationID) + } + return nil +} + +func resourceArmMaintenanceAssignmentVirtualMachineScaleSetDelete(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).Maintenance.ConfigurationAssignmentsClient + ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.MaintenanceAssignmentVirtualMachineScaleSetID(d.Id()) + if err != nil { + return err + } + + if _, err := client.Delete(ctx, id.VirtualMachineScaleSetId.ResourceGroup, "Microsoft.Compute", "virtualMachineScaleSets", id.VirtualMachineScaleSetId.Name, id.Name); err != nil { + return fmt.Errorf("deleting Maintenance Assignment to resource %q: %+v", id.VirtualMachineScaleSetIdRaw, err) + } + + return nil +} + +func getMaintenanceAssignmentVirtualMachineScaleSet(ctx context.Context, client 
*maintenance.ConfigurationAssignmentsClient, id *parseCompute.VirtualMachineScaleSetId, virtualMachineScaleSetId string) (result *[]maintenance.ConfigurationAssignment, err error) { + resp, err := client.List(ctx, id.ResourceGroup, "Microsoft.Compute", "virtualMachineScaleSets", id.Name) + if err != nil { + if !utils.ResponseWasNotFound(resp.Response) { + err = fmt.Errorf("checking for presence of existing Maintenance assignment (virtual machine scale set ID: %q): %+v", virtualMachineScaleSetId, err) + return + } + } + return resp.Value, nil +} diff --git a/azurerm/internal/services/maintenance/maintenance_assignment_virtual_machine_scale_set_resource_test.go b/azurerm/internal/services/maintenance/maintenance_assignment_virtual_machine_scale_set_resource_test.go new file mode 100644 index 000000000000..4317f844a46f --- /dev/null +++ b/azurerm/internal/services/maintenance/maintenance_assignment_virtual_machine_scale_set_resource_test.go @@ -0,0 +1,224 @@ +package maintenance_test + +import ( + "context" + "fmt" + "testing" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/maintenance/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +type MaintenanceAssignmentVirtualMachineScaleSetResource struct { +} + +func TestAccMaintenanceAssignmentVirtualMachineScaleSet_basic(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_maintenance_assignment_virtual_machine_scale_set", "test") + r := MaintenanceAssignmentVirtualMachineScaleSetResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + 
Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + // location not returned by list rest api + data.ImportStep("location"), + }) +} + +func TestAccMaintenanceAssignmentVirtualMachineScaleSet_requiresImport(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_maintenance_assignment_virtual_machine_scale_set", "test") + r := MaintenanceAssignmentVirtualMachineScaleSetResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.RequiresImportErrorStep(r.requiresImport), + }) +} + +func (MaintenanceAssignmentVirtualMachineScaleSetResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { + id, err := parse.MaintenanceAssignmentVirtualMachineScaleSetID(state.ID) + if err != nil { + return nil, err + } + + resp, err := clients.Maintenance.ConfigurationAssignmentsClient.List(ctx, id.VirtualMachineScaleSetId.ResourceGroup, "Microsoft.Compute", "virtualMachineScaleSets", id.VirtualMachineScaleSetId.Name) + if err != nil { + return nil, fmt.Errorf("retrieving Maintenance Assignment Virtual Machine Scale Set (target resource id: %q): %v", id.VirtualMachineScaleSetIdRaw, err) + } + + return utils.Bool(resp.Value != nil && len(*resp.Value) != 0), nil +} + +func (r MaintenanceAssignmentVirtualMachineScaleSetResource) basic(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_maintenance_assignment_virtual_machine_scale_set" "test" { + location = azurerm_resource_group.test.location + maintenance_configuration_id = azurerm_maintenance_configuration.test.id + virtual_machine_scale_set_id = azurerm_linux_virtual_machine_scale_set.test.id +} +`, r.template(data)) +} + +func (r MaintenanceAssignmentVirtualMachineScaleSetResource) requiresImport(data acceptance.TestData) string { + return fmt.Sprintf(` +%s 
+ +resource "azurerm_maintenance_assignment_virtual_machine_scale_set" "import" { + location = azurerm_maintenance_assignment_virtual_machine_scale_set.test.location + maintenance_configuration_id = azurerm_maintenance_assignment_virtual_machine_scale_set.test.maintenance_configuration_id + virtual_machine_scale_set_id = azurerm_maintenance_assignment_virtual_machine_scale_set.test.virtual_machine_scale_set_id +} +`, r.basic(data)) +} + +func (MaintenanceAssignmentVirtualMachineScaleSetResource) template(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_public_ip" "test" { + name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + allocation_method = "Static" +} + +resource "azurerm_lb" "test" { + name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + + frontend_ip_configuration { + name = "internal" + public_ip_address_id = azurerm_public_ip.test.id + } +} + +resource "azurerm_lb_backend_address_pool" "test" { + name = "test" + resource_group_name = azurerm_resource_group.test.name + loadbalancer_id = azurerm_lb.test.id +} + +resource "azurerm_lb_probe" "test" { + resource_group_name = azurerm_resource_group.test.name + loadbalancer_id = azurerm_lb.test.id + name = "acctest-lb-probe" + port = 22 + protocol = "Tcp" +} + +resource "azurerm_lb_rule" "test" { + name = "AccTestLBRule" + resource_group_name = azurerm_resource_group.test.name + loadbalancer_id = azurerm_lb.test.id + probe_id = azurerm_lb_probe.test.id + backend_address_pool_id = azurerm_lb_backend_address_pool.test.id + frontend_ip_configuration_name = "internal" + protocol = "Tcp" + frontend_port = 22 + backend_port = 22 +} + +resource 
"azurerm_maintenance_configuration" "test" { + name = "acctest-MC%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + scope = "OSImage" + visibility = "Custom" + + window { + start_date_time = "5555-12-31 00:00" + expiration_date_time = "6666-12-31 00:00" + duration = "06:00" + time_zone = "Pacific Standard Time" + recur_every = "1Days" + } +} + +resource "azurerm_virtual_network" "test" { + name = "acctestnw-%d" + address_space = ["10.0.0.0/16"] + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name +} + +resource "azurerm_subnet" "test" { + name = "internal" + resource_group_name = azurerm_resource_group.test.name + virtual_network_name = azurerm_virtual_network.test.name + address_prefix = "10.0.2.0/24" +} + +resource "azurerm_linux_virtual_machine_scale_set" "test" { + name = "acctestvmss-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + sku = "Standard_F2" + instances = 1 + admin_username = "adminuser" + admin_password = "P@ssword1234!" 
+ upgrade_mode = "Automatic" + health_probe_id = azurerm_lb_probe.test.id + disable_password_authentication = false + + source_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "16.04-LTS" + version = "latest" + } + + os_disk { + storage_account_type = "Standard_LRS" + caching = "ReadWrite" + } + + network_interface { + name = "example" + primary = true + + ip_configuration { + name = "internal" + primary = true + subnet_id = azurerm_subnet.test.id + load_balancer_backend_address_pool_ids = [azurerm_lb_backend_address_pool.test.id] + } + } + + automatic_os_upgrade_policy { + disable_automatic_rollback = true + enable_automatic_os_upgrade = true + } + + rolling_upgrade_policy { + max_batch_instance_percent = 20 + max_unhealthy_instance_percent = 20 + max_unhealthy_upgraded_instance_percent = 20 + pause_time_between_batches = "PT0S" + } + + depends_on = ["azurerm_lb_rule.test"] +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger) +} diff --git a/azurerm/internal/services/maintenance/maintenance_configuration_data_source.go b/azurerm/internal/services/maintenance/maintenance_configuration_data_source.go index f18faeb02c27..27d9f65ec393 100644 --- a/azurerm/internal/services/maintenance/maintenance_configuration_data_source.go +++ b/azurerm/internal/services/maintenance/maintenance_configuration_data_source.go @@ -38,6 +38,48 @@ func dataSourceMaintenanceConfiguration() *pluginsdk.Resource { Computed: true, }, + "visibility": { + Type: pluginsdk.TypeString, + Computed: true, + }, + + "window": { + Type: pluginsdk.TypeList, + Computed: true, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "start_date_time": { + Type: pluginsdk.TypeString, + Computed: true, + }, + "expiration_date_time": { + Type: pluginsdk.TypeString, + Computed: true, + }, + "duration": { + Type: pluginsdk.TypeString, + Computed: true, + }, + "time_zone": { + Type: pluginsdk.TypeString, + 
Computed: true, + }, + "recur_every": { + Type: pluginsdk.TypeString, + Computed: true, + }, + }, + }, + }, + + "properties": { + Type: pluginsdk.TypeMap, + Computed: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + }, + }, + "tags": tags.SchemaDataSource(), }, } @@ -54,7 +96,7 @@ func dataSourceArmMaintenanceConfigurationRead(d *pluginsdk.ResourceData, meta i resp, err := client.Get(ctx, resGroup, name) if err != nil { if utils.ResponseWasNotFound(resp.Response) { - return fmt.Errorf("Maintenance Configuration %q was not found in Resource Group %q", name, resGroup) + return fmt.Errorf("maintenance Configuration %q was not found in Resource Group %q", name, resGroup) } return fmt.Errorf("retrieving Maintenance Configuration %q (Resource Group %q): %+v", name, resGroup, err) } @@ -68,6 +110,13 @@ func dataSourceArmMaintenanceConfigurationRead(d *pluginsdk.ResourceData, meta i d.Set("location", location.NormalizeNilable(resp.Location)) if props := resp.ConfigurationProperties; props != nil { d.Set("scope", props.MaintenanceScope) + d.Set("visibility", props.Visibility) + d.Set("properties", props.ExtensionProperties) + + window := flattenMaintenanceConfigurationWindow(props.Window) + if err := d.Set("window", window); err != nil { + return fmt.Errorf("error setting `window`: %+v", err) + } } return tags.FlattenAndSet(d, resp.Tags) diff --git a/azurerm/internal/services/maintenance/maintenance_configuration_data_source_test.go b/azurerm/internal/services/maintenance/maintenance_configuration_data_source_test.go index b6f0a22eced4..8c2a4254a239 100644 --- a/azurerm/internal/services/maintenance/maintenance_configuration_data_source_test.go +++ b/azurerm/internal/services/maintenance/maintenance_configuration_data_source_test.go @@ -19,9 +19,17 @@ func TestAccMaintenanceConfigurationDataSource_complete(t *testing.T) { { Config: r.complete(data), Check: acceptance.ComposeTestCheckFunc( - check.That(data.ResourceName).Key("scope").HasValue("Host"), + 
check.That(data.ResourceName).Key("scope").HasValue("SQLDB"), + check.That(data.ResourceName).Key("visibility").HasValue("Custom"), check.That(data.ResourceName).Key("tags.%").HasValue("1"), - check.That(data.ResourceName).Key("tags.env").HasValue("TesT"), + check.That(data.ResourceName).Key("tags.enV").HasValue("TesT"), + check.That(data.ResourceName).Key("window.0.start_date_time").HasValue("5555-12-31 00:00"), + check.That(data.ResourceName).Key("window.0.expiration_date_time").HasValue("6666-12-31 00:00"), + check.That(data.ResourceName).Key("window.0.duration").HasValue("06:00"), + check.That(data.ResourceName).Key("window.0.time_zone").HasValue("Pacific Standard Time"), + check.That(data.ResourceName).Key("window.0.recur_every").HasValue("2Days"), + check.That(data.ResourceName).Key("properties.%").HasValue("1"), + check.That(data.ResourceName).Key("properties.description").HasValue("acceptance test"), ), }, }) diff --git a/azurerm/internal/services/maintenance/maintenance_configuration_resource.go b/azurerm/internal/services/maintenance/maintenance_configuration_resource.go index 6a334356c596..105e173dac33 100644 --- a/azurerm/internal/services/maintenance/maintenance_configuration_resource.go +++ b/azurerm/internal/services/maintenance/maintenance_configuration_resource.go @@ -3,9 +3,10 @@ package maintenance import ( "fmt" "log" + "regexp" "time" - "github.com/Azure/azure-sdk-for-go/services/preview/maintenance/mgmt/2018-06-01-preview/maintenance" + "github.com/Azure/azure-sdk-for-go/services/maintenance/mgmt/2021-05-01/maintenance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" @@ -62,26 +63,75 @@ func resourceArmMaintenanceConfiguration() *pluginsdk.Resource { "scope": { Type: pluginsdk.TypeString, Optional: true, - Default: string(maintenance.ScopeAll), + 
Default: "All", ValidateFunc: validation.StringInSlice([]string{ - string(maintenance.ScopeAll), + "All", // All is still accepted by the API + string(maintenance.ScopeExtension), string(maintenance.ScopeHost), - string(maintenance.ScopeInResource), - string(maintenance.ScopeResource), + string(maintenance.ScopeInGuestPatch), + string(maintenance.ScopeOSImage), + string(maintenance.ScopeSQLDB), + string(maintenance.ScopeSQLManagedInstance), }, false), }, - // There's a bug in the Azure API where the the key of tags is returned in lower-case - // BUG: https://github.com/Azure/azure-rest-api-specs/issues/9075 - // use custom tags defition here to prevent inputting upper case key - "tags": { - Type: pluginsdk.TypeMap, - Optional: true, - ValidateFunc: validate.TagsWithLowerCaseKey, + "visibility": { + Type: pluginsdk.TypeString, + Optional: true, + Default: string(maintenance.VisibilityCustom), + ValidateFunc: validation.StringInSlice([]string{ + string(maintenance.VisibilityCustom), + // Creating public configurations doesn't appear to be supported, API returns `Public Maintenance Configuration must set correct properties` + // string(maintenance.VisibilityPublic), + }, false), + }, + + "window": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "start_date_time": { + Type: pluginsdk.TypeString, + Required: true, + }, + "expiration_date_time": { + Type: pluginsdk.TypeString, + Optional: true, + }, + "duration": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringMatch( + regexp.MustCompile("^(0[0-9]|1[0-9]|2[0-3]):[0-5][0-9]$"), + "duration must match the format HH:mm", + ), + }, + "time_zone": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validate.MaintenanceTimeZone(), + }, + "recur_every": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + }, + }, + }, + + "properties": { + Type: 
pluginsdk.TypeMap, + Optional: true, Elem: &pluginsdk.Schema{ - Type: pluginsdk.TypeString, + Type: pluginsdk.TypeString, + ValidateFunc: validation.StringIsNotEmpty, }, }, + + "tags": tags.Schema(), }, } } @@ -105,12 +155,22 @@ func resourceArmMaintenanceConfigurationCreateUpdate(d *pluginsdk.ResourceData, } } + scope := d.Get("scope").(string) + visibility := d.Get("visibility").(string) + windowRaw := d.Get("window").([]interface{}) + window := expandMaintenanceConfigurationWindow(windowRaw) + + extensionProperties := utils.ExpandMapStringPtrString(d.Get("properties").(map[string]interface{})) + configuration := maintenance.Configuration{ Name: utils.String(id.Name), Location: utils.String(location.Normalize(d.Get("location").(string))), ConfigurationProperties: &maintenance.ConfigurationProperties{ - MaintenanceScope: maintenance.Scope(d.Get("scope").(string)), - Namespace: utils.String("Microsoft.Maintenance"), + MaintenanceScope: maintenance.Scope(scope), + Visibility: maintenance.Visibility(visibility), + Namespace: utils.String("Microsoft.Maintenance"), + Window: window, + ExtensionProperties: extensionProperties, }, Tags: tags.Expand(d.Get("tags").(map[string]interface{})), } @@ -148,6 +208,13 @@ func resourceArmMaintenanceConfigurationRead(d *pluginsdk.ResourceData, meta int d.Set("location", location.NormalizeNilable(resp.Location)) if props := resp.ConfigurationProperties; props != nil { d.Set("scope", props.MaintenanceScope) + d.Set("visibility", props.Visibility) + d.Set("properties", props.ExtensionProperties) + + window := flattenMaintenanceConfigurationWindow(props.Window) + if err := d.Set("window", window); err != nil { + return fmt.Errorf("error setting `window`: %+v", err) + } } return tags.FlattenAndSet(d, resp.Tags) } @@ -167,3 +234,56 @@ func resourceArmMaintenanceConfigurationDelete(d *pluginsdk.ResourceData, meta i } return nil } + +func expandMaintenanceConfigurationWindow(input []interface{}) *maintenance.Window { + if len(input) == 0 { 
+ return nil + } + + v := input[0].(map[string]interface{}) + startDateTime := v["start_date_time"].(string) + expirationDateTime := v["expiration_date_time"].(string) + duration := v["duration"].(string) + timeZone := v["time_zone"].(string) + recurEvery := v["recur_every"].(string) + window := maintenance.Window{ + StartDateTime: utils.String(startDateTime), + ExpirationDateTime: utils.String(expirationDateTime), + Duration: utils.String(duration), + TimeZone: utils.String(timeZone), + RecurEvery: utils.String(recurEvery), + } + return &window +} + +func flattenMaintenanceConfigurationWindow(input *maintenance.Window) []interface{} { + results := make([]interface{}, 0) + + if v := input; v != nil { + output := make(map[string]interface{}) + + if startDateTime := v.StartDateTime; startDateTime != nil { + output["start_date_time"] = *startDateTime + } + + if expirationDateTime := v.ExpirationDateTime; expirationDateTime != nil { + output["expiration_date_time"] = *expirationDateTime + } + + if duration := v.Duration; duration != nil { + output["duration"] = *duration + } + + if timeZone := v.TimeZone; timeZone != nil { + output["time_zone"] = *timeZone + } + + if recurEvery := v.RecurEvery; recurEvery != nil { + output["recur_every"] = *recurEvery + } + + results = append(results, output) + } + + return results +} diff --git a/azurerm/internal/services/maintenance/maintenance_configuration_resource_test.go b/azurerm/internal/services/maintenance/maintenance_configuration_resource_test.go index c50d7af25690..79ef58b05eee 100644 --- a/azurerm/internal/services/maintenance/maintenance_configuration_resource_test.go +++ b/azurerm/internal/services/maintenance/maintenance_configuration_resource_test.go @@ -26,6 +26,7 @@ func TestAccMaintenanceConfiguration_basic(t *testing.T) { Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("scope").HasValue("All"), + 
check.That(data.ResourceName).Key("visibility").HasValue("Custom"), ), }, data.ImportStep(), @@ -56,9 +57,17 @@ func TestAccMaintenanceConfiguration_complete(t *testing.T) { Config: r.complete(data), Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), - check.That(data.ResourceName).Key("scope").HasValue("Host"), + check.That(data.ResourceName).Key("scope").HasValue("SQLDB"), + check.That(data.ResourceName).Key("visibility").HasValue("Custom"), check.That(data.ResourceName).Key("tags.%").HasValue("1"), - check.That(data.ResourceName).Key("tags.env").HasValue("TesT"), + check.That(data.ResourceName).Key("tags.enV").HasValue("TesT"), + check.That(data.ResourceName).Key("window.0.start_date_time").HasValue("5555-12-31 00:00"), + check.That(data.ResourceName).Key("window.0.expiration_date_time").HasValue("6666-12-31 00:00"), + check.That(data.ResourceName).Key("window.0.duration").HasValue("06:00"), + check.That(data.ResourceName).Key("window.0.time_zone").HasValue("Pacific Standard Time"), + check.That(data.ResourceName).Key("window.0.recur_every").HasValue("2Days"), + check.That(data.ResourceName).Key("properties.%").HasValue("1"), + check.That(data.ResourceName).Key("properties.description").HasValue("acceptance test"), ), }, data.ImportStep(), @@ -75,7 +84,10 @@ func TestAccMaintenanceConfiguration_update(t *testing.T) { Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("scope").HasValue("All"), + check.That(data.ResourceName).Key("visibility").HasValue("Custom"), check.That(data.ResourceName).Key("tags.%").HasValue("0"), + check.That(data.ResourceName).Key("window.#").HasValue("0"), + check.That(data.ResourceName).Key("properties.%").HasValue("0"), ), }, data.ImportStep(), @@ -83,9 +95,17 @@ func TestAccMaintenanceConfiguration_update(t *testing.T) { Config: r.complete(data), Check: acceptance.ComposeTestCheckFunc( 
check.That(data.ResourceName).ExistsInAzure(r), - check.That(data.ResourceName).Key("scope").HasValue("Host"), + check.That(data.ResourceName).Key("scope").HasValue("SQLDB"), + check.That(data.ResourceName).Key("visibility").HasValue("Custom"), check.That(data.ResourceName).Key("tags.%").HasValue("1"), - check.That(data.ResourceName).Key("tags.env").HasValue("TesT"), + check.That(data.ResourceName).Key("tags.enV").HasValue("TesT"), + check.That(data.ResourceName).Key("window.0.start_date_time").HasValue("5555-12-31 00:00"), + check.That(data.ResourceName).Key("window.0.expiration_date_time").HasValue("6666-12-31 00:00"), + check.That(data.ResourceName).Key("window.0.duration").HasValue("06:00"), + check.That(data.ResourceName).Key("window.0.time_zone").HasValue("Pacific Standard Time"), + check.That(data.ResourceName).Key("window.0.recur_every").HasValue("2Days"), + check.That(data.ResourceName).Key("properties.%").HasValue("1"), + check.That(data.ResourceName).Key("properties.description").HasValue("acceptance test"), ), }, data.ImportStep(), @@ -94,7 +114,10 @@ func TestAccMaintenanceConfiguration_update(t *testing.T) { Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("scope").HasValue("All"), + check.That(data.ResourceName).Key("visibility").HasValue("Custom"), check.That(data.ResourceName).Key("tags.%").HasValue("0"), + check.That(data.ResourceName).Key("window.#").HasValue("0"), + check.That(data.ResourceName).Key("properties.%").HasValue("0"), ), }, data.ImportStep(), @@ -131,6 +154,7 @@ resource "azurerm_maintenance_configuration" "test" { resource_group_name = azurerm_resource_group.test.name location = azurerm_resource_group.test.location scope = "All" + visibility = "Custom" } `, data.RandomInteger, data.Locations.Primary, data.RandomInteger) } @@ -144,6 +168,7 @@ resource "azurerm_maintenance_configuration" "import" { resource_group_name = 
azurerm_maintenance_configuration.test.resource_group_name location = azurerm_maintenance_configuration.test.location scope = azurerm_maintenance_configuration.test.scope + visibility = azurerm_maintenance_configuration.test.visibility } `, r.basic(data)) } @@ -163,10 +188,23 @@ resource "azurerm_maintenance_configuration" "test" { name = "acctest-MC%d" resource_group_name = azurerm_resource_group.test.name location = azurerm_resource_group.test.location - scope = "Host" + scope = "SQLDB" + visibility = "Custom" + + window { + start_date_time = "5555-12-31 00:00" + expiration_date_time = "6666-12-31 00:00" + duration = "06:00" + time_zone = "Pacific Standard Time" + recur_every = "2Days" + } + + properties = { + description = "acceptance test" + } tags = { - env = "TesT" + enV = "TesT" } } `, data.RandomInteger, data.Locations.Primary, data.RandomInteger) diff --git a/azurerm/internal/services/maintenance/migration/configuration_v0_to_v1.go b/azurerm/internal/services/maintenance/migration/configuration_v0_to_v1.go index 013f4e727890..758a9848d570 100644 --- a/azurerm/internal/services/maintenance/migration/configuration_v0_to_v1.go +++ b/azurerm/internal/services/maintenance/migration/configuration_v0_to_v1.go @@ -4,7 +4,6 @@ import ( "context" "log" - "github.com/Azure/azure-sdk-for-go/services/preview/maintenance/mgmt/2018-06-01-preview/maintenance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/maintenance/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" @@ -37,7 +36,7 @@ func (ConfigurationV0ToV1) Schema() map[string]*pluginsdk.Schema { "scope": { Type: pluginsdk.TypeString, Optional: true, - Default: string(maintenance.ScopeAll), + Default: "All", }, "tags": { diff --git a/azurerm/internal/services/maintenance/parse/maintenance_assignment_virtual_machine_scale_set.go 
b/azurerm/internal/services/maintenance/parse/maintenance_assignment_virtual_machine_scale_set.go new file mode 100644 index 000000000000..3158ab9ad88b --- /dev/null +++ b/azurerm/internal/services/maintenance/parse/maintenance_assignment_virtual_machine_scale_set.go @@ -0,0 +1,33 @@ +package parse + +import ( + "fmt" + "regexp" + + parseCompute "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/parse" +) + +type MaintenanceAssignmentVirtualMachineScaleSetId struct { + VirtualMachineScaleSetId *parseCompute.VirtualMachineScaleSetId + VirtualMachineScaleSetIdRaw string + Name string +} + +func MaintenanceAssignmentVirtualMachineScaleSetID(input string) (*MaintenanceAssignmentVirtualMachineScaleSetId, error) { + groups := regexp.MustCompile(`^(.+)/providers/Microsoft\.Maintenance/configurationAssignments/([^/]+)$`).FindStringSubmatch(input) + if len(groups) != 3 { + return nil, fmt.Errorf("parsing Maintenance Assignment Virtual Machine Scale Set ID (%q)", input) + } + + targetResourceId, name := groups[1], groups[2] + virtualMachineScaleSetId, err := parseCompute.VirtualMachineScaleSetID(targetResourceId) + if err != nil { + return nil, fmt.Errorf("parsing Maintenance Assignment Virtual Machine Scale Set ID: %q: Expected valid virtual machine scale set ID", input) + } + + return &MaintenanceAssignmentVirtualMachineScaleSetId{ + VirtualMachineScaleSetId: virtualMachineScaleSetId, + VirtualMachineScaleSetIdRaw: targetResourceId, + Name: name, + }, nil +} diff --git a/azurerm/internal/services/maintenance/parse/maintenance_assignment_virtual_machine_scale_set_test.go b/azurerm/internal/services/maintenance/parse/maintenance_assignment_virtual_machine_scale_set_test.go new file mode 100644 index 000000000000..d37ddf36cdee --- /dev/null +++ b/azurerm/internal/services/maintenance/parse/maintenance_assignment_virtual_machine_scale_set_test.go @@ -0,0 +1,96 @@ +package parse + +import ( + "reflect" + "testing" + + parseCompute 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/parse" +) + +func TestMaintenanceAssignmentVirtualMachineScaleSetID(t *testing.T) { + testData := []struct { + Name string + Input string + Error bool + Expect *MaintenanceAssignmentVirtualMachineScaleSetId + }{ + { + Name: "Empty", + Input: "", + Error: true, + }, + { + Name: "No Resource Groups Segment", + Input: "/subscriptions/00000000-0000-0000-0000-000000000000", + Error: true, + }, + { + Name: "No Resource Groups Value", + Input: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/", + Error: true, + }, + { + Name: "No target resource type", + Input: "/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/resGroup1/providers/microsoft.compute/", + Error: true, + }, + { + Name: "No target resource name", + Input: "/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/resGroup1/providers/microsoft.compute/virtualMachineScaleSets/", + Error: true, + }, + { + Name: "No Maintenance Assignment Segment", + Input: "/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/resGroup1/providers/microsoft.compute/virtualMachineScaleSets/vmss1/providers/Microsoft.Maintenance/", + Error: true, + }, + { + Name: "No Maintenance Assignment name", + Input: "/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/resGroup1/providers/microsoft.compute/virtualMachineScaleSets/vmss1/providers/Microsoft.Maintenance/configurationAssignments/", + Error: true, + }, + { + Name: "ID of Maintenance Assignment to vm", + Input: "/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/resGroup1/providers/microsoft.compute/virtualMachineScaleSets/vmss1/providers/Microsoft.Maintenance/configurationAssignments/assign1", + Error: false, + Expect: &MaintenanceAssignmentVirtualMachineScaleSetId{ + VirtualMachineScaleSetIdRaw: 
"/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/resGroup1/providers/microsoft.compute/virtualMachineScaleSets/vmss1", + VirtualMachineScaleSetId: &parseCompute.VirtualMachineScaleSetId{ + SubscriptionId: "00000000-0000-0000-0000-000000000000", + ResourceGroup: "resGroup1", + Name: "vmss1", + }, + Name: "assign1", + }, + }, + { + Name: "Wrong Casing", + Input: "/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/resGroup1/providers/microsoft.compute/virtualMachineScaleSets/vmss1/providers/Microsoft.Maintenance/ConfigurationAssignments/assign1", + Error: true, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q..", v.Name) + + actual, err := MaintenanceAssignmentVirtualMachineScaleSetID(v.Input) + if err != nil { + if v.Expect == nil { + continue + } + t.Fatalf("Expected a value but got an error: %s", err) + } + + if actual.Name != v.Expect.Name { + t.Fatalf("Expected %q but got %q for Name", v.Expect.Name, actual.Name) + } + + if actual.VirtualMachineScaleSetIdRaw != v.Expect.VirtualMachineScaleSetIdRaw { + t.Fatalf("Expected %q but got %q for VirtualMachineScaleSetIdRaw", v.Expect.VirtualMachineScaleSetIdRaw, actual.VirtualMachineScaleSetIdRaw) + } + + if !reflect.DeepEqual(v.Expect.VirtualMachineScaleSetId, actual.VirtualMachineScaleSetId) { + t.Fatalf("Expected %+v but got %+v", v.Expect.VirtualMachineScaleSetId, actual.VirtualMachineScaleSetId) + } + } +} diff --git a/azurerm/internal/services/maintenance/registration.go b/azurerm/internal/services/maintenance/registration.go index 56ba4cd5bbac..7db896b0812d 100644 --- a/azurerm/internal/services/maintenance/registration.go +++ b/azurerm/internal/services/maintenance/registration.go @@ -24,8 +24,9 @@ func (r Registration) SupportedDataSources() map[string]*pluginsdk.Resource { func (r Registration) SupportedResources() map[string]*pluginsdk.Resource { return map[string]*pluginsdk.Resource{ - "azurerm_maintenance_assignment_dedicated_host": 
resourceArmMaintenanceAssignmentDedicatedHost(), - "azurerm_maintenance_assignment_virtual_machine": resourceArmMaintenanceAssignmentVirtualMachine(), - "azurerm_maintenance_configuration": resourceArmMaintenanceConfiguration(), + "azurerm_maintenance_assignment_dedicated_host": resourceArmMaintenanceAssignmentDedicatedHost(), + "azurerm_maintenance_assignment_virtual_machine": resourceArmMaintenanceAssignmentVirtualMachine(), + "azurerm_maintenance_assignment_virtual_machine_scale_set": resourceArmMaintenanceAssignmentVirtualMachineScaleSet(), + "azurerm_maintenance_configuration": resourceArmMaintenanceConfiguration(), } } diff --git a/azurerm/internal/services/maintenance/validate/maintenance.go b/azurerm/internal/services/maintenance/validate/maintenance.go new file mode 100644 index 000000000000..c2bd3c4c9803 --- /dev/null +++ b/azurerm/internal/services/maintenance/validate/maintenance.go @@ -0,0 +1,154 @@ +package validate + +import ( + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" +) + +func MaintenanceTimeZone() pluginsdk.SchemaValidateFunc { + // Output from [System.TimeZoneInfo]::GetSystemTimeZones() + candidates := []string{ + "Afghanistan Standard Time", + "Alaskan Standard Time", + "Aleutian Standard Time", + "Altai Standard Time", + "Arab Standard Time", + "Arabian Standard Time", + "Arabic Standard Time", + "Argentina Standard Time", + "Astrakhan Standard Time", + "Atlantic Standard Time", + "AUS Central Standard Time", + "Aus Central W. Standard Time", + "AUS Eastern Standard Time", + "Azerbaijan Standard Time", + "Azores Standard Time", + "Bahia Standard Time", + "Bangladesh Standard Time", + "Belarus Standard Time", + "Bougainville Standard Time", + "Canada Central Standard Time", + "Cape Verde Standard Time", + "Caucasus Standard Time", + "Cen. 
Australia Standard Time", + "Central America Standard Time", + "Central Asia Standard Time", + "Central Brazilian Standard Time", + "Central Europe Standard Time", + "Central European Standard Time", + "Central Pacific Standard Time", + "Central Standard Time", + "Central Standard Time (Mexico)", + "Chatham Islands Standard Time", + "China Standard Time", + "Cuba Standard Time", + "Dateline Standard Time", + "E. Africa Standard Time", + "E. Australia Standard Time", + "E. Europe Standard Time", + "E. South America Standard Time", + "Easter Island Standard Time", + "Eastern Standard Time", + "Eastern Standard Time (Mexico)", + "Egypt Standard Time", + "Ekaterinburg Standard Time", + "Fiji Standard Time", + "FLE Standard Time", + "Georgian Standard Time", + "GMT Standard Time", + "Greenland Standard Time", + "Greenwich Standard Time", + "GTB Standard Time", + "Haiti Standard Time", + "Hawaiian Standard Time", + "India Standard Time", + "Iran Standard Time", + "Israel Standard Time", + "Jordan Standard Time", + "Kaliningrad Standard Time", + "Kamchatka Standard Time", + "Korea Standard Time", + "Libya Standard Time", + "Line Islands Standard Time", + "Lord Howe Standard Time", + "Magadan Standard Time", + "Magallanes Standard Time", + "Marquesas Standard Time", + "Mauritius Standard Time", + "Mid-Atlantic Standard Time", + "Middle East Standard Time", + "Montevideo Standard Time", + "Morocco Standard Time", + "Mountain Standard Time", + "Mountain Standard Time (Mexico)", + "Myanmar Standard Time", + "N. 
Central Asia Standard Time", + "Namibia Standard Time", + "Nepal Standard Time", + "New Zealand Standard Time", + "Newfoundland Standard Time", + "Norfolk Standard Time", + "North Asia East Standard Time", + "North Asia Standard Time", + "North Korea Standard Time", + "Omsk Standard Time", + "Pacific SA Standard Time", + "Pacific Standard Time", + "Pacific Standard Time (Mexico)", + "Pakistan Standard Time", + "Paraguay Standard Time", + "Qyzylorda Standard Time", + "Romance Standard Time", + "Russia Time Zone 10", + "Russia Time Zone 11", + "Russia Time Zone 3", + "Russian Standard Time", + "SA Eastern Standard Time", + "SA Pacific Standard Time", + "SA Western Standard Time", + "Saint Pierre Standard Time", + "Sakhalin Standard Time", + "Samoa Standard Time", + "Sao Tome Standard Time", + "Saratov Standard Time", + "SE Asia Standard Time", + "Singapore Standard Time", + "South Africa Standard Time", + "South Sudan Standard Time", + "Sri Lanka Standard Time", + "Sudan Standard Time", + "Syria Standard Time", + "Taipei Standard Time", + "Tasmania Standard Time", + "Tocantins Standard Time", + "Tokyo Standard Time", + "Tomsk Standard Time", + "Tonga Standard Time", + "Transbaikal Standard Time", + "Turkey Standard Time", + "Turks And Caicos Standard Time", + "Ulaanbaatar Standard Time", + "US Eastern Standard Time", + "US Mountain Standard Time", + "UTC", + "UTC-02", + "UTC-08", + "UTC-09", + "UTC-11", + "UTC+12", + "UTC+13", + "Venezuela Standard Time", + "Vladivostok Standard Time", + "Volgograd Standard Time", + "W. Australia Standard Time", + "W. Central Africa Standard Time", + "W. Europe Standard Time", + "W. 
Mongolia Standard Time", + "West Asia Standard Time", + "West Bank Standard Time", + "West Pacific Standard Time", + "Yakutsk Standard Time", + "Yukon Standard Time", + } + return validation.StringInSlice(candidates, false) +} diff --git a/azurerm/internal/services/maintenance/validate/tags.go b/azurerm/internal/services/maintenance/validate/tags.go deleted file mode 100644 index b0abe6073d7c..000000000000 --- a/azurerm/internal/services/maintenance/validate/tags.go +++ /dev/null @@ -1,22 +0,0 @@ -package validate - -import ( - "fmt" - - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" -) - -func TagsWithLowerCaseKey(v interface{}, k string) (warnings []string, errors []error) { - warnings, errors = tags.Validate(v, k) - - tagsMap := v.(map[string]interface{}) - for key := range tagsMap { - for _, c := range key { - if c >= 'A' && c <= 'Z' { - errors = append(errors, fmt.Errorf("the key of %q can not contain upper case letter. The key %q has upper case letter %q", k, key, c)) - } - } - } - - return -} diff --git a/azurerm/internal/services/maintenance/validate/tags_test.go b/azurerm/internal/services/maintenance/validate/tags_test.go deleted file mode 100644 index 3f398ad78814..000000000000 --- a/azurerm/internal/services/maintenance/validate/tags_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package validate - -import ( - "testing" -) - -func TestTagsWithLowerCaseKey(t *testing.T) { - testData := []struct { - input map[string]interface{} - expected bool - }{ - { - input: map[string]interface{}{}, - expected: true, - }, - { - // basic example - input: map[string]interface{}{ - "key1": "Value1", - "key2": "VALUE", - }, - expected: true, - }, - { - // contains upper case key - input: map[string]interface{}{ - "KEY": "value", - "key2": "VALUE", - }, - expected: false, - }, - } - - for _, v := range testData { - t.Logf("[DEBUG] Testing %q..", v.input) - - _, errors := TagsWithLowerCaseKey(v.input, "tags") - actual := len(errors) == 0 - if 
v.expected != actual { - t.Fatalf("Expected %t but got %t", v.expected, actual) - } - } -} diff --git a/azurerm/internal/services/managedapplications/managed_application_definition_resource.go b/azurerm/internal/services/managedapplications/managed_application_definition_resource.go index f96f12eb34af..2bbee564faf1 100644 --- a/azurerm/internal/services/managedapplications/managed_application_definition_resource.go +++ b/azurerm/internal/services/managedapplications/managed_application_definition_resource.go @@ -6,7 +6,6 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-07-01/managedapplications" - "github.com/hashicorp/terraform-plugin-sdk/helper/structure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" @@ -92,7 +91,7 @@ func resourceManagedApplicationDefinition() *pluginsdk.Resource { Type: pluginsdk.TypeString, Optional: true, ValidateFunc: validation.StringIsJSON, - DiffSuppressFunc: structure.SuppressJsonDiff, + DiffSuppressFunc: pluginsdk.SuppressJsonDiff, ConflictsWith: []string{"package_file_uri"}, }, @@ -106,7 +105,7 @@ func resourceManagedApplicationDefinition() *pluginsdk.Resource { Type: pluginsdk.TypeString, Optional: true, ValidateFunc: validation.StringIsJSON, - DiffSuppressFunc: structure.SuppressJsonDiff, + DiffSuppressFunc: pluginsdk.SuppressJsonDiff, ConflictsWith: []string{"package_file_uri"}, }, diff --git a/azurerm/internal/services/managedapplications/managed_application_definition_resource_test.go b/azurerm/internal/services/managedapplications/managed_application_definition_resource_test.go index 33b371b415f8..b5219f53f9bd 100644 --- a/azurerm/internal/services/managedapplications/managed_application_definition_resource_test.go +++ 
b/azurerm/internal/services/managedapplications/managed_application_definition_resource_test.go @@ -167,7 +167,7 @@ resource "azurerm_managed_application_definition" "test" { create_ui_definition = <*%&:\/? and must not end with a space or . func ValidateMsSqlJobAgentName(i interface{}, k string) (_ []string, errors []error) { if m, regexErrs := validate.RegExHelper(i, k, `^[^?<>*%&:\/?]{0,127}[^?<>*%&:\/?. ]$`); !m { diff --git a/azurerm/internal/services/mysql/mysql_database_resource.go b/azurerm/internal/services/mysql/mysql_database_resource.go index a99bb2a89b71..871b3c851952 100644 --- a/azurerm/internal/services/mysql/mysql_database_resource.go +++ b/azurerm/internal/services/mysql/mysql_database_resource.go @@ -5,12 +5,11 @@ import ( "log" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/mysql/parse" - "github.com/Azure/azure-sdk-for-go/services/mysql/mgmt/2020-01-01/mysql" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/mysql/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/mysql/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/suppress" diff --git a/azurerm/internal/services/mysql/mysql_database_resource_test.go b/azurerm/internal/services/mysql/mysql_database_resource_test.go index 4e624e5e6bf7..f5052aa4adda 100644 --- a/azurerm/internal/services/mysql/mysql_database_resource_test.go +++ b/azurerm/internal/services/mysql/mysql_database_resource_test.go @@ -5,11 +5,10 @@ import ( "fmt" "testing" - 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/mysql/parse" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/mysql/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) diff --git a/azurerm/internal/services/mysql/mysql_firewall_rule_resource_test.go b/azurerm/internal/services/mysql/mysql_firewall_rule_resource_test.go index 409da3816121..ab8d2c9d1fb9 100644 --- a/azurerm/internal/services/mysql/mysql_firewall_rule_resource_test.go +++ b/azurerm/internal/services/mysql/mysql_firewall_rule_resource_test.go @@ -5,11 +5,10 @@ import ( "fmt" "testing" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/mysql/parse" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/mysql/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) diff --git a/azurerm/internal/services/mysql/mysql_server_data_source.go b/azurerm/internal/services/mysql/mysql_server_data_source.go index 70f23bca3df0..9f066bea5e88 100644 --- a/azurerm/internal/services/mysql/mysql_server_data_source.go +++ b/azurerm/internal/services/mysql/mysql_server_data_source.go @@ -4,12 +4,11 @@ import ( "fmt" 
"time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/location" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/mysql/parse" - "github.com/Azure/azure-sdk-for-go/services/mysql/mgmt/2020-01-01/mysql" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/location" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/mysql/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/mysql/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" diff --git a/azurerm/internal/services/mysql/mysql_virtual_network_rule_resource.go b/azurerm/internal/services/mysql/mysql_virtual_network_rule_resource.go index cb731b95bdff..2f37d0c2fa15 100644 --- a/azurerm/internal/services/mysql/mysql_virtual_network_rule_resource.go +++ b/azurerm/internal/services/mysql/mysql_virtual_network_rule_resource.go @@ -109,7 +109,7 @@ func resourceMySQLVirtualNetworkRuleCreateUpdate(d *pluginsdk.ResourceData, meta stateConf.Timeout = d.Timeout(pluginsdk.TimeoutUpdate) } - if _, err := stateConf.WaitForState(); err != nil { + if _, err := stateConf.WaitForStateContext(ctx); err != nil { return fmt.Errorf("waiting for %s to be created or updated: %+v", id, err) } diff --git a/azurerm/internal/services/mysql/mysql_virtual_network_rule_resource_test.go b/azurerm/internal/services/mysql/mysql_virtual_network_rule_resource_test.go index 07cde93d00e3..9c1be3e9156d 100644 --- a/azurerm/internal/services/mysql/mysql_virtual_network_rule_resource_test.go +++ b/azurerm/internal/services/mysql/mysql_virtual_network_rule_resource_test.go @@ -6,11 +6,10 @@ import ( 
"regexp" "testing" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/mysql/parse" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/mysql/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) diff --git a/azurerm/internal/services/netapp/netapp_account_data_source.go b/azurerm/internal/services/netapp/netapp_account_data_source.go index c4bfcdb23086..4ce1e1406511 100644 --- a/azurerm/internal/services/netapp/netapp_account_data_source.go +++ b/azurerm/internal/services/netapp/netapp_account_data_source.go @@ -4,10 +4,9 @@ import ( "fmt" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/netapp/validate" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/netapp/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" diff --git a/azurerm/internal/services/netapp/netapp_pool_data_source.go b/azurerm/internal/services/netapp/netapp_pool_data_source.go index 76d374ebcade..5fde20a14f3d 100644 --- a/azurerm/internal/services/netapp/netapp_pool_data_source.go +++ b/azurerm/internal/services/netapp/netapp_pool_data_source.go @@ -4,10 +4,9 @@ import ( "fmt" "time" - 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/netapp/validate" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/netapp/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" diff --git a/azurerm/internal/services/netapp/netapp_pool_resource.go b/azurerm/internal/services/netapp/netapp_pool_resource.go index 0a3cebe35e49..cf9fa5dbfa8c 100644 --- a/azurerm/internal/services/netapp/netapp_pool_resource.go +++ b/azurerm/internal/services/netapp/netapp_pool_resource.go @@ -205,7 +205,7 @@ func resourceNetAppPoolDelete(d *pluginsdk.ResourceData, meta interface{}) error Timeout: d.Timeout(pluginsdk.TimeoutDelete), } - if _, err := stateConf.WaitForState(); err != nil { + if _, err := stateConf.WaitForStateContext(ctx); err != nil { return fmt.Errorf("Error waiting for NetApp Pool %q (Resource Group %q) to be deleted: %+v", id.Name, id.ResourceGroup, err) } diff --git a/azurerm/internal/services/netapp/netapp_snapshot_data_source.go b/azurerm/internal/services/netapp/netapp_snapshot_data_source.go index 1841f5f5e3a6..56bed9eab5a0 100644 --- a/azurerm/internal/services/netapp/netapp_snapshot_data_source.go +++ b/azurerm/internal/services/netapp/netapp_snapshot_data_source.go @@ -4,10 +4,9 @@ import ( "fmt" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/netapp/validate" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/netapp/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" diff --git a/azurerm/internal/services/netapp/netapp_snapshot_resource.go b/azurerm/internal/services/netapp/netapp_snapshot_resource.go index eaf2ccb1e968..f839b1edd8c9 100644 --- a/azurerm/internal/services/netapp/netapp_snapshot_resource.go +++ b/azurerm/internal/services/netapp/netapp_snapshot_resource.go @@ -209,7 +209,7 @@ func resourceNetAppSnapshotDelete(d *pluginsdk.ResourceData, meta interface{}) e Timeout: d.Timeout(pluginsdk.TimeoutDelete), } - if _, err := stateConf.WaitForState(); err != nil { + if _, err := stateConf.WaitForStateContext(ctx); err != nil { return fmt.Errorf("Error waiting for NetApp Snapshot %q (Resource Group %q) to be deleted: %+v", id.Name, id.ResourceGroup, err) } diff --git a/azurerm/internal/services/netapp/netapp_volume_data_source.go b/azurerm/internal/services/netapp/netapp_volume_data_source.go index e57e77235294..25050eb051ae 100644 --- a/azurerm/internal/services/netapp/netapp_volume_data_source.go +++ b/azurerm/internal/services/netapp/netapp_volume_data_source.go @@ -4,10 +4,9 @@ import ( "fmt" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/netapp/validate" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/netapp/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" diff --git a/azurerm/internal/services/netapp/netapp_volume_resource.go b/azurerm/internal/services/netapp/netapp_volume_resource.go index 698958dfb0fc..9ea797977c03 100644 --- a/azurerm/internal/services/netapp/netapp_volume_resource.go +++ b/azurerm/internal/services/netapp/netapp_volume_resource.go @@ -588,7 +588,7 @@ func waitForVolumeCreation(ctx context.Context, client *netapp.VolumesClient, id Timeout: timeout, } - if _, err := stateConf.WaitForState(); err != nil { + if _, err := stateConf.WaitForStateContext(ctx); err != nil { return fmt.Errorf("Error waiting NetApp Volume Provisioning Service %q (Resource Group %q) to complete: %+v", id.Name, id.ResourceGroup, err) } @@ -606,7 +606,7 @@ func waitForReplAuthorization(ctx context.Context, client *netapp.VolumesClient, Timeout: timeout, } - if _, err := stateConf.WaitForState(); err != nil { + if _, err := stateConf.WaitForStateContext(ctx); err != nil { return fmt.Errorf("Error waiting for replication authorization NetApp Volume Provisioning Service %q (Resource Group %q) to complete: %+v", id.Name, id.ResourceGroup, err) } @@ -624,7 +624,7 @@ func waitForReplMirrorState(ctx context.Context, client *netapp.VolumesClient, i Timeout: timeout, } - if _, err := stateConf.WaitForState(); err != nil { + if _, err := stateConf.WaitForStateContext(ctx); err != nil { return fmt.Errorf("Error waiting for NetApp Volume %q (Resource Group %q) to be in %s mirroring state: %+v", id.Name, id.ResourceGroup, desiredState, err) } @@ -642,7 +642,7 @@ func waitForReplicationDeletion(ctx context.Context, client *netapp.VolumesClien Timeout: timeout, } - if _, err := stateConf.WaitForState(); err != nil { + if _, err := stateConf.WaitForStateContext(ctx); err != nil { return fmt.Errorf("Error waiting for NetApp Volume replication %q (Resource Group %q) to be deleted: %+v", id.Name, id.ResourceGroup, err) } @@ -660,7 +660,7 @@ func waitForVolumeDeletion(ctx 
context.Context, client *netapp.VolumesClient, id Timeout: timeout, } - if _, err := stateConf.WaitForState(); err != nil { + if _, err := stateConf.WaitForStateContext(ctx); err != nil { return fmt.Errorf("Error waiting for NetApp Volume Provisioning Service %q (Resource Group %q) to be deleted: %+v", id.Name, id.ResourceGroup, err) } diff --git a/azurerm/internal/services/netapp/netapp_volume_resource_test.go b/azurerm/internal/services/netapp/netapp_volume_resource_test.go index d11da2ecbeab..0fe615d8a379 100644 --- a/azurerm/internal/services/netapp/netapp_volume_resource_test.go +++ b/azurerm/internal/services/netapp/netapp_volume_resource_test.go @@ -6,11 +6,10 @@ import ( "os" "testing" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/netapp/parse" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/netapp/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) diff --git a/azurerm/internal/services/network/application_gateway_data_source.go b/azurerm/internal/services/network/application_gateway_data_source.go index ab57c40463a9..e82fcf7c6bb1 100644 --- a/azurerm/internal/services/network/application_gateway_data_source.go +++ b/azurerm/internal/services/network/application_gateway_data_source.go @@ -4,11 +4,12 @@ import ( "fmt" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/identity" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/location" + msiparse "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/msi/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" @@ -63,7 +64,10 @@ func dataSourceApplicationGatewayRead(d *pluginsdk.ResourceData, meta interface{ d.Set("location", location.NormalizeNilable(resp.Location)) - identity := flattenApplicationGatewayDataSourceIdentity(resp.Identity) + identity, err := flattenApplicationGatewayDataSourceIdentity(resp.Identity) + if err != nil { + return err + } flattenedIdentity := applicationGatewayDataSourceIdentity{}.Flatten(identity) if err = d.Set("identity", flattenedIdentity); err != nil { return err @@ -72,14 +76,21 @@ func dataSourceApplicationGatewayRead(d *pluginsdk.ResourceData, meta interface{ return tags.FlattenAndSet(d, resp.Tags) } -func flattenApplicationGatewayDataSourceIdentity(input *network.ManagedServiceIdentity) *identity.ExpandedConfig { +func flattenApplicationGatewayDataSourceIdentity(input *network.ManagedServiceIdentity) (*identity.ExpandedConfig, error) { var config *identity.ExpandedConfig if input != nil { + identityIds := make([]string, 0, len(input.UserAssignedIdentities)) + for id := range input.UserAssignedIdentities { + parsedId, err := msiparse.UserAssignedIdentityIDInsensitively(id) + if err != nil { + return nil, err + } + identityIds = append(identityIds, parsedId.ID()) + } config = &identity.ExpandedConfig{ - Type: string(input.Type), - PrincipalId: input.PrincipalID, - TenantId: input.TenantID, + Type: 
string(input.Type), + UserAssignedIdentityIds: &identityIds, } } - return config + return config, nil } diff --git a/azurerm/internal/services/network/application_gateway_data_source_test.go b/azurerm/internal/services/network/application_gateway_data_source_test.go index b4775eb364f8..d9c4f60a8578 100644 --- a/azurerm/internal/services/network/application_gateway_data_source_test.go +++ b/azurerm/internal/services/network/application_gateway_data_source_test.go @@ -25,6 +25,21 @@ func TestAccDataSourceAppGateway_basic(t *testing.T) { }) } +func TestAccDataSourceAppGateway_userAssignedIdentity(t *testing.T) { + data := acceptance.BuildTestData(t, "data.azurerm_application_gateway", "test") + r := AppGatewayDataSource{} + + data.DataSourceTest(t, []acceptance.TestStep{ + { + Config: r.userAssignedIdentity(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).Key("location").Exists(), + check.That(data.ResourceName).Key("identity.0.identity_ids.#").HasValue("1"), + ), + }, + }) +} + func (AppGatewayDataSource) basic(data acceptance.TestData) string { return fmt.Sprintf(` %s @@ -35,3 +50,14 @@ data "azurerm_application_gateway" "test" { } `, ApplicationGatewayResource{}.basic(data)) } + +func (AppGatewayDataSource) userAssignedIdentity(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +data "azurerm_application_gateway" "test" { + resource_group_name = azurerm_application_gateway.test.resource_group_name + name = azurerm_application_gateway.test.name +} +`, ApplicationGatewayResource{}.UserDefinedIdentity(data)) +} diff --git a/azurerm/internal/services/network/application_gateway_resource.go b/azurerm/internal/services/network/application_gateway_resource.go index b2d94ab0a6c5..dea64428d8a4 100644 --- a/azurerm/internal/services/network/application_gateway_resource.go +++ b/azurerm/internal/services/network/application_gateway_resource.go @@ -7,7 +7,7 @@ import ( "strings" "time" - 
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" @@ -169,8 +169,8 @@ func resourceApplicationGateway() *pluginsdk.Resource { Required: true, DiffSuppressFunc: suppress.CaseDifference, ValidateFunc: validation.StringInSlice([]string{ - string(network.HTTP), - string(network.HTTPS), + string(network.ProtocolHTTP), + string(network.ProtocolHTTPS), }, true), }, @@ -179,8 +179,8 @@ func resourceApplicationGateway() *pluginsdk.Resource { Required: true, DiffSuppressFunc: suppress.CaseDifference, ValidateFunc: validation.StringInSlice([]string{ - string(network.Enabled), - string(network.Disabled), + string(network.ApplicationGatewayCookieBasedAffinityEnabled), + string(network.ApplicationGatewayCookieBasedAffinityDisabled), }, true), }, @@ -307,8 +307,8 @@ func resourceApplicationGateway() *pluginsdk.Resource { Computed: true, DiffSuppressFunc: suppress.CaseDifference, ValidateFunc: validation.StringInSlice([]string{ - string(network.Dynamic), - string(network.Static), + string(network.IPAllocationMethodDynamic), + string(network.IPAllocationMethodStatic), }, true), }, @@ -394,8 +394,8 @@ func resourceApplicationGateway() *pluginsdk.Resource { Required: true, DiffSuppressFunc: suppress.CaseDifference, ValidateFunc: validation.StringInSlice([]string{ - string(network.HTTP), - string(network.HTTPS), + string(network.ProtocolHTTP), + string(network.ProtocolHTTPS), }, true), }, @@ -451,8 +451,8 @@ func resourceApplicationGateway() *pluginsdk.Resource { Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ - string(network.HTTPStatus403), - string(network.HTTPStatus502), + 
string(network.ApplicationGatewayCustomErrorStatusCodeHTTPStatus403), + string(network.ApplicationGatewayCustomErrorStatusCodeHTTPStatus502), }, false), }, @@ -493,8 +493,8 @@ func resourceApplicationGateway() *pluginsdk.Resource { Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ - string(network.Basic), - string(network.PathBasedRouting), + string(network.ApplicationGatewayRequestRoutingRuleTypeBasic), + string(network.ApplicationGatewayRequestRoutingRuleTypePathBasedRouting), }, false), }, @@ -583,10 +583,10 @@ func resourceApplicationGateway() *pluginsdk.Resource { Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ - string(network.Permanent), - string(network.Temporary), - string(network.Found), - string(network.SeeOther), + string(network.ApplicationGatewayRedirectTypePermanent), + string(network.ApplicationGatewayRedirectTypeTemporary), + string(network.ApplicationGatewayRedirectTypeFound), + string(network.ApplicationGatewayRedirectTypeSeeOther), }, false), }, @@ -656,13 +656,13 @@ func resourceApplicationGateway() *pluginsdk.Resource { Required: true, DiffSuppressFunc: suppress.CaseDifference, ValidateFunc: validation.StringInSlice([]string{ - string(network.StandardSmall), - string(network.StandardMedium), - string(network.StandardLarge), - string(network.StandardV2), - string(network.WAFLarge), - string(network.WAFMedium), - string(network.WAFV2), + string(network.ApplicationGatewaySkuNameStandardSmall), + string(network.ApplicationGatewaySkuNameStandardMedium), + string(network.ApplicationGatewaySkuNameStandardLarge), + string(network.ApplicationGatewaySkuNameStandardV2), + string(network.ApplicationGatewaySkuNameWAFLarge), + string(network.ApplicationGatewaySkuNameWAFMedium), + string(network.ApplicationGatewaySkuNameWAFV2), }, true), }, @@ -759,9 +759,9 @@ func resourceApplicationGateway() *pluginsdk.Resource { Elem: &pluginsdk.Schema{ Type: pluginsdk.TypeString, 
ValidateFunc: validation.StringInSlice([]string{ - string(network.TLSv10), - string(network.TLSv11), - string(network.TLSv12), + string(network.ApplicationGatewaySslProtocolTLSv10), + string(network.ApplicationGatewaySslProtocolTLSv11), + string(network.ApplicationGatewaySslProtocolTLSv12), }, false), }, }, @@ -770,8 +770,8 @@ func resourceApplicationGateway() *pluginsdk.Resource { Type: pluginsdk.TypeString, Optional: true, ValidateFunc: validation.StringInSlice([]string{ - string(network.Custom), - string(network.Predefined), + string(network.ApplicationGatewaySslPolicyTypeCustom), + string(network.ApplicationGatewaySslPolicyTypePredefined), }, false), }, @@ -793,9 +793,9 @@ func resourceApplicationGateway() *pluginsdk.Resource { Type: pluginsdk.TypeString, Optional: true, ValidateFunc: validation.StringInSlice([]string{ - string(network.TLSv10), - string(network.TLSv11), - string(network.TLSv12), + string(network.ApplicationGatewaySslProtocolTLSv10), + string(network.ApplicationGatewaySslProtocolTLSv11), + string(network.ApplicationGatewaySslProtocolTLSv12), }, false), }, }, @@ -822,8 +822,8 @@ func resourceApplicationGateway() *pluginsdk.Resource { Required: true, DiffSuppressFunc: suppress.CaseDifference, ValidateFunc: validation.StringInSlice([]string{ - string(network.HTTP), - string(network.HTTPS), + string(network.ProtocolHTTP), + string(network.ProtocolHTTPS), }, true), }, @@ -1218,8 +1218,8 @@ func resourceApplicationGateway() *pluginsdk.Resource { Required: true, DiffSuppressFunc: suppress.CaseDifference, ValidateFunc: validation.StringInSlice([]string{ - string(network.Detection), - string(network.Prevention), + string(network.ApplicationGatewayFirewallModeDetection), + string(network.ApplicationGatewayFirewallModePrevention), }, true), }, @@ -1283,9 +1283,9 @@ func resourceApplicationGateway() *pluginsdk.Resource { Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ - string(network.RequestArgNames), - 
string(network.RequestCookieNames), - string(network.RequestHeaderNames), + string(network.OwaspCrsExclusionEntryMatchVariableRequestArgNames), + string(network.OwaspCrsExclusionEntryMatchVariableRequestCookieNames), + string(network.OwaspCrsExclusionEntryMatchVariableRequestHeaderNames), }, false), }, @@ -1327,8 +1327,8 @@ func resourceApplicationGateway() *pluginsdk.Resource { Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ - string(network.HTTPStatus403), - string(network.HTTPStatus502), + string(network.ApplicationGatewayCustomErrorStatusCodeHTTPStatus403), + string(network.ApplicationGatewayCustomErrorStatusCodeHTTPStatus502), }, false), }, @@ -1482,8 +1482,8 @@ func resourceApplicationGatewayCreateUpdate(d *pluginsdk.ResourceData, meta inte appGWSkuTier := d.Get("sku.0.tier").(string) wafFileUploadLimit := d.Get("waf_configuration.0.file_upload_limit_mb").(int) - if appGWSkuTier != string(network.WAFV2) && wafFileUploadLimit > 500 { - return fmt.Errorf("Only SKU `%s` allows `file_upload_limit_mb` to exceed 500MB", network.WAFV2) + if appGWSkuTier != string(network.ApplicationGatewayTierWAFV2) && wafFileUploadLimit > 500 { + return fmt.Errorf("Only SKU `%s` allows `file_upload_limit_mb` to exceed 500MB", network.ApplicationGatewayTierWAFV2) } if v, ok := d.GetOk("firewall_policy_id"); ok { @@ -2173,7 +2173,7 @@ func flattenApplicationGatewayConnectionDraining(input *network.ApplicationGatew func expandApplicationGatewaySslPolicy(d *pluginsdk.ResourceData) *network.ApplicationGatewaySslPolicy { policy := network.ApplicationGatewaySslPolicy{} - disabledSSLPolicies := make([]network.ApplicationGatewaySslProtocol, 0) + disabledSSLProtocols := make([]network.ApplicationGatewaySslProtocol, 0) vs := d.Get("ssl_policy").([]interface{}) @@ -2182,16 +2182,16 @@ func expandApplicationGatewaySslPolicy(d *pluginsdk.ResourceData) *network.Appli policyType := network.ApplicationGatewaySslPolicyType(v["policy_type"].(string)) for 
_, policy := range v["disabled_protocols"].([]interface{}) { - disabledSSLPolicies = append(disabledSSLPolicies, network.ApplicationGatewaySslProtocol(policy.(string))) + disabledSSLProtocols = append(disabledSSLProtocols, network.ApplicationGatewaySslProtocol(policy.(string))) } - if policyType == network.Predefined { + if policyType == network.ApplicationGatewaySslPolicyTypePredefined { policyName := network.ApplicationGatewaySslPolicyName(v["policy_name"].(string)) policy = network.ApplicationGatewaySslPolicy{ PolicyType: policyType, PolicyName: policyName, } - } else if policyType == network.Custom { + } else if policyType == network.ApplicationGatewaySslPolicyTypeCustom { minProtocolVersion := network.ApplicationGatewaySslProtocol(v["min_protocol_version"].(string)) cipherSuites := make([]network.ApplicationGatewaySslCipherSuite, 0) @@ -2207,9 +2207,9 @@ func expandApplicationGatewaySslPolicy(d *pluginsdk.ResourceData) *network.Appli } } - if len(disabledSSLPolicies) > 0 { + if len(disabledSSLProtocols) > 0 { policy = network.ApplicationGatewaySslPolicy{ - DisabledSslProtocols: &disabledSSLPolicies, + DisabledSslProtocols: &disabledSSLProtocols, } } @@ -3895,6 +3895,16 @@ func applicationGatewayCustomizeDiff(ctx context.Context, d *pluginsdk.ResourceD return fmt.Errorf("The Application Gateway must specify either `capacity` or `autoscale_configuration` for the selected SKU tier %q", tier) } + sslPolicy := d.Get("ssl_policy").([]interface{}) + if len(sslPolicy) > 0 && sslPolicy[0] != nil { + v := sslPolicy[0].(map[string]interface{}) + disabledProtocols := v["disabled_protocols"].([]interface{}) + policyType := v["policy_type"].(string) + if len(disabledProtocols) > 0 && policyType != "" { + return fmt.Errorf("setting disabled_protocols is not allowed when policy_type is defined") + } + } + if hasCapacity { if (strings.EqualFold(tier, string(network.ApplicationGatewayTierStandard)) || strings.EqualFold(tier, string(network.ApplicationGatewayTierWAF))) && 
(capacity.(int) < 1 || capacity.(int) > 32) { return fmt.Errorf("The value '%d' exceeds the maximum capacity allowed for a %q V1 SKU, the %q SKU must have a capacity value between 1 and 32", capacity, tier, tier) diff --git a/azurerm/internal/services/network/application_gateway_resource_test.go b/azurerm/internal/services/network/application_gateway_resource_test.go index 0804894c44c2..2126d536ebe7 100644 --- a/azurerm/internal/services/network/application_gateway_resource_test.go +++ b/azurerm/internal/services/network/application_gateway_resource_test.go @@ -9,7 +9,7 @@ import ( "regexp" "testing" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" diff --git a/azurerm/internal/services/network/application_security_group_resource.go b/azurerm/internal/services/network/application_security_group_resource.go index cc289bcebae2..3f84fd8c67d0 100644 --- a/azurerm/internal/services/network/application_security_group_resource.go +++ b/azurerm/internal/services/network/application_security_group_resource.go @@ -5,7 +5,7 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/hashicorp/go-azure-helpers/response" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" diff --git a/azurerm/internal/services/network/bastion_host_resource.go b/azurerm/internal/services/network/bastion_host_resource.go index 5ce414767616..481cba9b2da4 100644 --- 
a/azurerm/internal/services/network/bastion_host_resource.go +++ b/azurerm/internal/services/network/bastion_host_resource.go @@ -5,8 +5,7 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" - + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/hashicorp/go-azure-helpers/response" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" diff --git a/azurerm/internal/services/network/client/client.go b/azurerm/internal/services/network/client/client.go index 9cef897bbac0..a604e5b566fa 100644 --- a/azurerm/internal/services/network/client/client.go +++ b/azurerm/internal/services/network/client/client.go @@ -1,7 +1,7 @@ package client import ( - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/common" ) @@ -13,9 +13,12 @@ type Client struct { DDOSProtectionPlansClient *network.DdosProtectionPlansClient ExpressRouteAuthsClient *network.ExpressRouteCircuitAuthorizationsClient ExpressRouteCircuitsClient *network.ExpressRouteCircuitsClient + ExpressRouteCircuitConnectionClient *network.ExpressRouteCircuitConnectionsClient + ExpressRouteConnectionsClient *network.ExpressRouteConnectionsClient ExpressRouteGatewaysClient *network.ExpressRouteGatewaysClient ExpressRoutePeeringsClient *network.ExpressRouteCircuitPeeringsClient ExpressRoutePortsClient *network.ExpressRoutePortsClient + FlowLogsClient *network.FlowLogsClient HubRouteTableClient *network.HubRouteTablesClient HubVirtualNetworkConnectionClient *network.HubVirtualNetworkConnectionsClient InterfacesClient *network.InterfacesClient @@ -80,6 +83,12 @@ func NewClient(o *common.ClientOptions) *Client { ExpressRouteCircuitsClient := 
network.NewExpressRouteCircuitsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) o.ConfigureClient(&ExpressRouteCircuitsClient.Client, o.ResourceManagerAuthorizer) + ExpressRouteCircuitConnectionClient := network.NewExpressRouteCircuitConnectionsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) + o.ConfigureClient(&ExpressRouteCircuitConnectionClient.Client, o.ResourceManagerAuthorizer) + + ExpressRouteConnectionsClient := network.NewExpressRouteConnectionsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) + o.ConfigureClient(&ExpressRouteConnectionsClient.Client, o.ResourceManagerAuthorizer) + ExpressRouteGatewaysClient := network.NewExpressRouteGatewaysClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) o.ConfigureClient(&ExpressRouteGatewaysClient.Client, o.ResourceManagerAuthorizer) @@ -89,6 +98,9 @@ func NewClient(o *common.ClientOptions) *Client { ExpressRoutePortsClient := network.NewExpressRoutePortsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) o.ConfigureClient(&ExpressRoutePortsClient.Client, o.ResourceManagerAuthorizer) + FlowLogsClient := network.NewFlowLogsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) + o.ConfigureClient(&FlowLogsClient.Client, o.ResourceManagerAuthorizer) + HubRouteTableClient := network.NewHubRouteTablesClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) o.ConfigureClient(&HubRouteTableClient.Client, o.ResourceManagerAuthorizer) @@ -217,9 +229,12 @@ func NewClient(o *common.ClientOptions) *Client { DDOSProtectionPlansClient: &DDOSProtectionPlansClient, ExpressRouteAuthsClient: &ExpressRouteAuthsClient, ExpressRouteCircuitsClient: &ExpressRouteCircuitsClient, + ExpressRouteCircuitConnectionClient: &ExpressRouteCircuitConnectionClient, + ExpressRouteConnectionsClient: &ExpressRouteConnectionsClient, ExpressRouteGatewaysClient: &ExpressRouteGatewaysClient, ExpressRoutePeeringsClient: &ExpressRoutePeeringsClient, ExpressRoutePortsClient: 
&ExpressRoutePortsClient, + FlowLogsClient: &FlowLogsClient, HubRouteTableClient: &HubRouteTableClient, HubVirtualNetworkConnectionClient: &HubVirtualNetworkConnectionClient, InterfacesClient: &InterfacesClient, diff --git a/azurerm/internal/services/network/express_route_circuit_authorization_resource.go b/azurerm/internal/services/network/express_route_circuit_authorization_resource.go index 133284a9bf0a..58d4ef2d6110 100644 --- a/azurerm/internal/services/network/express_route_circuit_authorization_resource.go +++ b/azurerm/internal/services/network/express_route_circuit_authorization_resource.go @@ -4,7 +4,7 @@ import ( "fmt" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/hashicorp/go-azure-helpers/response" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" diff --git a/azurerm/internal/services/network/express_route_circuit_authorization_resource_test.go b/azurerm/internal/services/network/express_route_circuit_authorization_resource_test.go index af2818ba4424..b70f9e72722b 100644 --- a/azurerm/internal/services/network/express_route_circuit_authorization_resource_test.go +++ b/azurerm/internal/services/network/express_route_circuit_authorization_resource_test.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" @@ -21,7 +20,7 @@ func testAccExpressRouteCircuitAuthorization_basic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_express_route_circuit_authorization", "test") r := 
ExpressRouteCircuitAuthorizationResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.basicConfig(data), Check: acceptance.ComposeTestCheckFunc( @@ -37,7 +36,7 @@ func testAccExpressRouteCircuitAuthorization_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_express_route_circuit_authorization", "test") r := ExpressRouteCircuitAuthorizationResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.basicConfig(data), Check: acceptance.ComposeTestCheckFunc( @@ -57,7 +56,7 @@ func testAccExpressRouteCircuitAuthorization_multiple(t *testing.T) { r := ExpressRouteCircuitAuthorizationResource{} secondResourceName := "azurerm_express_route_circuit_authorization.test2" - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.multipleConfig(data), Check: acceptance.ComposeTestCheckFunc( diff --git a/azurerm/internal/services/network/express_route_circuit_connection_resource.go b/azurerm/internal/services/network/express_route_circuit_connection_resource.go new file mode 100644 index 000000000000..8ceec877ab09 --- /dev/null +++ b/azurerm/internal/services/network/express_route_circuit_connection_resource.go @@ -0,0 +1,299 @@ +package network + +import ( + "fmt" + "log" + "time" + + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func resourceExpressRouteCircuitConnection() *pluginsdk.Resource { + return &pluginsdk.Resource{ + Create: resourceExpressRouteCircuitConnectionCreate, + Read: resourceExpressRouteCircuitConnectionRead, + Update: resourceExpressRouteCircuitConnectionUpdate, + Delete: resourceExpressRouteCircuitConnectionDelete, + + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), + }, + + Importer: pluginsdk.ImporterValidatingResourceId(func(id string) error { + _, err := parse.ExpressRouteCircuitConnectionID(id) + return err + }), + + Schema: map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.ExpressRouteCircuitConnectionName, + }, + + "peering_id": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.ExpressRouteCircuitPeeringID, + }, + + "peer_peering_id": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.ExpressRouteCircuitPeeringID, + }, + + "address_prefix_ipv4": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.IsCIDR, + }, + + "authorization_key": { + Type: pluginsdk.TypeString, + Optional: true, + Sensitive: true, + ValidateFunc: validation.IsUUID, + }, + + "address_prefix_ipv6": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.IsCIDR, + }, + }, + } +} + +func resourceExpressRouteCircuitConnectionCreate(d *pluginsdk.ResourceData, meta interface{}) error { + client := 
meta.(*clients.Client).Network.ExpressRouteCircuitConnectionClient + circuitClient := meta.(*clients.Client).Network.ExpressRouteCircuitsClient + ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) + defer cancel() + + circuitPeeringId, err := parse.ExpressRouteCircuitPeeringID(d.Get("peering_id").(string)) + if err != nil { + return err + } + + id := parse.NewExpressRouteCircuitConnectionID(circuitPeeringId.SubscriptionId, circuitPeeringId.ResourceGroup, circuitPeeringId.ExpressRouteCircuitName, circuitPeeringId.PeeringName, d.Get("name").(string)) + + existing, err := client.Get(ctx, id.ResourceGroup, id.ExpressRouteCircuitName, id.PeeringName, id.ConnectionName) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("checking for existing %s: %+v", id, err) + } + } + + if !utils.ResponseWasNotFound(existing.Response) { + return tf.ImportAsExistsError("azurerm_express_route_circuit_connection", id.ID()) + } + + circuitPeerPeeringId, err := parse.ExpressRouteCircuitPeeringID(d.Get("peer_peering_id").(string)) + if err != nil { + return err + } + + expressRouteCircuitConnectionParameters := network.ExpressRouteCircuitConnection{ + Name: utils.String(id.ConnectionName), + ExpressRouteCircuitConnectionPropertiesFormat: &network.ExpressRouteCircuitConnectionPropertiesFormat{ + AddressPrefix: utils.String(d.Get("address_prefix_ipv4").(string)), + ExpressRouteCircuitPeering: &network.SubResource{ + ID: utils.String(circuitPeeringId.ID()), + }, + PeerExpressRouteCircuitPeering: &network.SubResource{ + ID: utils.String(circuitPeerPeeringId.ID()), + }, + }, + } + + if v, ok := d.GetOk("authorization_key"); ok { + expressRouteCircuitConnectionParameters.ExpressRouteCircuitConnectionPropertiesFormat.AuthorizationKey = utils.String(v.(string)) + } + + if v, ok := d.GetOk("address_prefix_ipv6"); ok { + circuitId := parse.NewExpressRouteCircuitID(circuitPeeringId.SubscriptionId, circuitPeeringId.ResourceGroup, 
circuitPeeringId.ExpressRouteCircuitName) + + circuit, err := circuitClient.Get(ctx, circuitId.ResourceGroup, circuitId.Name) + if err != nil { + return fmt.Errorf("retrieving %s: %+v", circuitId, err) + } + + if circuit.ExpressRouteCircuitPropertiesFormat != nil && circuit.ExpressRouteCircuitPropertiesFormat.ExpressRoutePort != nil { + return fmt.Errorf("`address_prefix_ipv6` cannot be set when ExpressRoute Circuit Connection with ExpressRoute Circuit based on ExpressRoute Port") + } else { + expressRouteCircuitConnectionParameters.ExpressRouteCircuitConnectionPropertiesFormat.Ipv6CircuitConnectionConfig = &network.Ipv6CircuitConnectionConfig{ + AddressPrefix: utils.String(v.(string)), + } + } + } + + future, err := client.CreateOrUpdate(ctx, id.ResourceGroup, id.ExpressRouteCircuitName, id.PeeringName, id.ConnectionName, expressRouteCircuitConnectionParameters) + if err != nil { + return fmt.Errorf("creating %s: %+v", id, err) + } + + if err := future.WaitForCompletionRef(ctx, client.Client); err != nil { + return fmt.Errorf("waiting for creation of %s: %+v", id, err) + } + + d.SetId(id.ID()) + + return resourceExpressRouteCircuitConnectionRead(d, meta) +} + +func resourceExpressRouteCircuitConnectionRead(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).Network.ExpressRouteCircuitConnectionClient + ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.ExpressRouteCircuitConnectionID(d.Id()) + if err != nil { + return err + } + + resp, err := client.Get(ctx, id.ResourceGroup, id.ExpressRouteCircuitName, id.PeeringName, id.ConnectionName) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + log.Printf("[INFO] %s does not exist - removing from state", *id) + d.SetId("") + return nil + } + return fmt.Errorf("retrieving %s: %+v", *id, err) + } + + d.Set("name", id.ConnectionName) + d.Set("peering_id", parse.NewExpressRouteCircuitPeeringID(id.SubscriptionId, 
id.ResourceGroup, id.ExpressRouteCircuitName, id.PeeringName).ID()) + + if props := resp.ExpressRouteCircuitConnectionPropertiesFormat; props != nil { + d.Set("address_prefix_ipv4", props.AddressPrefix) + + // The ExpressRoute Circuit Connection API returns "*****************" for AuthorizationKey when it's changed from a valid value to `nil` + // See more details from https://github.com/Azure/azure-rest-api-specs/issues/15030 + authorizationKey := "" + if props.AuthorizationKey != nil && *props.AuthorizationKey != "*****************" { + authorizationKey = *props.AuthorizationKey + } + d.Set("authorization_key", authorizationKey) + + addressPrefixIPv6 := "" + if props.Ipv6CircuitConnectionConfig != nil && props.Ipv6CircuitConnectionConfig.AddressPrefix != nil { + addressPrefixIPv6 = *props.Ipv6CircuitConnectionConfig.AddressPrefix + } + d.Set("address_prefix_ipv6", addressPrefixIPv6) + + if props.PeerExpressRouteCircuitPeering != nil && props.PeerExpressRouteCircuitPeering.ID != nil { + circuitPeerPeeringId, err := parse.ExpressRouteCircuitPeeringID(*props.PeerExpressRouteCircuitPeering.ID) + if err != nil { + return err + } + d.Set("peer_peering_id", circuitPeerPeeringId.ID()) + } + } + + return nil +} + +func resourceExpressRouteCircuitConnectionUpdate(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).Network.ExpressRouteCircuitConnectionClient + circuitClient := meta.(*clients.Client).Network.ExpressRouteCircuitsClient + ctx, cancel := timeouts.ForUpdate(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.ExpressRouteCircuitConnectionID(d.Id()) + if err != nil { + return err + } + + circuitPeeringId, err := parse.ExpressRouteCircuitPeeringID(d.Get("peering_id").(string)) + if err != nil { + return err + } + + circuitPeerPeeringId, err := parse.ExpressRouteCircuitPeeringID(d.Get("peer_peering_id").(string)) + if err != nil { + return err + } + + expressRouteCircuitConnectionParameters := 
network.ExpressRouteCircuitConnection{ + Name: utils.String(id.ConnectionName), + ExpressRouteCircuitConnectionPropertiesFormat: &network.ExpressRouteCircuitConnectionPropertiesFormat{ + AddressPrefix: utils.String(d.Get("address_prefix_ipv4").(string)), + ExpressRouteCircuitPeering: &network.SubResource{ + ID: utils.String(circuitPeeringId.ID()), + }, + PeerExpressRouteCircuitPeering: &network.SubResource{ + ID: utils.String(circuitPeerPeeringId.ID()), + }, + }, + } + + if v, ok := d.GetOk("authorization_key"); ok { + expressRouteCircuitConnectionParameters.ExpressRouteCircuitConnectionPropertiesFormat.AuthorizationKey = utils.String(v.(string)) + } + + if v, ok := d.GetOk("address_prefix_ipv6"); ok { + circuitId := parse.NewExpressRouteCircuitID(circuitPeeringId.SubscriptionId, circuitPeeringId.ResourceGroup, circuitPeeringId.ExpressRouteCircuitName) + + circuit, err := circuitClient.Get(ctx, circuitId.ResourceGroup, circuitId.Name) + if err != nil { + return fmt.Errorf("retrieving %s: %+v", circuitId, err) + } + + if circuit.ExpressRouteCircuitPropertiesFormat != nil && circuit.ExpressRouteCircuitPropertiesFormat.ExpressRoutePort != nil { + return fmt.Errorf("`address_prefix_ipv6` cannot be set when ExpressRoute Circuit Connection with ExpressRoute Circuit based on ExpressRoute Port") + } else { + expressRouteCircuitConnectionParameters.ExpressRouteCircuitConnectionPropertiesFormat.Ipv6CircuitConnectionConfig = &network.Ipv6CircuitConnectionConfig{ + AddressPrefix: utils.String(v.(string)), + } + } + } + + future, err := client.CreateOrUpdate(ctx, id.ResourceGroup, id.ExpressRouteCircuitName, id.PeeringName, id.ConnectionName, expressRouteCircuitConnectionParameters) + if err != nil { + return fmt.Errorf("updating %s: %+v", id, err) + } + + if err := future.WaitForCompletionRef(ctx, client.Client); err != nil { + return fmt.Errorf("waiting for update of %s: %+v", id, err) + } + + return resourceExpressRouteCircuitConnectionRead(d, meta) +} + +func 
resourceExpressRouteCircuitConnectionDelete(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).Network.ExpressRouteCircuitConnectionClient + ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.ExpressRouteCircuitConnectionID(d.Id()) + if err != nil { + return err + } + + future, err := client.Delete(ctx, id.ResourceGroup, id.ExpressRouteCircuitName, id.PeeringName, id.ConnectionName) + if err != nil { + return fmt.Errorf("deleting %s: %+v", *id, err) + } + + if err := future.WaitForCompletionRef(ctx, client.Client); err != nil { + return fmt.Errorf("waiting for deletion of %s: %+v", *id, err) + } + + return nil +} diff --git a/azurerm/internal/services/network/express_route_circuit_connection_resource_test.go b/azurerm/internal/services/network/express_route_circuit_connection_resource_test.go new file mode 100644 index 000000000000..f9c7d98b442b --- /dev/null +++ b/azurerm/internal/services/network/express_route_circuit_connection_resource_test.go @@ -0,0 +1,241 @@ +package network_test + +import ( + "context" + "fmt" + "testing" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +type ExpressRouteCircuitConnectionResource struct{} + +func TestAccExpressRouteCircuitConnection(t *testing.T) { + acceptance.RunTestsInSequence(t, map[string]map[string]func(t *testing.T){ + "Resource": { + "basic": testAccExpressRouteCircuitConnection_basic, + "requiresImport": 
testAccExpressRouteCircuitConnection_requiresImport, + "complete": testAccExpressRouteCircuitConnection_complete, + "update": testAccExpressRouteCircuitConnection_update, + }, + }) +} + +func testAccExpressRouteCircuitConnection_basic(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_express_route_circuit_connection", "test") + r := ExpressRouteCircuitConnectionResource{} + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func testAccExpressRouteCircuitConnection_requiresImport(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_express_route_circuit_connection", "test") + r := ExpressRouteCircuitConnectionResource{} + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.RequiresImportErrorStep(r.requiresImport), + }) +} + +func testAccExpressRouteCircuitConnection_complete(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_express_route_circuit_connection", "test") + r := ExpressRouteCircuitConnectionResource{} + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ + { + Config: r.complete(data, "846a1918-b7a2-4917-b43c-8c4cdaee006a"), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func testAccExpressRouteCircuitConnection_update(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_express_route_circuit_connection", "test") + r := ExpressRouteCircuitConnectionResource{} + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.complete(data, 
"846a1918-b7a2-4917-b43c-8c4cdaee006a"), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.complete(data, "946a1918-b7a2-4917-b43c-8c4cdaee006a"), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func (r ExpressRouteCircuitConnectionResource) Exists(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { + id, err := parse.ExpressRouteCircuitConnectionID(state.ID) + if err != nil { + return nil, err + } + + resp, err := client.Network.ExpressRouteCircuitConnectionClient.Get(ctx, id.ResourceGroup, id.ExpressRouteCircuitName, id.PeeringName, id.ConnectionName) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + return utils.Bool(false), nil + } + + return nil, fmt.Errorf("retrieving %s: %+v", *id, err) + } + + return utils.Bool(true), nil +} + +func (r ExpressRouteCircuitConnectionResource) basic(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_express_route_circuit_connection" "test" { + name = "acctest-ExpressRouteCircuitConn-%d" + peering_id = azurerm_express_route_circuit_peering.test.id + peer_peering_id = azurerm_express_route_circuit_peering.peer_test.id + address_prefix_ipv4 = "192.169.8.0/29" +} +`, r.template(data), data.RandomInteger) +} + +func (r ExpressRouteCircuitConnectionResource) requiresImport(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_express_route_circuit_connection" "import" { + name = azurerm_express_route_circuit_connection.test.name + peering_id = azurerm_express_route_circuit_connection.test.peering_id + peer_peering_id = azurerm_express_route_circuit_connection.test.peer_peering_id + 
address_prefix_ipv4 = azurerm_express_route_circuit_connection.test.address_prefix_ipv4 +} +`, r.basic(data)) +} + +func (r ExpressRouteCircuitConnectionResource) complete(data acceptance.TestData, authorizationKey string) string { + return fmt.Sprintf(` +%s + +resource "azurerm_express_route_circuit_connection" "test" { + name = "acctest-ExpressRouteCircuitConn-%d" + peering_id = azurerm_express_route_circuit_peering.test.id + peer_peering_id = azurerm_express_route_circuit_peering.peer_test.id + address_prefix_ipv4 = "192.169.8.0/29" + authorization_key = "%s" +} +`, r.template(data), data.RandomInteger, authorizationKey) +} + +func (r ExpressRouteCircuitConnectionResource) template(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-ercircuitconn-%d" + location = "%s" +} + +resource "azurerm_express_route_port" "test" { + name = "acctest-erp-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + peering_location = "Equinix-Seattle-SE2" + bandwidth_in_gbps = 10 + encapsulation = "Dot1Q" +} + +resource "azurerm_express_route_circuit" "test" { + name = "acctest-erc-%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + express_route_port_id = azurerm_express_route_port.test.id + bandwidth_in_gbps = 5 + + sku { + tier = "Standard" + family = "MeteredData" + } +} + +resource "azurerm_express_route_port" "peer_test" { + name = "acctest-erp2-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + peering_location = "CDC-Canberra" + bandwidth_in_gbps = 10 + encapsulation = "Dot1Q" +} + +resource "azurerm_express_route_circuit" "peer_test" { + name = "acctest-erc2-%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + 
express_route_port_id = azurerm_express_route_port.peer_test.id + bandwidth_in_gbps = 5 + + sku { + tier = "Standard" + family = "MeteredData" + } +} + +resource "azurerm_express_route_circuit_peering" "test" { + peering_type = "AzurePrivatePeering" + express_route_circuit_name = azurerm_express_route_circuit.test.name + resource_group_name = azurerm_resource_group.test.name + shared_key = "ItsASecret" + peer_asn = 100 + primary_peer_address_prefix = "192.168.1.0/30" + secondary_peer_address_prefix = "192.168.1.0/30" + vlan_id = 100 +} + +resource "azurerm_express_route_circuit_peering" "peer_test" { + peering_type = "AzurePrivatePeering" + express_route_circuit_name = azurerm_express_route_circuit.peer_test.name + resource_group_name = azurerm_resource_group.test.name + shared_key = "ItsASecret" + peer_asn = 100 + primary_peer_address_prefix = "192.168.1.0/30" + secondary_peer_address_prefix = "192.168.1.0/30" + vlan_id = 100 +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger) +} diff --git a/azurerm/internal/services/network/express_route_circuit_data_source.go b/azurerm/internal/services/network/express_route_circuit_data_source.go index a3a952d159b6..dd1507e77eb8 100644 --- a/azurerm/internal/services/network/express_route_circuit_data_source.go +++ b/azurerm/internal/services/network/express_route_circuit_data_source.go @@ -4,7 +4,7 @@ import ( "fmt" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" diff --git a/azurerm/internal/services/network/express_route_circuit_peering_resource.go 
b/azurerm/internal/services/network/express_route_circuit_peering_resource.go index 5c7083c55698..b361a549ac2f 100644 --- a/azurerm/internal/services/network/express_route_circuit_peering_resource.go +++ b/azurerm/internal/services/network/express_route_circuit_peering_resource.go @@ -6,7 +6,7 @@ import ( "strings" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/hashicorp/go-azure-helpers/response" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" @@ -41,9 +41,9 @@ func resourceExpressRouteCircuitPeering() *pluginsdk.Resource { Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ - string(network.AzurePrivatePeering), - string(network.AzurePublicPeering), - string(network.MicrosoftPeering), + string(network.ExpressRoutePeeringTypeAzurePrivatePeering), + string(network.ExpressRoutePeeringTypeAzurePublicPeering), + string(network.ExpressRoutePeeringTypeMicrosoftPeering), }, false), }, @@ -238,7 +238,7 @@ func resourceExpressRouteCircuitPeeringCreateUpdate(d *pluginsdk.ResourceData, m }, } - if strings.EqualFold(peeringType, string(network.MicrosoftPeering)) { + if strings.EqualFold(peeringType, string(network.ExpressRoutePeeringTypeMicrosoftPeering)) { peerings := d.Get("microsoft_peering_config").([]interface{}) if len(peerings) == 0 { return fmt.Errorf("`microsoft_peering_config` must be specified when `peering_type` is set to `MicrosoftPeering`") diff --git a/azurerm/internal/services/network/express_route_circuit_peering_resource_test.go b/azurerm/internal/services/network/express_route_circuit_peering_resource_test.go index 8722a804388d..ebe683380088 100644 --- a/azurerm/internal/services/network/express_route_circuit_peering_resource_test.go +++ 
b/azurerm/internal/services/network/express_route_circuit_peering_resource_test.go @@ -20,7 +20,7 @@ func testAccExpressRouteCircuitPeering_azurePrivatePeering(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_express_route_circuit_peering", "test") r := ExpressRouteCircuitPeeringResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.privatePeering(data), Check: acceptance.ComposeTestCheckFunc( @@ -37,7 +37,7 @@ func testAccExpressRouteCircuitPeering_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_express_route_circuit_peering", "test") r := ExpressRouteCircuitPeeringResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.privatePeering(data), Check: acceptance.ComposeTestCheckFunc( @@ -52,7 +52,7 @@ func testAccExpressRouteCircuitPeering_microsoftPeering(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_express_route_circuit_peering", "test") r := ExpressRouteCircuitPeeringResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.msPeering(data), Check: acceptance.ComposeTestCheckFunc( @@ -69,7 +69,7 @@ func testAccExpressRouteCircuitPeering_microsoftPeeringIpv6(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_express_route_circuit_peering", "test") r := ExpressRouteCircuitPeeringResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.msPeeringIpv6(data), Check: acceptance.ComposeTestCheckFunc( @@ -84,7 +84,7 @@ func testAccExpressRouteCircuitPeering_microsoftPeeringIpv6CustomerRouting(t *te data := acceptance.BuildTestData(t, "azurerm_express_route_circuit_peering", "test") r := ExpressRouteCircuitPeeringResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, 
[]acceptance.TestStep{ { Config: r.msPeeringIpv6CustomerRouting(data), Check: acceptance.ComposeTestCheckFunc( @@ -99,7 +99,7 @@ func testAccExpressRouteCircuitPeering_microsoftPeeringIpv6WithRouteFilter(t *te data := acceptance.BuildTestData(t, "azurerm_express_route_circuit_peering", "test") r := ExpressRouteCircuitPeeringResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.msPeeringIpv6WithRouteFilter(data), Check: acceptance.ComposeTestCheckFunc( @@ -114,7 +114,7 @@ func testAccExpressRouteCircuitPeering_microsoftPeeringCustomerRouting(t *testin data := acceptance.BuildTestData(t, "azurerm_express_route_circuit_peering", "test") r := ExpressRouteCircuitPeeringResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.msPeeringCustomerRouting(data), Check: acceptance.ComposeTestCheckFunc( @@ -133,7 +133,7 @@ func testAccExpressRouteCircuitPeering_azurePrivatePeeringWithCircuitUpdate(t *t data := acceptance.BuildTestData(t, "azurerm_express_route_circuit_peering", "test") r := ExpressRouteCircuitPeeringResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.privatePeering(data), Check: acceptance.ComposeTestCheckFunc( @@ -158,7 +158,7 @@ func testAccExpressRouteCircuitPeering_microsoftPeeringWithRouteFilter(t *testin data := acceptance.BuildTestData(t, "azurerm_express_route_circuit_peering", "test") r := ExpressRouteCircuitPeeringResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.msPeeringWithRouteFilter(data), Check: acceptance.ComposeTestCheckFunc( @@ -330,17 +330,17 @@ resource "azurerm_express_route_circuit_peering" "test" { express_route_circuit_name = azurerm_express_route_circuit.test.name resource_group_name = azurerm_resource_group.test.name peer_asn = 
100 - primary_peer_address_prefix = "192.168.3.0/30" - secondary_peer_address_prefix = "192.168.4.0/30" + primary_peer_address_prefix = "192.168.7.0/30" + secondary_peer_address_prefix = "192.168.8.0/30" vlan_id = 300 microsoft_peering_config { - advertised_public_prefixes = ["123.2.0.0/24"] + advertised_public_prefixes = ["123.4.0.0/24"] } ipv6 { - primary_peer_address_prefix = "2002:db01::/126" - secondary_peer_address_prefix = "2003:db01::/126" + primary_peer_address_prefix = "2002:db03::/126" + secondary_peer_address_prefix = "2003:db03::/126" microsoft_peering { advertised_public_prefixes = ["2002:db01::/126"] @@ -385,19 +385,19 @@ resource "azurerm_express_route_circuit_peering" "test" { express_route_circuit_name = azurerm_express_route_circuit.test.name resource_group_name = azurerm_resource_group.test.name peer_asn = 100 - primary_peer_address_prefix = "192.168.3.0/30" - secondary_peer_address_prefix = "192.168.4.0/30" + primary_peer_address_prefix = "192.168.9.0/30" + secondary_peer_address_prefix = "192.168.10.0/30" vlan_id = 300 microsoft_peering_config { - advertised_public_prefixes = ["123.2.0.0/24"] + advertised_public_prefixes = ["123.5.0.0/24"] } ipv6 { - primary_peer_address_prefix = "2002:db01::/126" - secondary_peer_address_prefix = "2003:db01::/126" + primary_peer_address_prefix = "2002:db05::/126" + secondary_peer_address_prefix = "2003:db05::/126" microsoft_peering { - advertised_public_prefixes = ["2002:db01::/126"] + advertised_public_prefixes = ["2002:db05::/126"] customer_asn = 64511 routing_registry_name = "ARIN" } @@ -454,18 +454,18 @@ resource "azurerm_express_route_circuit_peering" "test" { express_route_circuit_name = azurerm_express_route_circuit.test.name resource_group_name = azurerm_resource_group.test.name peer_asn = 100 - primary_peer_address_prefix = "192.168.3.0/30" - secondary_peer_address_prefix = "192.168.4.0/30" + primary_peer_address_prefix = "192.168.11.0/30" + secondary_peer_address_prefix = "192.168.12.0/30" vlan_id = 
300 route_filter_id = azurerm_route_filter.test.id microsoft_peering_config { - advertised_public_prefixes = ["123.2.0.0/24"] + advertised_public_prefixes = ["123.3.0.0/24"] } ipv6 { - primary_peer_address_prefix = "2002:db01::/126" - secondary_peer_address_prefix = "2003:db01::/126" + primary_peer_address_prefix = "2002:db02::/126" + secondary_peer_address_prefix = "2003:db02::/126" route_filter_id = azurerm_route_filter.test.id microsoft_peering { @@ -513,12 +513,12 @@ resource "azurerm_express_route_circuit_peering" "test" { express_route_circuit_name = azurerm_express_route_circuit.test.name resource_group_name = azurerm_resource_group.test.name peer_asn = 100 - primary_peer_address_prefix = "192.168.1.0/30" - secondary_peer_address_prefix = "192.168.2.0/30" + primary_peer_address_prefix = "192.168.3.0/30" + secondary_peer_address_prefix = "192.168.4.0/30" vlan_id = 300 microsoft_peering_config { - advertised_public_prefixes = ["123.1.0.0/24"] + advertised_public_prefixes = ["123.2.0.0/24"] // https://tools.ietf.org/html/rfc5398 customer_asn = 64511 routing_registry_name = "ARIN" @@ -618,8 +618,8 @@ resource "azurerm_express_route_circuit_peering" "test" { express_route_circuit_name = azurerm_express_route_circuit.test.name resource_group_name = azurerm_resource_group.test.name peer_asn = 100 - primary_peer_address_prefix = "192.168.1.0/30" - secondary_peer_address_prefix = "192.168.2.0/30" + primary_peer_address_prefix = "192.168.5.0/30" + secondary_peer_address_prefix = "192.168.6.0/30" vlan_id = 300 route_filter_id = azurerm_route_filter.test.id diff --git a/azurerm/internal/services/network/express_route_circuit_resource.go b/azurerm/internal/services/network/express_route_circuit_resource.go index a1a191125fac..1dd55d95ca92 100644 --- a/azurerm/internal/services/network/express_route_circuit_resource.go +++ b/azurerm/internal/services/network/express_route_circuit_resource.go @@ -6,11 +6,13 @@ import ( "log" "time" - 
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/locks" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/suppress" @@ -55,25 +57,6 @@ func resourceExpressRouteCircuit() *pluginsdk.Resource { "location": azure.SchemaLocation(), - "service_provider_name": { - Type: pluginsdk.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: suppress.CaseDifference, - }, - - "peering_location": { - Type: pluginsdk.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: suppress.CaseDifference, - }, - - "bandwidth_in_mbps": { - Type: pluginsdk.TypeInt, - Required: true, - }, - "sku": { Type: pluginsdk.TypeList, Required: true, @@ -96,8 +79,8 @@ func resourceExpressRouteCircuit() *pluginsdk.Resource { Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ - string(network.MeteredData), - string(network.UnlimitedData), + string(network.ExpressRouteCircuitSkuFamilyMeteredData), + string(network.ExpressRouteCircuitSkuFamilyUnlimitedData), }, true), DiffSuppressFunc: suppress.CaseDifference, }, @@ -111,6 +94,47 @@ func resourceExpressRouteCircuit() *pluginsdk.Resource { Default: false, }, + 
"service_provider_name": { + Type: pluginsdk.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: suppress.CaseDifference, + RequiredWith: []string{"bandwidth_in_mbps", "peering_location"}, + ConflictsWith: []string{"bandwidth_in_gbps", "express_route_port_id"}, + }, + + "peering_location": { + Type: pluginsdk.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: suppress.CaseDifference, + RequiredWith: []string{"bandwidth_in_mbps", "service_provider_name"}, + ConflictsWith: []string{"bandwidth_in_gbps", "express_route_port_id"}, + }, + + "bandwidth_in_mbps": { + Type: pluginsdk.TypeInt, + Optional: true, + RequiredWith: []string{"peering_location", "service_provider_name"}, + ConflictsWith: []string{"bandwidth_in_gbps", "express_route_port_id"}, + }, + + "bandwidth_in_gbps": { + Type: pluginsdk.TypeFloat, + Optional: true, + RequiredWith: []string{"express_route_port_id"}, + ConflictsWith: []string{"bandwidth_in_mbps", "peering_location", "service_provider_name"}, + }, + + "express_route_port_id": { + Type: pluginsdk.TypeString, + Optional: true, + ForceNew: true, + RequiredWith: []string{"bandwidth_in_gbps"}, + ConflictsWith: []string{"bandwidth_in_mbps", "peering_location", "service_provider_name"}, + ValidateFunc: validate.ExpressRoutePortID, + }, + "service_provider_provisioning_state": { Type: pluginsdk.TypeString, Computed: true, @@ -154,12 +178,9 @@ func resourceExpressRouteCircuitCreateUpdate(d *pluginsdk.ResourceData, meta int } location := azure.NormalizeLocation(d.Get("location").(string)) - serviceProviderName := d.Get("service_provider_name").(string) - peeringLocation := d.Get("peering_location").(string) - bandwidthInMbps := int32(d.Get("bandwidth_in_mbps").(int)) sku := expandExpressRouteCircuitSku(d) - allowRdfeOps := d.Get("allow_classic_operations").(bool) t := d.Get("tags").(map[string]interface{}) + allowRdfeOps := d.Get("allow_classic_operations").(bool) expandedTags := tags.Expand(t) // There is the potential for 
the express route circuit to become out of sync when the service provider updates @@ -191,24 +212,28 @@ func resourceExpressRouteCircuitCreateUpdate(d *pluginsdk.ResourceData, meta int erc.Sku = sku erc.Tags = expandedTags - if erc.ExpressRouteCircuitPropertiesFormat != nil { + if !d.IsNewResource() { erc.ExpressRouteCircuitPropertiesFormat.AllowClassicOperations = &allowRdfeOps - if erc.ExpressRouteCircuitPropertiesFormat.ServiceProviderProperties != nil { - erc.ExpressRouteCircuitPropertiesFormat.ServiceProviderProperties.ServiceProviderName = &serviceProviderName - erc.ExpressRouteCircuitPropertiesFormat.ServiceProviderProperties.PeeringLocation = &peeringLocation - erc.ExpressRouteCircuitPropertiesFormat.ServiceProviderProperties.BandwidthInMbps = &bandwidthInMbps - } } else { - erc.ExpressRouteCircuitPropertiesFormat = &network.ExpressRouteCircuitPropertiesFormat{ - AllowClassicOperations: &allowRdfeOps, - ServiceProviderProperties: &network.ExpressRouteCircuitServiceProviderProperties{ - ServiceProviderName: &serviceProviderName, - PeeringLocation: &peeringLocation, - BandwidthInMbps: &bandwidthInMbps, - }, + erc.ExpressRouteCircuitPropertiesFormat = &network.ExpressRouteCircuitPropertiesFormat{} + + // ServiceProviderProperties and expressRoutePorts/bandwidthInGbps properties are mutually exclusive + if _, ok := d.GetOk("express_route_port_id"); ok { + erc.ExpressRouteCircuitPropertiesFormat.ExpressRoutePort = &network.SubResource{} + } else { + erc.ExpressRouteCircuitPropertiesFormat.ServiceProviderProperties = &network.ExpressRouteCircuitServiceProviderProperties{} } } + if erc.ExpressRouteCircuitPropertiesFormat.ServiceProviderProperties != nil { + erc.ExpressRouteCircuitPropertiesFormat.ServiceProviderProperties.ServiceProviderName = utils.String(d.Get("service_provider_name").(string)) + erc.ExpressRouteCircuitPropertiesFormat.ServiceProviderProperties.PeeringLocation = utils.String(d.Get("peering_location").(string)) + 
erc.ExpressRouteCircuitPropertiesFormat.ServiceProviderProperties.BandwidthInMbps = utils.Int32(int32(d.Get("bandwidth_in_mbps").(int))) + } else { + erc.ExpressRouteCircuitPropertiesFormat.ExpressRoutePort.ID = utils.String(d.Get("express_route_port_id").(string)) + erc.ExpressRouteCircuitPropertiesFormat.BandwidthInGbps = utils.Float(d.Get("bandwidth_in_gbps").(float64)) + } + future, err := client.CreateOrUpdate(ctx, resGroup, name, erc) if err != nil { return fmt.Errorf("Error Creating/Updating ExpressRouteCircuit %q (Resource Group %q): %+v", name, resGroup, err) @@ -229,7 +254,7 @@ func resourceExpressRouteCircuitCreateUpdate(d *pluginsdk.ResourceData, meta int Timeout: d.Timeout(pluginsdk.TimeoutCreate), } - if _, err = stateConf.WaitForState(); err != nil { + if _, err = stateConf.WaitForStateContext(ctx); err != nil { return fmt.Errorf("Error for Express Route Circuit %q (Resource Group %q) to be able to be queried: %+v", name, resGroup, err) } @@ -283,6 +308,18 @@ func resourceExpressRouteCircuitRead(d *pluginsdk.ResourceData, meta interface{} } } + if resp.ExpressRoutePort != nil { + d.Set("bandwidth_in_gbps", resp.BandwidthInGbps) + + if resp.ExpressRoutePort.ID != nil { + portID, err := parse.ExpressRoutePortID(*resp.ExpressRoutePort.ID) + if err != nil { + return err + } + d.Set("express_route_port_id", portID.ID()) + } + } + if props := resp.ServiceProviderProperties; props != nil { d.Set("service_provider_name", props.ServiceProviderName) d.Set("peering_location", props.PeeringLocation) diff --git a/azurerm/internal/services/network/express_route_circuit_resource_test.go b/azurerm/internal/services/network/express_route_circuit_resource_test.go index c186999d8120..b0c2afb6c222 100644 --- a/azurerm/internal/services/network/express_route_circuit_resource_test.go +++ b/azurerm/internal/services/network/express_route_circuit_resource_test.go @@ -32,6 +32,8 @@ func TestAccExpressRouteCircuit(t *testing.T) { "requiresImport": 
testAccExpressRouteCircuit_requiresImport, "data_basic": testAccDataSourceExpressRoute_basicMetered, "bandwidthReduction": testAccExpressRouteCircuit_bandwidthReduction, + "port": testAccExpressRouteCircuit_withExpressRoutePort, + "updatePort": testAccExpressRouteCircuit_updateExpressRoutePort, }, "PrivatePeering": { "azurePrivatePeering": testAccExpressRouteCircuitPeering_azurePrivatePeering, @@ -70,7 +72,7 @@ func testAccExpressRouteCircuit_basicMetered(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_express_route_circuit", "test") r := ExpressRouteCircuitResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.basicMeteredConfig(data), Check: acceptance.ComposeTestCheckFunc( @@ -85,7 +87,7 @@ func testAccExpressRouteCircuit_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_express_route_circuit", "test") r := ExpressRouteCircuitResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.basicMeteredConfig(data), Check: acceptance.ComposeTestCheckFunc( @@ -103,7 +105,7 @@ func testAccExpressRouteCircuit_basicUnlimited(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_express_route_circuit", "test") r := ExpressRouteCircuitResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.basicUnlimitedConfig(data), Check: acceptance.ComposeTestCheckFunc( @@ -118,7 +120,7 @@ func testAccExpressRouteCircuit_update(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_express_route_circuit", "test") r := ExpressRouteCircuitResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.basicMeteredConfig(data), Check: acceptance.ComposeTestCheckFunc( @@ -140,7 +142,7 @@ func testAccExpressRouteCircuit_updateTags(t *testing.T) { data := 
acceptance.BuildTestData(t, "azurerm_express_route_circuit", "test") r := ExpressRouteCircuitResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.basicMeteredConfig(data), Check: acceptance.ComposeTestCheckFunc( @@ -162,7 +164,7 @@ func testAccExpressRouteCircuit_tierUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_express_route_circuit", "test") r := ExpressRouteCircuitResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.sku(data, "Standard", "MeteredData"), Check: acceptance.ComposeTestCheckFunc( @@ -184,7 +186,7 @@ func testAccExpressRouteCircuit_premiumMetered(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_express_route_circuit", "test") r := ExpressRouteCircuitResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.sku(data, "Premium", "MeteredData"), Check: acceptance.ComposeTestCheckFunc( @@ -201,7 +203,7 @@ func testAccExpressRouteCircuit_premiumUnlimited(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_express_route_circuit", "test") r := ExpressRouteCircuitResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.sku(data, "Premium", "UnlimitedData"), Check: acceptance.ComposeTestCheckFunc( @@ -218,7 +220,7 @@ func testAccExpressRouteCircuit_allowClassicOperationsUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_express_route_circuit", "test") r := ExpressRouteCircuitResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.allowClassicOperations(data, "false"), Check: acceptance.ComposeTestCheckFunc( @@ -240,7 +242,7 @@ func testAccExpressRouteCircuit_bandwidthReduction(t *testing.T) { data := acceptance.BuildTestData(t, 
"azurerm_express_route_circuit", "test") r := ExpressRouteCircuitResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.bandwidthReductionConfig(data, "100"), Check: acceptance.ComposeTestCheckFunc( @@ -258,6 +260,43 @@ func testAccExpressRouteCircuit_bandwidthReduction(t *testing.T) { }) } +func testAccExpressRouteCircuit_withExpressRoutePort(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_express_route_circuit", "test") + r := ExpressRouteCircuitResource{} + + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ + { + Config: r.withExpressRoutePort(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func testAccExpressRouteCircuit_updateExpressRoutePort(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_express_route_circuit", "test") + r := ExpressRouteCircuitResource{} + + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ + { + Config: r.withExpressRoutePort(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.updateExpressRoutePort(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + func (t ExpressRouteCircuitResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := azure.ParseAzureResourceID(state.ID) if err != nil { @@ -505,3 +544,73 @@ resource "azurerm_express_route_circuit" "test" { } `, data.RandomInteger, data.Locations.Primary, data.RandomInteger, bandwidth) } + +func (ExpressRouteCircuitResource) withExpressRoutePort(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-expressroutecircuit-%d" + location 
= "%s" +} + +resource "azurerm_express_route_port" "test" { + name = "acctest-ERP-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + peering_location = "Airtel-Chennai2-CLS" + bandwidth_in_gbps = 10 + encapsulation = "Dot1Q" +} + +resource "azurerm_express_route_circuit" "test" { + name = "acctest-ExpressRouteCircuit-%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + express_route_port_id = azurerm_express_route_port.test.id + bandwidth_in_gbps = 5 + + sku { + tier = "Standard" + family = "MeteredData" + } +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger) +} + +func (ExpressRouteCircuitResource) updateExpressRoutePort(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-expressroutecircuit-%d" + location = "%s" +} + +resource "azurerm_express_route_port" "test" { + name = "acctest-ERP-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + peering_location = "Airtel-Chennai2-CLS" + bandwidth_in_gbps = 10 + encapsulation = "Dot1Q" +} + +resource "azurerm_express_route_circuit" "test" { + name = "acctest-ExpressRouteCircuit-%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + express_route_port_id = azurerm_express_route_port.test.id + bandwidth_in_gbps = 10 + + sku { + tier = "Standard" + family = "MeteredData" + } +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger) +} diff --git a/azurerm/internal/services/network/express_route_connection_resource.go b/azurerm/internal/services/network/express_route_connection_resource.go new file mode 100644 index 000000000000..dc6caefbbfac --- /dev/null +++ 
b/azurerm/internal/services/network/express_route_connection_resource.go @@ -0,0 +1,377 @@ +package network + +import ( + "fmt" + "time" + + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func resourceExpressRouteConnection() *pluginsdk.Resource { + return &pluginsdk.Resource{ + Create: resourceExpressRouteConnectionCreate, + Read: resourceExpressRouteConnectionRead, + Update: resourceExpressRouteConnectionUpdate, + Delete: resourceExpressRouteConnectionDelete, + + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), + }, + + Importer: pluginsdk.ImporterValidatingResourceId(func(id string) error { + _, err := parse.ExpressRouteConnectionID(id) + return err + }), + + Schema: map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.ExpressRouteConnectionName, + }, + + "express_route_circuit_peering_id": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.ExpressRouteCircuitPeeringID, + }, + + 
"express_route_gateway_id": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.ExpressRouteGatewayID, + }, + + "authorization_key": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.IsUUID, + }, + + "enable_internet_security": { + Type: pluginsdk.TypeBool, + Optional: true, + }, + + "routing": { + Type: pluginsdk.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "associated_route_table_id": { + Type: pluginsdk.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validate.HubRouteTableID, + AtLeastOneOf: []string{"routing.0.associated_route_table_id", "routing.0.propagated_route_table"}, + }, + + "propagated_route_table": { + Type: pluginsdk.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "labels": { + Type: pluginsdk.TypeSet, + Optional: true, + Computed: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + ValidateFunc: validation.StringIsNotEmpty, + }, + AtLeastOneOf: []string{"routing.0.propagated_route_table.0.labels", "routing.0.propagated_route_table.0.route_table_ids"}, + }, + + "route_table_ids": { + Type: pluginsdk.TypeList, + Optional: true, + Computed: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + ValidateFunc: validate.HubRouteTableID, + }, + AtLeastOneOf: []string{"routing.0.propagated_route_table.0.labels", "routing.0.propagated_route_table.0.route_table_ids"}, + }, + }, + }, + AtLeastOneOf: []string{"routing.0.associated_route_table_id", "routing.0.propagated_route_table"}, + }, + }, + }, + }, + + "routing_weight": { + Type: pluginsdk.TypeInt, + Optional: true, + Default: 0, + ValidateFunc: validation.IntBetween(0, 32000), + }, + }, + } +} + +func resourceExpressRouteConnectionCreate(d *pluginsdk.ResourceData, meta interface{}) error { + client := 
meta.(*clients.Client).Network.ExpressRouteConnectionsClient + ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) + defer cancel() + + expressRouteGatewayId, err := parse.ExpressRouteGatewayID(d.Get("express_route_gateway_id").(string)) + if err != nil { + return err + } + + id := parse.NewExpressRouteConnectionID(expressRouteGatewayId.SubscriptionId, expressRouteGatewayId.ResourceGroup, expressRouteGatewayId.Name, d.Get("name").(string)) + + existing, err := client.Get(ctx, id.ResourceGroup, id.ExpressRouteGatewayName, id.Name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("checking for existing %s: %+v", id, err) + } + } + + if !utils.ResponseWasNotFound(existing.Response) { + return tf.ImportAsExistsError("azurerm_express_route_connection", id.ID()) + } + + parameters := network.ExpressRouteConnection{ + Name: utils.String(id.Name), + ExpressRouteConnectionProperties: &network.ExpressRouteConnectionProperties{ + ExpressRouteCircuitPeering: &network.ExpressRouteCircuitPeeringID{ + ID: utils.String(d.Get("express_route_circuit_peering_id").(string)), + }, + EnableInternetSecurity: utils.Bool(d.Get("enable_internet_security").(bool)), + RoutingConfiguration: expandExpressRouteConnectionRouting(d.Get("routing").([]interface{})), + RoutingWeight: utils.Int32(int32(d.Get("routing_weight").(int))), + }, + } + + if v, ok := d.GetOk("authorization_key"); ok { + parameters.ExpressRouteConnectionProperties.AuthorizationKey = utils.String(v.(string)) + } + + future, err := client.CreateOrUpdate(ctx, id.ResourceGroup, id.ExpressRouteGatewayName, id.Name, parameters) + if err != nil { + return fmt.Errorf("creating %s: %+v", id, err) + } + + if err := future.WaitForCompletionRef(ctx, client.Client); err != nil { + return fmt.Errorf("waiting for creation of %s: %+v", id, err) + } + + d.SetId(id.ID()) + + return resourceExpressRouteConnectionRead(d, meta) +} + +func resourceExpressRouteConnectionRead(d 
*pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).Network.ExpressRouteConnectionsClient + ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.ExpressRouteConnectionID(d.Id()) + if err != nil { + return err + } + + resp, err := client.Get(ctx, id.ResourceGroup, id.ExpressRouteGatewayName, id.Name) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + d.SetId("") + return nil + } + return fmt.Errorf("retrieving %s: %+v", *id, err) + } + + d.Set("name", id.Name) + d.Set("express_route_gateway_id", parse.NewExpressRouteGatewayID(id.SubscriptionId, id.ResourceGroup, id.ExpressRouteGatewayName).ID()) + + if props := resp.ExpressRouteConnectionProperties; props != nil { + d.Set("routing_weight", props.RoutingWeight) + d.Set("authorization_key", props.AuthorizationKey) + d.Set("enable_internet_security", props.EnableInternetSecurity) + + circuitPeeringID := "" + if v := props.ExpressRouteCircuitPeering; v != nil { + circuitPeeringID = *v.ID + } + peeringId, err := parse.ExpressRouteCircuitPeeringID(circuitPeeringID) + if err != nil { + return err + } + d.Set("express_route_circuit_peering_id", peeringId.ID()) + + routing, err := flattenExpressRouteConnectionRouting(props.RoutingConfiguration) + if err != nil { + return err + } + if err := d.Set("routing", routing); err != nil { + return fmt.Errorf("setting `routing`: %+v", err) + } + } + + return nil +} + +func resourceExpressRouteConnectionUpdate(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).Network.ExpressRouteConnectionsClient + ctx, cancel := timeouts.ForUpdate(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.ExpressRouteConnectionID(d.Id()) + if err != nil { + return err + } + + parameters := network.ExpressRouteConnection{ + Name: utils.String(id.Name), + ExpressRouteConnectionProperties: &network.ExpressRouteConnectionProperties{ + 
ExpressRouteCircuitPeering: &network.ExpressRouteCircuitPeeringID{ + ID: utils.String(d.Get("express_route_circuit_peering_id").(string)), + }, + EnableInternetSecurity: utils.Bool(d.Get("enable_internet_security").(bool)), + RoutingConfiguration: expandExpressRouteConnectionRouting(d.Get("routing").([]interface{})), + RoutingWeight: utils.Int32(int32(d.Get("routing_weight").(int))), + }, + } + + if v, ok := d.GetOk("authorization_key"); ok { + parameters.ExpressRouteConnectionProperties.AuthorizationKey = utils.String(v.(string)) + } + + future, err := client.CreateOrUpdate(ctx, id.ResourceGroup, id.ExpressRouteGatewayName, id.Name, parameters) + if err != nil { + return fmt.Errorf("updating %s: %+v", id, err) + } + + if err := future.WaitForCompletionRef(ctx, client.Client); err != nil { + return fmt.Errorf("waiting for update of %s: %+v", id, err) + } + + return resourceExpressRouteConnectionRead(d, meta) +} + +func resourceExpressRouteConnectionDelete(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).Network.ExpressRouteConnectionsClient + ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.ExpressRouteConnectionID(d.Id()) + if err != nil { + return err + } + + future, err := client.Delete(ctx, id.ResourceGroup, id.ExpressRouteGatewayName, id.Name) + if err != nil { + return fmt.Errorf("deleting %s: %+v", *id, err) + } + + if err := future.WaitForCompletionRef(ctx, client.Client); err != nil { + return fmt.Errorf("waiting for deletion of %s: %+v", *id, err) + } + + return nil +} + +func expandExpressRouteConnectionRouting(input []interface{}) *network.RoutingConfiguration { + if len(input) == 0 || input[0] == nil { + return &network.RoutingConfiguration{} + } + + v := input[0].(map[string]interface{}) + result := network.RoutingConfiguration{} + + if associatedRouteTableId := v["associated_route_table_id"].(string); associatedRouteTableId != "" { + 
result.AssociatedRouteTable = &network.SubResource{ + ID: utils.String(associatedRouteTableId), + } + } + + if propagatedRouteTable := v["propagated_route_table"].([]interface{}); len(propagatedRouteTable) != 0 { + result.PropagatedRouteTables = expandExpressRouteConnectionPropagatedRouteTable(propagatedRouteTable) + } + + return &result +} + +func expandExpressRouteConnectionPropagatedRouteTable(input []interface{}) *network.PropagatedRouteTable { + if len(input) == 0 || input[0] == nil { + return &network.PropagatedRouteTable{} + } + + v := input[0].(map[string]interface{}) + + result := network.PropagatedRouteTable{} + + if labels := v["labels"].(*pluginsdk.Set).List(); len(labels) != 0 { + result.Labels = utils.ExpandStringSlice(labels) + } + + if routeTableIds := v["route_table_ids"].([]interface{}); len(routeTableIds) != 0 { + result.Ids = expandIDsToSubResources(routeTableIds) + } + + return &result +} + +func flattenExpressRouteConnectionRouting(input *network.RoutingConfiguration) ([]interface{}, error) { + if input == nil { + return []interface{}{}, nil + } + + associatedRouteTableId := "" + if input.AssociatedRouteTable != nil && input.AssociatedRouteTable.ID != nil { + associatedRouteTableId = *input.AssociatedRouteTable.ID + } + routeTableId, err := parse.HubRouteTableID(associatedRouteTableId) + if err != nil { + return nil, err + } + + return []interface{}{ + map[string]interface{}{ + "associated_route_table_id": routeTableId.ID(), + "propagated_route_table": flattenExpressRouteConnectionPropagatedRouteTable(input.PropagatedRouteTables), + }, + }, nil +} + +func flattenExpressRouteConnectionPropagatedRouteTable(input *network.PropagatedRouteTable) []interface{} { + if input == nil { + return make([]interface{}, 0) + } + + labels := make([]interface{}, 0) + if input.Labels != nil { + labels = utils.FlattenStringSlice(input.Labels) + } + + routeTableIds := make([]interface{}, 0) + if input.Ids != nil { + routeTableIds = 
flattenSubResourcesToIDs(input.Ids) + } + + return []interface{}{ + map[string]interface{}{ + "labels": labels, + "route_table_ids": routeTableIds, + }, + } +} diff --git a/azurerm/internal/services/network/express_route_connection_resource_test.go b/azurerm/internal/services/network/express_route_connection_resource_test.go new file mode 100644 index 000000000000..204625ac30bb --- /dev/null +++ b/azurerm/internal/services/network/express_route_connection_resource_test.go @@ -0,0 +1,246 @@ +package network_test + +import ( + "context" + "fmt" + "testing" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +type ExpressRouteConnectionResource struct{} + +func TestAccExpressRouteConnection(t *testing.T) { + acceptance.RunTestsInSequence(t, map[string]map[string]func(t *testing.T){ + "Resource": { + "basic": testAccExpressRouteConnection_basic, + "requiresImport": testAccExpressRouteConnection_requiresImport, + "complete": testAccExpressRouteConnection_complete, + "update": testAccExpressRouteConnection_update, + }, + }) +} + +func testAccExpressRouteConnection_basic(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_express_route_connection", "test") + r := ExpressRouteConnectionResource{} + + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + 
check.That("azurerm_express_route_connection.test").Key("routing.0.associated_route_table_id").Exists(), + check.That("azurerm_express_route_connection.test").Key("routing.0.propagated_route_table.#").HasValue("1"), + ), + }, + data.ImportStep(), + }) +} + +func testAccExpressRouteConnection_requiresImport(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_express_route_connection", "test") + r := ExpressRouteConnectionResource{} + + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.RequiresImportErrorStep(r.requiresImport), + }) +} + +func testAccExpressRouteConnection_complete(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_express_route_connection", "test") + r := ExpressRouteConnectionResource{} + + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ + { + Config: r.complete(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func testAccExpressRouteConnection_update(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_express_route_connection", "test") + r := ExpressRouteConnectionResource{} + + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.complete(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func (r ExpressRouteConnectionResource) Exists(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { + expressRouteConnectionClient := 
client.Network.ExpressRouteConnectionsClient + id, err := parse.ExpressRouteConnectionID(state.ID) + if err != nil { + return nil, err + } + + resp, err := expressRouteConnectionClient.Get(ctx, id.ResourceGroup, id.ExpressRouteGatewayName, id.Name) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + return utils.Bool(false), nil + } + + return nil, fmt.Errorf("retrieving Express Route Connection %q: %+v", state.ID, err) + } + + return utils.Bool(resp.ExpressRouteConnectionProperties != nil), nil +} + +func (r ExpressRouteConnectionResource) basic(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_express_route_connection" "test" { + name = "acctest-ExpressRouteConnection-%d" + express_route_gateway_id = azurerm_express_route_gateway.test.id + express_route_circuit_peering_id = azurerm_express_route_circuit_peering.test.id + + depends_on = [azurerm_virtual_hub_route_table.test] +} +`, r.template(data), data.RandomInteger) +} + +func (r ExpressRouteConnectionResource) requiresImport(data acceptance.TestData) string { + config := r.basic(data) + return fmt.Sprintf(` +%s + +resource "azurerm_express_route_connection" "import" { + name = azurerm_express_route_connection.test.name + express_route_gateway_id = azurerm_express_route_connection.test.express_route_gateway_id + express_route_circuit_peering_id = azurerm_express_route_connection.test.express_route_circuit_peering_id +} +`, config) +} + +func (r ExpressRouteConnectionResource) complete(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_express_route_connection" "test" { + name = "acctest-ExpressRouteConnection-%d" + express_route_gateway_id = azurerm_express_route_gateway.test.id + express_route_circuit_peering_id = azurerm_express_route_circuit_peering.test.id + routing_weight = 2 + authorization_key = "90f8db47-e25b-4b65-a68b-7743ced2a16b" + enable_internet_security = true + + routing { + associated_route_table_id = 
azurerm_virtual_hub_route_table.test.id + + propagated_route_table { + labels = ["label1"] + route_table_ids = [azurerm_virtual_hub_route_table.test.id] + } + } +} +`, r.template(data), data.RandomInteger) +} + +func (r ExpressRouteConnectionResource) template(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-erconnection-%d" + location = "%s" +} + +resource "azurerm_express_route_port" "test" { + name = "acctest-erp-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + peering_location = "CDC-Canberra" + bandwidth_in_gbps = 10 + encapsulation = "Dot1Q" +} + +resource "azurerm_express_route_circuit" "test" { + name = "acctest-erc-%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + express_route_port_id = azurerm_express_route_port.test.id + bandwidth_in_gbps = 5 + + sku { + tier = "Premium" + family = "MeteredData" + } +} + +resource "azurerm_express_route_circuit_peering" "test" { + peering_type = "AzurePrivatePeering" + express_route_circuit_name = azurerm_express_route_circuit.test.name + resource_group_name = azurerm_resource_group.test.name + shared_key = "ItsASecret" + peer_asn = 100 + primary_peer_address_prefix = "192.168.1.0/30" + secondary_peer_address_prefix = "192.168.2.0/30" + vlan_id = 100 +} + +resource "azurerm_virtual_wan" "test" { + name = "acctest-vwan-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location +} + +resource "azurerm_virtual_hub" "test" { + name = "acctest-vhub-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + virtual_wan_id = azurerm_virtual_wan.test.id + address_prefix = "10.0.1.0/24" +} + +resource "azurerm_express_route_gateway" "test" { + name = "acctest-ergw-%d" + 
resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + virtual_hub_id = azurerm_virtual_hub.test.id + scale_units = 1 +} + +resource "azurerm_virtual_hub_route_table" "test" { + name = "acctest-vhubrt-%d" + virtual_hub_id = azurerm_virtual_hub.test.id +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger) +} diff --git a/azurerm/internal/services/network/express_route_gateway_resource.go b/azurerm/internal/services/network/express_route_gateway_resource.go index 563ef9fb74a7..faa0e317b943 100644 --- a/azurerm/internal/services/network/express_route_gateway_resource.go +++ b/azurerm/internal/services/network/express_route_gateway_resource.go @@ -5,7 +5,7 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/hashicorp/go-azure-helpers/response" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" diff --git a/azurerm/internal/services/network/express_route_gateway_resource_test.go b/azurerm/internal/services/network/express_route_gateway_resource_test.go index 7661ed1d7e7b..491a959b50e3 100644 --- a/azurerm/internal/services/network/express_route_gateway_resource_test.go +++ b/azurerm/internal/services/network/express_route_gateway_resource_test.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" diff --git 
a/azurerm/internal/services/network/express_route_port_resource.go b/azurerm/internal/services/network/express_route_port_resource.go index 6a668e75b3a8..01a42ba6b1db 100644 --- a/azurerm/internal/services/network/express_route_port_resource.go +++ b/azurerm/internal/services/network/express_route_port_resource.go @@ -6,7 +6,7 @@ import ( "strings" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/hashicorp/go-azure-helpers/response" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" @@ -143,8 +143,8 @@ func resourceArmExpressRoutePort() *pluginsdk.Resource { Required: true, ForceNew: true, ValidateFunc: validation.StringInSlice([]string{ - string(network.Dot1Q), - string(network.QinQ), + string(network.ExpressRoutePortsEncapsulationDot1Q), + string(network.ExpressRoutePortsEncapsulationQinQ), }, false), }, diff --git a/azurerm/internal/services/network/ip_group_resource.go b/azurerm/internal/services/network/ip_group_resource.go index 11b43ce38db4..c0dba1a3426c 100644 --- a/azurerm/internal/services/network/ip_group_resource.go +++ b/azurerm/internal/services/network/ip_group_resource.go @@ -4,15 +4,14 @@ import ( "fmt" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/parse" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" - + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) diff --git a/azurerm/internal/services/network/local_network_gateway_data_source.go b/azurerm/internal/services/network/local_network_gateway_data_source.go new file mode 100644 index 000000000000..da4fad490186 --- /dev/null +++ b/azurerm/internal/services/network/local_network_gateway_data_source.go @@ -0,0 +1,138 @@ +package network + +import ( + "fmt" + "time" + + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/location" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func dataSourceLocalNetworkGateway() *pluginsdk.Resource { + return &pluginsdk.Resource{ + Read: dataSourceLocalNetworkGatewayRead, + + Timeouts: &pluginsdk.ResourceTimeout{ + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + }, + + Schema: map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Required: 
true, + }, + + "resource_group_name": azure.SchemaResourceGroupNameForDataSource(), + + "location": location.SchemaComputed(), + + "gateway_address": { + Type: pluginsdk.TypeString, + Computed: true, + }, + + "gateway_fqdn": { + Type: pluginsdk.TypeString, + Computed: true, + }, + + "address_space": { + Type: pluginsdk.TypeList, + Computed: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + }, + }, + + "bgp_settings": { + Type: pluginsdk.TypeList, + Computed: true, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "asn": { + Type: pluginsdk.TypeInt, + Required: true, + }, + + "bgp_peering_address": { + Type: pluginsdk.TypeString, + Required: true, + }, + + "peer_weight": { + Type: pluginsdk.TypeInt, + Computed: true, + }, + }, + }, + }, + + "tags": tags.SchemaDataSource(), + }, + } +} + +func dataSourceLocalNetworkGatewayRead(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).Network.LocalNetworkGatewaysClient + ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) + defer cancel() + + name := d.Get("name").(string) + resourceGroup := d.Get("resource_group_name").(string) + + resp, err := client.Get(ctx, resourceGroup, name) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + d.SetId("") + return nil + } + + return fmt.Errorf("Error reading the state of Local Network Gateway %q (Resource Group %q): %+v", name, resourceGroup, err) + } + + if resp.ID != nil { + d.SetId(*resp.ID) + } + + d.Set("name", resp.Name) + d.Set("resource_group_name", resourceGroup) + if location := resp.Location; location != nil { + d.Set("location", azure.NormalizeLocation(*location)) + } + + if props := resp.LocalNetworkGatewayPropertiesFormat; props != nil { + d.Set("gateway_address", props.GatewayIPAddress) + d.Set("gateway_fqdn", props.Fqdn) + + if lnas := props.LocalNetworkAddressSpace; lnas != nil { + d.Set("address_space", lnas.AddressPrefixes) + } + flattenedSettings := 
flattenLocalNetworkGatewayDataSourceBGPSettings(props.BgpSettings) + if err := d.Set("bgp_settings", flattenedSettings); err != nil { + return err + } + } + + return tags.FlattenAndSet(d, resp.Tags) +} + +func flattenLocalNetworkGatewayDataSourceBGPSettings(input *network.BgpSettings) []interface{} { + output := make(map[string]interface{}) + + if input == nil { + return []interface{}{} + } + + output["asn"] = int(*input.Asn) + output["bgp_peering_address"] = *input.BgpPeeringAddress + output["peer_weight"] = int(*input.PeerWeight) + + return []interface{}{output} +} diff --git a/azurerm/internal/services/network/local_network_gateway_data_source_test.go b/azurerm/internal/services/network/local_network_gateway_data_source_test.go new file mode 100644 index 000000000000..11131dece2ab --- /dev/null +++ b/azurerm/internal/services/network/local_network_gateway_data_source_test.go @@ -0,0 +1,68 @@ +package network_test + +import ( + "fmt" + "testing" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" +) + +type LocalNetworkGatewayDataSource struct { +} + +func TestAccDataSourceLocalNetworkGateway_complete(t *testing.T) { + data := acceptance.BuildTestData(t, "data.azurerm_local_network_gateway", "test") + r := LocalNetworkGatewayDataSource{} + + data.DataSourceTest(t, []acceptance.TestStep{ + { + Config: r.complete(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).Key("gateway_address").HasValue("127.0.0.1"), + check.That(data.ResourceName).Key("address_space.0").HasValue("127.0.1.0/24"), + check.That(data.ResourceName).Key("address_space.1").HasValue("127.0.0.0/24"), + check.That(data.ResourceName).Key("bgp_settings.#").HasValue("1"), + check.That(data.ResourceName).Key("bgp_settings.0.asn").HasValue("2468"), + 
check.That(data.ResourceName).Key("bgp_settings.0.bgp_peering_address").HasValue("10.104.1.1"), + check.That(data.ResourceName).Key("bgp_settings.0.peer_weight").HasValue("15"), + ), + }, + }) +} + +func (LocalNetworkGatewayDataSource) complete(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-lngw-%d" + location = "%s" +} + +resource "azurerm_local_network_gateway" "test" { + name = "acctestlng-%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + gateway_address = "127.0.0.1" + address_space = ["127.0.1.0/24", "127.0.0.0/24"] + + bgp_settings { + asn = 2468 + bgp_peering_address = "10.104.1.1" + peer_weight = 15 + } + + tags = { + environment = "acctest" + } +} + +data "azurerm_local_network_gateway" "test" { + name = azurerm_local_network_gateway.test.name + resource_group_name = azurerm_local_network_gateway.test.resource_group_name +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger) +} diff --git a/azurerm/internal/services/network/local_network_gateway_resource.go b/azurerm/internal/services/network/local_network_gateway_resource.go index aeb2cc6ecb2c..1bd3f6533738 100644 --- a/azurerm/internal/services/network/local_network_gateway_resource.go +++ b/azurerm/internal/services/network/local_network_gateway_resource.go @@ -4,7 +4,7 @@ import ( "fmt" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/hashicorp/go-azure-helpers/response" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" diff --git a/azurerm/internal/services/network/nat_gateway_data_source.go b/azurerm/internal/services/network/nat_gateway_data_source.go index 
c585086ff66e..8eafd03f2528 100644 --- a/azurerm/internal/services/network/nat_gateway_data_source.go +++ b/azurerm/internal/services/network/nat_gateway_data_source.go @@ -4,10 +4,9 @@ import ( "fmt" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/validate" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" diff --git a/azurerm/internal/services/network/nat_gateway_public_ip_association_resource.go b/azurerm/internal/services/network/nat_gateway_public_ip_association_resource.go index 806432bf13c1..9349e633e42f 100644 --- a/azurerm/internal/services/network/nat_gateway_public_ip_association_resource.go +++ b/azurerm/internal/services/network/nat_gateway_public_ip_association_resource.go @@ -6,7 +6,7 @@ import ( "strings" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/locks" diff --git a/azurerm/internal/services/network/nat_gateway_public_ip_association_resource_test.go b/azurerm/internal/services/network/nat_gateway_public_ip_association_resource_test.go index cd2143590705..223c47effd31 100644 --- a/azurerm/internal/services/network/nat_gateway_public_ip_association_resource_test.go +++ 
b/azurerm/internal/services/network/nat_gateway_public_ip_association_resource_test.go @@ -6,7 +6,7 @@ import ( "strings" "testing" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" diff --git a/azurerm/internal/services/network/nat_gateway_public_ip_prefix_association_resource.go b/azurerm/internal/services/network/nat_gateway_public_ip_prefix_association_resource.go new file mode 100644 index 000000000000..796f1df260ff --- /dev/null +++ b/azurerm/internal/services/network/nat_gateway_public_ip_prefix_association_resource.go @@ -0,0 +1,218 @@ +package network + +import ( + "fmt" + "log" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/locks" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func resourceNATGatewayPublicIpPrefixAssociation() *pluginsdk.Resource { + return &pluginsdk.Resource{ + Create: resourceNATGatewayPublicIpPrefixAssociationCreate, + Read: 
resourceNATGatewayPublicIpPrefixAssociationRead, + Delete: resourceNATGatewayPublicIpPrefixAssociationDelete, + + Importer: pluginsdk.ImporterValidatingResourceId(func(id string) error { + _, err := parse.NatGatewayPublicIPPrefixAssociationID(id) + return err + }), + + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), + }, + + Schema: map[string]*pluginsdk.Schema{ + "nat_gateway_id": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.NatGatewayID, + }, + + "public_ip_prefix_id": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.PublicIpPrefixID, + }, + }, + } +} + +func resourceNATGatewayPublicIpPrefixAssociationCreate(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).Network.NatGatewayClient + ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) + defer cancel() + + log.Printf("[INFO] preparing arguments for NAT Gateway <-> Public IP Prefix Association creation.") + natGatewayId := d.Get("nat_gateway_id").(string) + publicIpPrefixId := d.Get("public_ip_prefix_id").(string) + parsedNatGatewayId, err := parse.NatGatewayID(natGatewayId) + if err != nil { + return err + } + + locks.ByName(parsedNatGatewayId.Name, natGatewayResourceName) + defer locks.UnlockByName(parsedNatGatewayId.Name, natGatewayResourceName) + + natGateway, err := client.Get(ctx, parsedNatGatewayId.ResourceGroup, parsedNatGatewayId.Name, "") + if err != nil { + if utils.ResponseWasNotFound(natGateway.Response) { + return fmt.Errorf("NAT Gateway %q (Resource Group %q) was not found.", parsedNatGatewayId.Name, parsedNatGatewayId.ResourceGroup) + } + return fmt.Errorf("failed to retrieve NAT Gateway %q (Resource Group %q): %+v", parsedNatGatewayId.Name, parsedNatGatewayId.ResourceGroup, err) + } + + id := 
fmt.Sprintf("%s|%s", *natGateway.ID, publicIpPrefixId) + publicIpPrefixes := make([]network.SubResource, 0) + if natGateway.PublicIPPrefixes != nil { + for _, existingPublicIPPrefix := range *natGateway.PublicIPPrefixes { + if existingPublicIPPrefix.ID == nil { + continue + } + + if strings.EqualFold(*existingPublicIPPrefix.ID, publicIpPrefixId) { + return tf.ImportAsExistsError("azurerm_nat_gateway_public_ip_prefix_association", id) + } + + publicIpPrefixes = append(publicIpPrefixes, existingPublicIPPrefix) + } + } + + publicIpPrefixes = append(publicIpPrefixes, network.SubResource{ + ID: utils.String(publicIpPrefixId), + }) + natGateway.PublicIPPrefixes = &publicIpPrefixes + + future, err := client.CreateOrUpdate(ctx, parsedNatGatewayId.ResourceGroup, parsedNatGatewayId.Name, natGateway) + if err != nil { + return fmt.Errorf("failed to update Public IP Prefix Association for NAT Gateway %q (Resource Group %q): %+v", parsedNatGatewayId.Name, parsedNatGatewayId.ResourceGroup, err) + } + + if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { + return fmt.Errorf("failed to wait for completion of Public IP Prefix Association for NAT Gateway %q (Resource Group %q): %+v", parsedNatGatewayId.Name, parsedNatGatewayId.ResourceGroup, err) + } + + d.SetId(id) + + return resourceNATGatewayPublicIpPrefixAssociationRead(d, meta) +} + +func resourceNATGatewayPublicIpPrefixAssociationRead(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).Network.NatGatewayClient + ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.NatGatewayPublicIPPrefixAssociationID(d.Id()) + if err != nil { + return err + } + + natGateway, err := client.Get(ctx, id.NatGateway.ResourceGroup, id.NatGateway.Name, "") + if err != nil { + if utils.ResponseWasNotFound(natGateway.Response) { + log.Printf("[DEBUG] NAT Gateway %q (Resource Group %q) could not be found - removing from state!", 
id.NatGateway.Name, id.NatGateway.ResourceGroup) + d.SetId("") + return nil + } + return fmt.Errorf("failed to retrieve NAT Gateway %q (Resource Group %q): %+v", id.NatGateway.Name, id.NatGateway.ResourceGroup, err) + } + + if natGateway.NatGatewayPropertiesFormat == nil { + return fmt.Errorf("`properties` was nil for NAT Gateway %q (Resource Group %q)", id.NatGateway.Name, id.NatGateway.ResourceGroup) + } + props := *natGateway.NatGatewayPropertiesFormat + + if props.PublicIPPrefixes == nil { + log.Printf("[DEBUG] NAT Gateway %q (Resource Group %q) doesn't have any Public IP Prefixes - removing from state!", id.NatGateway.Name, id.NatGateway.ResourceGroup) + d.SetId("") + return nil + } + + publicIPPrefixId := "" + for _, pipp := range *props.PublicIPPrefixes { + if pipp.ID == nil { + continue + } + + if strings.EqualFold(*pipp.ID, id.PublicIPPrefixID) { + publicIPPrefixId = *pipp.ID + break + } + } + + if publicIPPrefixId == "" { + log.Printf("[DEBUG] Association between NAT Gateway %q (Resource Group %q) and Public IP Prefix %q was not found - removing from state", id.NatGateway.Name, id.NatGateway.ResourceGroup, id.PublicIPPrefixID) + d.SetId("") + return nil + } + + d.Set("nat_gateway_id", natGateway.ID) + d.Set("public_ip_prefix_id", publicIPPrefixId) + + return nil +} + +func resourceNATGatewayPublicIpPrefixAssociationDelete(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).Network.NatGatewayClient + ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.NatGatewayPublicIPPrefixAssociationID(d.Id()) + if err != nil { + return err + } + + locks.ByName(id.NatGateway.Name, natGatewayResourceName) + defer locks.UnlockByName(id.NatGateway.Name, natGatewayResourceName) + + natGateway, err := client.Get(ctx, id.NatGateway.ResourceGroup, id.NatGateway.Name, "") + if err != nil { + if utils.ResponseWasNotFound(natGateway.Response) { + return fmt.Errorf("NAT Gateway %q 
(Resource Group %q) was not found", id.NatGateway.Name, id.NatGateway.ResourceGroup) + } + + return fmt.Errorf("retrieving NAT Gateway %q (Resource Group %q): %+v", id.NatGateway.Name, id.NatGateway.ResourceGroup, err) + } + if natGateway.NatGatewayPropertiesFormat == nil { + return fmt.Errorf("retrieving NAT Gateway %q (Resource Group %q): `properties` was nil", id.NatGateway.Name, id.NatGateway.ResourceGroup) + } + + publicIpPrefixes := make([]network.SubResource, 0) + if publicIPPrefixes := natGateway.NatGatewayPropertiesFormat.PublicIPPrefixes; publicIPPrefixes != nil { + for _, publicIPPrefix := range *publicIPPrefixes { + if publicIPPrefix.ID == nil { + continue + } + + if !strings.EqualFold(*publicIPPrefix.ID, id.PublicIPPrefixID) { + publicIpPrefixes = append(publicIpPrefixes, publicIPPrefix) + } + } + } + natGateway.NatGatewayPropertiesFormat.PublicIPPrefixes = &publicIpPrefixes + + future, err := client.CreateOrUpdate(ctx, id.NatGateway.ResourceGroup, id.NatGateway.Name, natGateway) + if err != nil { + return fmt.Errorf("removing association between NAT Gateway %q (Resource Group %q) and Public IP Prefix %q: %+v", id.NatGateway.Name, id.NatGateway.ResourceGroup, id.PublicIPPrefixID, err) + } + + if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { + return fmt.Errorf("waiting for association between Public IP Prefix ID %q for NAT Gateway %q (Resource Group %q) to be removed: %+v", id.PublicIPPrefixID, id.NatGateway.Name, id.NatGateway.ResourceGroup, err) + } + + return nil +} diff --git a/azurerm/internal/services/network/nat_gateway_public_ip_prefix_association_resource_test.go b/azurerm/internal/services/network/nat_gateway_public_ip_prefix_association_resource_test.go new file mode 100644 index 000000000000..1ac0f00601a3 --- /dev/null +++ b/azurerm/internal/services/network/nat_gateway_public_ip_prefix_association_resource_test.go @@ -0,0 +1,202 @@ +package network_test + +import ( + "context" + "fmt" + "strings" + "testing" + + 
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +type NatGatewayPublicIpPrefixAssociationResource struct { +} + +func TestAccNatGatewayPublicIpPrefixAssociation_basic(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_nat_gateway_public_ip_prefix_association", "test") + r := NatGatewayPublicIpPrefixAssociationResource{} + data.ResourceTest(t, r, []acceptance.TestStep{ + // intentional as this is a Virtual Resource + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccNatGatewayPublicIpPrefixAssociation_updateNatGateway(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_nat_gateway_public_ip_prefix_association", "test") + r := NatGatewayPublicIpPrefixAssociationResource{} + data.ResourceTest(t, r, []acceptance.TestStep{ + // intentional as this is a Virtual Resource + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.updateNatGateway(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccNatGatewayPublicIpPrefixAssociation_requiresImport(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_nat_gateway_public_ip_prefix_association", 
"test") + r := NatGatewayPublicIpPrefixAssociationResource{} + data.ResourceTest(t, r, []acceptance.TestStep{ + // intentional as this is a Virtual Resource + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.RequiresImportErrorStep(r.requiresImport), + }) +} + +func TestAccNatGatewayPublicIpPrefixAssociation_deleted(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_nat_gateway_public_ip_prefix_association", "test") + r := NatGatewayPublicIpPrefixAssociationResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + // intentional as this is a Virtual Resource + data.DisappearsStep(acceptance.DisappearsStepData{ + Config: r.basic, + TestResource: r, + }), + }) +} + +func (t NatGatewayPublicIpPrefixAssociationResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { + id, err := parse.NatGatewayPublicIPPrefixAssociationID(state.ID) + if err != nil { + return nil, err + } + + resp, err := clients.Network.NatGatewayClient.Get(ctx, id.NatGateway.ResourceGroup, id.NatGateway.Name, "") + if err != nil { + return nil, fmt.Errorf("reading Nat Gateway Public IP Prefix Association (%s): %+v", id, err) + } + + return utils.Bool(resp.ID != nil), nil +} + +func (NatGatewayPublicIpPrefixAssociationResource) Destroy(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { + id, err := parse.NatGatewayPublicIPPrefixAssociationID(state.ID) + if err != nil { + return nil, err + } + + resp, err := client.Network.NatGatewayClient.Get(ctx, id.NatGateway.ResourceGroup, id.NatGateway.Name, "") + if err != nil { + return nil, fmt.Errorf("reading Nat Gateway Public IP Prefix Association (%s): %+v", id, err) + } + + updatedPrefixes := make([]network.SubResource, 0) + if publicIpPrefixes := resp.PublicIPPrefixes; publicIpPrefixes != nil { + for _, publicIpPrefix := range *publicIpPrefixes { + if 
!strings.EqualFold(*publicIpPrefix.ID, id.PublicIPPrefixID) { + updatedPrefixes = append(updatedPrefixes, publicIpPrefix) + } + } + } + resp.PublicIPPrefixes = &updatedPrefixes + + future, err := client.Network.NatGatewayClient.CreateOrUpdate(ctx, id.NatGateway.ResourceGroup, id.NatGateway.Name, resp) + if err != nil { + return nil, fmt.Errorf("failed to remove Nat Gateway Public IP Prefix Association for Nat Gateway %q: %+v", id, err) + } + + if err = future.WaitForCompletionRef(ctx, client.Network.NatGatewayClient.Client); err != nil { + return nil, fmt.Errorf("failed to wait for removal of Nat Gateway Public IP Prefix Association for Nat Gateway %q: %+v", id, err) + } + + return utils.Bool(true), nil +} + +func (r NatGatewayPublicIpPrefixAssociationResource) basic(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_nat_gateway" "test" { + name = "acctest-NatGateway-%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + sku_name = "Standard" +} + +resource "azurerm_nat_gateway_public_ip_prefix_association" "test" { + nat_gateway_id = azurerm_nat_gateway.test.id + public_ip_prefix_id = azurerm_public_ip_prefix.test.id +} +`, r.template(data), data.RandomInteger) +} + +func (r NatGatewayPublicIpPrefixAssociationResource) requiresImport(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_nat_gateway_public_ip_prefix_association" "import" { + nat_gateway_id = azurerm_nat_gateway_public_ip_prefix_association.test.nat_gateway_id + public_ip_prefix_id = azurerm_nat_gateway_public_ip_prefix_association.test.public_ip_prefix_id +} +`, r.basic(data)) +} + +func (r NatGatewayPublicIpPrefixAssociationResource) updateNatGateway(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_nat_gateway" "test" { + name = "acctest-NatGateway-%d" + location = azurerm_resource_group.test.location + resource_group_name = 
azurerm_resource_group.test.name + sku_name = "Standard" + tags = { + Hello = "World" + } +} + +resource "azurerm_nat_gateway_public_ip_prefix_association" "test" { + nat_gateway_id = azurerm_nat_gateway.test.id + public_ip_prefix_id = azurerm_public_ip_prefix.test.id +} +`, r.template(data), data.RandomInteger) +} + +func (NatGatewayPublicIpPrefixAssociationResource) template(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-ngpi-%d" + location = "%s" +} + +resource "azurerm_public_ip_prefix" "test" { + name = "acctestpublicIPPrefix-%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + prefix_length = 30 + zones = ["1"] +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger) +} diff --git a/azurerm/internal/services/network/nat_gateway_resource.go b/azurerm/internal/services/network/nat_gateway_resource.go index 9c4fbe42a1a2..0b52ed078e47 100644 --- a/azurerm/internal/services/network/nat_gateway_resource.go +++ b/azurerm/internal/services/network/nat_gateway_resource.go @@ -5,7 +5,7 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/hashicorp/go-azure-helpers/response" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" @@ -67,24 +67,27 @@ func resourceNatGateway() *pluginsdk.Resource { ValidateFunc: azure.ValidateResourceID, }, // TODO: remove in 3.0 - Deprecated: "Inline Public IP Address ID Deprecations have been deprecated in favour of the `azurerm_nat_gateway_public_ip_association` pluginsdk. 
This field will be removed in the next major version of the Azure Provider.", + Deprecated: "Inline Public IP Address ID Associations have been deprecated in favour of the `azurerm_nat_gateway_public_ip_association` pluginsdk. This field will be removed in the next major version of the Azure Provider.", }, "public_ip_prefix_ids": { Type: pluginsdk.TypeSet, Optional: true, + Computed: true, Elem: &pluginsdk.Schema{ Type: pluginsdk.TypeString, ValidateFunc: azure.ValidateResourceID, }, + // TODO: remove in 3.0 + Deprecated: "Inline Public IP Prefix ID Associations have been deprecated in favour of the `azurerm_nat_gateway_public_ip_prefix_association` pluginsdk. This field will be removed in the next major version of the Azure Provider.", }, "sku_name": { Type: pluginsdk.TypeString, Optional: true, - Default: string(network.Standard), + Default: string(network.NatGatewaySkuNameStandard), ValidateFunc: validation.StringInSlice([]string{ - string(network.Standard), + string(network.NatGatewaySkuNameStandard), }, false), }, diff --git a/azurerm/internal/services/network/network_connection_monitor_resource.go b/azurerm/internal/services/network/network_connection_monitor_resource.go index 0b9cbb894511..a32fd3a78dd3 100644 --- a/azurerm/internal/services/network/network_connection_monitor_resource.go +++ b/azurerm/internal/services/network/network_connection_monitor_resource.go @@ -4,7 +4,7 @@ import ( "fmt" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/hashicorp/go-azure-helpers/response" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" @@ -162,12 +162,12 @@ func resourceNetworkConnectionMonitor() *pluginsdk.Resource { Type: pluginsdk.TypeString, Optional: true, ValidateFunc: validation.StringInSlice([]string{ - 
string(network.AboveAverage), - string(network.Average), - string(network.BelowAverage), - string(network.Default), - string(network.Full), - string(network.Low), + string(network.CoverageLevelAboveAverage), + string(network.CoverageLevelAverage), + string(network.CoverageLevelBelowAverage), + string(network.CoverageLevelDefault), + string(network.CoverageLevelFull), + string(network.CoverageLevelLow), }, false), }, @@ -204,9 +204,9 @@ func resourceNetworkConnectionMonitor() *pluginsdk.Resource { "type": { Type: pluginsdk.TypeString, Optional: true, - Default: string(network.AgentAddress), + Default: string(network.ConnectionMonitorEndpointFilterItemTypeAgentAddress), ValidateFunc: validation.StringInSlice([]string{ - string(network.AgentAddress), + string(network.ConnectionMonitorEndpointFilterItemTypeAgentAddress), }, false), }, }, @@ -216,9 +216,9 @@ func resourceNetworkConnectionMonitor() *pluginsdk.Resource { "type": { Type: pluginsdk.TypeString, Optional: true, - Default: string(network.Include), + Default: string(network.ConnectionMonitorEndpointFilterTypeInclude), ValidateFunc: validation.StringInSlice([]string{ - string(network.Include), + string(network.ConnectionMonitorEndpointFilterTypeInclude), }, false), }, }, @@ -254,12 +254,12 @@ func resourceNetworkConnectionMonitor() *pluginsdk.Resource { Type: pluginsdk.TypeString, Optional: true, ValidateFunc: validation.StringInSlice([]string{ - string(network.AzureSubnet), - string(network.AzureVM), - string(network.AzureVNet), - string(network.ExternalAddress), - string(network.MMAWorkspaceMachine), - string(network.MMAWorkspaceNetwork), + string(network.EndpointTypeAzureSubnet), + string(network.EndpointTypeAzureVM), + string(network.EndpointTypeAzureVNet), + string(network.EndpointTypeExternalAddress), + string(network.EndpointTypeMMAWorkspaceMachine), + string(network.EndpointTypeMMAWorkspaceNetwork), }, false), }, @@ -304,10 +304,10 @@ func resourceNetworkConnectionMonitor() *pluginsdk.Resource { 
"method": { Type: pluginsdk.TypeString, Optional: true, - Default: string(network.Get), + Default: string(network.HTTPConfigurationMethodGet), ValidateFunc: validation.StringInSlice([]string{ - string(network.Get), - string(network.Post), + string(network.HTTPConfigurationMethodGet), + string(network.HTTPConfigurationMethodPost), }, false), }, @@ -595,7 +595,7 @@ func resourceNetworkConnectionMonitorRead(d *pluginsdk.ResourceData, meta interf return fmt.Errorf("Error reading Connection Monitor %q (Watcher %q / Resource Group %q) %+v", id.Name, id.NetworkWatcherName, id.ResourceGroup, err) } - if resp.ConnectionMonitorType == network.SingleSourceDestination { + if resp.ConnectionMonitorType == network.ConnectionMonitorTypeSingleSourceDestination { return fmt.Errorf("the resource created via API version 2019-06-01 or before (a.k.a v1) isn't compatible to this version of provider. Please migrate to v2 pluginsdk.") } @@ -898,7 +898,7 @@ func expandNetworkConnectionMonitorOutput(input []interface{}) *[]network.Connec for _, item := range input { result := network.ConnectionMonitorOutput{ - Type: network.Workspace, + Type: network.OutputTypeWorkspace, WorkspaceSettings: &network.ConnectionMonitorWorkspaceSettings{ WorkspaceResourceID: utils.String(item.(string)), }, diff --git a/azurerm/internal/services/network/network_connection_monitor_resource_test.go b/azurerm/internal/services/network/network_connection_monitor_resource_test.go index 670add92041f..287e2786a2ed 100644 --- a/azurerm/internal/services/network/network_connection_monitor_resource_test.go +++ b/azurerm/internal/services/network/network_connection_monitor_resource_test.go @@ -21,7 +21,7 @@ func testAccNetworkConnectionMonitor_addressBasic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_network_connection_monitor", "test") r := NetworkConnectionMonitorResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: 
r.basicAddressConfig(data), Check: acceptance.ComposeTestCheckFunc( @@ -36,7 +36,7 @@ func testAccNetworkConnectionMonitor_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_network_connection_monitor", "test") r := NetworkConnectionMonitorResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.basicAddressConfig(data), Check: acceptance.ComposeTestCheckFunc( @@ -54,7 +54,7 @@ func testAccNetworkConnectionMonitor_addressComplete(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_network_connection_monitor", "test") r := NetworkConnectionMonitorResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.completeAddressConfig(data), Check: acceptance.ComposeTestCheckFunc( @@ -69,7 +69,7 @@ func testAccNetworkConnectionMonitor_addressUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_network_connection_monitor", "test") r := NetworkConnectionMonitorResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.basicAddressConfig(data), Check: acceptance.ComposeTestCheckFunc( @@ -90,7 +90,7 @@ func testAccNetworkConnectionMonitor_vmBasic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_network_connection_monitor", "test") r := NetworkConnectionMonitorResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.basicVmConfig(data), Check: acceptance.ComposeTestCheckFunc( @@ -105,7 +105,7 @@ func testAccNetworkConnectionMonitor_vmComplete(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_network_connection_monitor", "test") r := NetworkConnectionMonitorResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.completeVmConfig(data), Check: 
acceptance.ComposeTestCheckFunc( @@ -120,7 +120,7 @@ func testAccNetworkConnectionMonitor_vmUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_network_connection_monitor", "test") r := NetworkConnectionMonitorResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.basicVmConfig(data), Check: acceptance.ComposeTestCheckFunc( @@ -141,7 +141,7 @@ func testAccNetworkConnectionMonitor_destinationUpdate(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_network_connection_monitor", "test") r := NetworkConnectionMonitorResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.basicAddressConfig(data), Check: acceptance.ComposeTestCheckFunc( @@ -168,7 +168,7 @@ func testAccNetworkConnectionMonitor_missingDestination(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_network_connection_monitor", "test") r := NetworkConnectionMonitorResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.missingDestinationConfig(data), ExpectError: regexp.MustCompile("should have at least one destination"), @@ -180,7 +180,7 @@ func testAccNetworkConnectionMonitor_conflictingDestinations(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_network_connection_monitor", "test") r := NetworkConnectionMonitorResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.conflictingDestinationsConfig(data), ExpectError: regexp.MustCompile("don't allow creating different endpoints for the same VM"), @@ -192,7 +192,7 @@ func testAccNetworkConnectionMonitor_withAddressAndVirtualMachineId(t *testing.T data := acceptance.BuildTestData(t, "azurerm_network_connection_monitor", "test") r := NetworkConnectionMonitorResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + 
data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.withAddressAndVirtualMachineIdConfig(data), Check: acceptance.ComposeTestCheckFunc( @@ -207,7 +207,7 @@ func testAccNetworkConnectionMonitor_httpConfiguration(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_network_connection_monitor", "test") r := NetworkConnectionMonitorResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.httpConfigurationConfig(data), Check: acceptance.ComposeTestCheckFunc( @@ -222,7 +222,7 @@ func testAccNetworkConnectionMonitor_icmpConfiguration(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_network_connection_monitor", "test") r := NetworkConnectionMonitorResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.icmpConfigurationConfig(data), Check: acceptance.ComposeTestCheckFunc( @@ -237,7 +237,7 @@ func testAccNetworkConnectionMonitor_endpointDeprecated(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_network_connection_monitor", "test") r := NetworkConnectionMonitorResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.endpointDeprecated(data), Check: acceptance.ComposeTestCheckFunc( @@ -266,7 +266,7 @@ func testAccNetworkConnectionMonitor_updateEndpointIPAddressAndCoverageLevel(t * data := acceptance.BuildTestData(t, "azurerm_network_connection_monitor", "test") r := NetworkConnectionMonitorResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.endpointIPAddressAndCoverageLevel(data), Check: acceptance.ComposeTestCheckFunc( diff --git a/azurerm/internal/services/network/network_ddos_protection_plan_resource.go b/azurerm/internal/services/network/network_ddos_protection_plan_resource.go index 18fcc8f76273..091d74193bfa 100644 --- 
a/azurerm/internal/services/network/network_ddos_protection_plan_resource.go +++ b/azurerm/internal/services/network/network_ddos_protection_plan_resource.go @@ -5,7 +5,7 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" diff --git a/azurerm/internal/services/network/network_interface.go b/azurerm/internal/services/network/network_interface.go index e2366f2d6b4d..0316f2c368ce 100644 --- a/azurerm/internal/services/network/network_interface.go +++ b/azurerm/internal/services/network/network_interface.go @@ -1,7 +1,7 @@ package network import ( - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -120,7 +120,7 @@ func mapFieldsToNetworkInterface(input *[]network.InterfaceIPConfiguration, info continue } - if config.InterfaceIPConfigurationPropertiesFormat.PrivateIPAddressVersion != network.IPv4 { + if config.InterfaceIPConfigurationPropertiesFormat.PrivateIPAddressVersion != network.IPVersionIPv4 { continue } diff --git a/azurerm/internal/services/network/network_interface_application_gateway_association_resource.go b/azurerm/internal/services/network/network_interface_application_gateway_association_resource.go index 999869de9aea..5b31071f1d5a 100644 --- a/azurerm/internal/services/network/network_interface_application_gateway_association_resource.go +++ b/azurerm/internal/services/network/network_interface_application_gateway_association_resource.go @@ -6,7 +6,7 @@ import ( "strings" "time" - 
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" diff --git a/azurerm/internal/services/network/network_interface_application_gateway_association_resource_test.go b/azurerm/internal/services/network/network_interface_application_gateway_association_resource_test.go index ef55e010d44e..3b9e72f8d669 100644 --- a/azurerm/internal/services/network/network_interface_application_gateway_association_resource_test.go +++ b/azurerm/internal/services/network/network_interface_application_gateway_association_resource_test.go @@ -6,15 +6,14 @@ import ( "strings" "testing" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" - - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" network2 "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) type NetworkInterfaceApplicationGatewayBackendAddressPoolAssociationResource struct { diff --git a/azurerm/internal/services/network/network_interface_application_security_group_association_resource_test.go 
b/azurerm/internal/services/network/network_interface_application_security_group_association_resource_test.go index 35a47a710ca3..0f13bad3056c 100644 --- a/azurerm/internal/services/network/network_interface_application_security_group_association_resource_test.go +++ b/azurerm/internal/services/network/network_interface_application_security_group_association_resource_test.go @@ -6,7 +6,7 @@ import ( "strings" "testing" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" diff --git a/azurerm/internal/services/network/network_interface_backend_address_pool_association_resource.go b/azurerm/internal/services/network/network_interface_backend_address_pool_association_resource.go index 452566d654b4..d00b5535bc5d 100644 --- a/azurerm/internal/services/network/network_interface_backend_address_pool_association_resource.go +++ b/azurerm/internal/services/network/network_interface_backend_address_pool_association_resource.go @@ -6,7 +6,7 @@ import ( "strings" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" diff --git a/azurerm/internal/services/network/network_interface_backend_address_pool_association_resource_test.go b/azurerm/internal/services/network/network_interface_backend_address_pool_association_resource_test.go index 6b3877eb834e..09a5db556f62 
100644 --- a/azurerm/internal/services/network/network_interface_backend_address_pool_association_resource_test.go +++ b/azurerm/internal/services/network/network_interface_backend_address_pool_association_resource_test.go @@ -6,7 +6,7 @@ import ( "strings" "testing" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" diff --git a/azurerm/internal/services/network/network_interface_helpers.go b/azurerm/internal/services/network/network_interface_helpers.go index c6a288a70c06..cfca268fb0b9 100644 --- a/azurerm/internal/services/network/network_interface_helpers.go +++ b/azurerm/internal/services/network/network_interface_helpers.go @@ -1,6 +1,6 @@ package network -import "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" +import "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" func FindNetworkInterfaceIPConfiguration(input *[]network.InterfaceIPConfiguration, name string) *network.InterfaceIPConfiguration { if input == nil { diff --git a/azurerm/internal/services/network/network_interface_locking.go b/azurerm/internal/services/network/network_interface_locking.go index b90f5d66405d..312ad99cf3bc 100644 --- a/azurerm/internal/services/network/network_interface_locking.go +++ b/azurerm/internal/services/network/network_interface_locking.go @@ -1,7 +1,7 @@ package network import ( - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/locks" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" diff --git a/azurerm/internal/services/network/network_interface_nat_rule_association_resource.go b/azurerm/internal/services/network/network_interface_nat_rule_association_resource.go index bb08c6fb8a6a..940b826f927a 100644 --- a/azurerm/internal/services/network/network_interface_nat_rule_association_resource.go +++ b/azurerm/internal/services/network/network_interface_nat_rule_association_resource.go @@ -6,7 +6,7 @@ import ( "strings" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" diff --git a/azurerm/internal/services/network/network_interface_nat_rule_association_resource_test.go b/azurerm/internal/services/network/network_interface_nat_rule_association_resource_test.go index 555c62bc2ea9..69370f3ab7e0 100644 --- a/azurerm/internal/services/network/network_interface_nat_rule_association_resource_test.go +++ b/azurerm/internal/services/network/network_interface_nat_rule_association_resource_test.go @@ -6,7 +6,7 @@ import ( "strings" "testing" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" diff --git 
a/azurerm/internal/services/network/network_interface_network_security_group_association_resource.go b/azurerm/internal/services/network/network_interface_network_security_group_association_resource.go index 7a90a0e5082f..5ddc4a2c9637 100644 --- a/azurerm/internal/services/network/network_interface_network_security_group_association_resource.go +++ b/azurerm/internal/services/network/network_interface_network_security_group_association_resource.go @@ -6,7 +6,7 @@ import ( "strings" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/azuresdkhacks" diff --git a/azurerm/internal/services/network/network_interface_resource.go b/azurerm/internal/services/network/network_interface_resource.go index 718c2356189d..8002becccd75 100644 --- a/azurerm/internal/services/network/network_interface_resource.go +++ b/azurerm/internal/services/network/network_interface_resource.go @@ -5,7 +5,7 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" @@ -79,10 +79,10 @@ func resourceNetworkInterface() *pluginsdk.Resource { "private_ip_address_version": { Type: pluginsdk.TypeString, Optional: true, - Default: string(network.IPv4), + Default: string(network.IPVersionIPv4), ValidateFunc: validation.StringInSlice([]string{ - string(network.IPv4), - string(network.IPv6), + 
string(network.IPVersionIPv4), + string(network.IPVersionIPv6), }, false), }, @@ -90,8 +90,8 @@ func resourceNetworkInterface() *pluginsdk.Resource { Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ - string(network.Dynamic), - string(network.Static), + string(network.IPAllocationMethodDynamic), + string(network.IPAllocationMethodStatic), }, true), StateFunc: state.IgnoreCase, DiffSuppressFunc: suppress.CaseDifference, @@ -531,7 +531,7 @@ func expandNetworkInterfaceIPConfigurations(input []interface{}) (*[]network.Int PrivateIPAddressVersion: privateIpAddressVersion, } - if privateIpAddressVersion == network.IPv4 && subnetId == "" { + if privateIpAddressVersion == network.IPVersionIPv4 && subnetId == "" { return nil, fmt.Errorf("A Subnet ID must be specified for an IPv4 Network Interface.") } diff --git a/azurerm/internal/services/network/network_interface_resource_test.go b/azurerm/internal/services/network/network_interface_resource_test.go index c9f74f682ba7..02ebfa68f199 100644 --- a/azurerm/internal/services/network/network_interface_resource_test.go +++ b/azurerm/internal/services/network/network_interface_resource_test.go @@ -5,11 +5,10 @@ import ( "fmt" "testing" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/parse" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) diff --git a/azurerm/internal/services/network/network_packet_capture_resource.go 
b/azurerm/internal/services/network/network_packet_capture_resource.go index 8965c2208276..63be815a1313 100644 --- a/azurerm/internal/services/network/network_packet_capture_resource.go +++ b/azurerm/internal/services/network/network_packet_capture_resource.go @@ -5,7 +5,7 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/hashicorp/go-azure-helpers/response" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" diff --git a/azurerm/internal/services/network/network_packet_capture_resource_test.go b/azurerm/internal/services/network/network_packet_capture_resource_test.go index d69a54f0a5c2..716402148dbe 100644 --- a/azurerm/internal/services/network/network_packet_capture_resource_test.go +++ b/azurerm/internal/services/network/network_packet_capture_resource_test.go @@ -20,7 +20,7 @@ func testAccNetworkPacketCapture_localDisk(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_network_packet_capture", "test") r := NetworkPacketCaptureResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.localDiskConfig(data), Check: acceptance.ComposeTestCheckFunc( @@ -35,7 +35,7 @@ func testAccNetworkPacketCapture_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_network_packet_capture", "test") r := NetworkPacketCaptureResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.localDiskConfig(data), Check: acceptance.ComposeTestCheckFunc( @@ -53,7 +53,7 @@ func testAccNetworkPacketCapture_storageAccount(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_network_packet_capture", "test") r := NetworkPacketCaptureResource{} - 
data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.storageAccountConfig(data), Check: acceptance.ComposeTestCheckFunc( @@ -68,7 +68,7 @@ func testAccNetworkPacketCapture_storageAccountAndLocalDisk(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_network_packet_capture", "test") r := NetworkPacketCaptureResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.storageAccountAndLocalDiskConfig(data), Check: acceptance.ComposeTestCheckFunc( @@ -83,7 +83,7 @@ func testAccNetworkPacketCapture_withFilters(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_network_packet_capture", "test") r := NetworkPacketCaptureResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.localDiskConfigWithFilters(data), Check: acceptance.ComposeTestCheckFunc( diff --git a/azurerm/internal/services/network/network_profile_resource.go b/azurerm/internal/services/network/network_profile_resource.go index a0e60e50a7c3..88ef3e36f2a5 100644 --- a/azurerm/internal/services/network/network_profile_resource.go +++ b/azurerm/internal/services/network/network_profile_resource.go @@ -5,7 +5,7 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" diff --git a/azurerm/internal/services/network/network_security_group_data_source.go b/azurerm/internal/services/network/network_security_group_data_source.go index faa904b1ed13..9ec087235918 100644 --- 
a/azurerm/internal/services/network/network_security_group_data_source.go +++ b/azurerm/internal/services/network/network_security_group_data_source.go @@ -4,11 +4,10 @@ import ( "fmt" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) diff --git a/azurerm/internal/services/network/network_security_group_resource.go b/azurerm/internal/services/network/network_security_group_resource.go index 45795ef40f2a..31012c3595c0 100644 --- a/azurerm/internal/services/network/network_security_group_resource.go +++ b/azurerm/internal/services/network/network_security_group_resource.go @@ -4,7 +4,7 @@ import ( "fmt" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" multierror "github.com/hashicorp/go-multierror" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" diff --git a/azurerm/internal/services/network/network_security_rule_resource.go b/azurerm/internal/services/network/network_security_rule_resource.go index afc31f94258f..d4d483faaeab 100644 --- a/azurerm/internal/services/network/network_security_rule_resource.go +++ b/azurerm/internal/services/network/network_security_rule_resource.go @@ -4,7 +4,7 @@ import ( "fmt" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + 
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" diff --git a/azurerm/internal/services/network/network_subresource.go b/azurerm/internal/services/network/network_subresource.go index 053a29fd9f71..e7ff1d5429a2 100644 --- a/azurerm/internal/services/network/network_subresource.go +++ b/azurerm/internal/services/network/network_subresource.go @@ -1,7 +1,7 @@ package network import ( - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) diff --git a/azurerm/internal/services/network/network_watcher_data_source_test.go b/azurerm/internal/services/network/network_watcher_data_source_test.go index 184120c23094..0619b4ce338f 100644 --- a/azurerm/internal/services/network/network_watcher_data_source_test.go +++ b/azurerm/internal/services/network/network_watcher_data_source_test.go @@ -18,7 +18,7 @@ func testAccDataSourceNetworkWatcher_basic(t *testing.T) { name := fmt.Sprintf("acctestnw-%d", data.RandomInteger) - data.DataSourceTest(t, []acceptance.TestStep{ + data.DataSourceTestInSequence(t, []acceptance.TestStep{ { Config: r.basicConfig(data), Check: acceptance.ComposeTestCheckFunc( diff --git a/azurerm/internal/services/network/network_watcher_flow_log_resource.go b/azurerm/internal/services/network/network_watcher_flow_log_resource.go index d3e4b624cb52..37e0fb574429 100644 --- a/azurerm/internal/services/network/network_watcher_flow_log_resource.go +++ b/azurerm/internal/services/network/network_watcher_flow_log_resource.go @@ -3,49 +3,21 @@ package network import ( "fmt" "log" - "strings" "time" - 
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" - "github.com/hashicorp/go-azure-helpers/response" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/location" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -type NetworkWatcherFlowLogAccountID struct { - azure.ResourceID - NetworkWatcherName string - NetworkSecurityGroupID string -} - -func ParseNetworkWatcherFlowLogID(id string) (*NetworkWatcherFlowLogAccountID, error) { - parts := strings.Split(id, "/networkSecurityGroupId") - if len(parts) != 2 { - return nil, fmt.Errorf("Error: Network Watcher Flow Log ID could not be split on `/networkSecurityGroupId`: %s", id) - } - - watcherId, err := azure.ParseAzureResourceID(parts[0]) - if err != nil { - return nil, err - } - - watcherName, ok := watcherId.Path["networkWatchers"] - if !ok { - return nil, fmt.Errorf("Error: Unable to parse Network Watcher Flow Log ID: networkWatchers is missing from: %s", id) - } - - return &NetworkWatcherFlowLogAccountID{ - ResourceID: *watcherId, - NetworkWatcherName: watcherName, - NetworkSecurityGroupID: parts[1], - }, nil -} - func resourceNetworkWatcherFlowLog() 
*pluginsdk.Resource { return &pluginsdk.Resource{ Create: resourceNetworkWatcherFlowLogCreateUpdate, @@ -53,8 +25,10 @@ func resourceNetworkWatcherFlowLog() *pluginsdk.Resource { Update: resourceNetworkWatcherFlowLogCreateUpdate, Delete: resourceNetworkWatcherFlowLogDelete, - // TODO: replace this with an importer which validates the ID during import - Importer: pluginsdk.DefaultImporter(), + Importer: pluginsdk.ImporterValidatingResourceId(func(id string) error { + _, err := parse.FlowLogID(id) + return err + }), Timeouts: &pluginsdk.ResourceTimeout{ Create: pluginsdk.DefaultTimeout(30 * time.Minute), @@ -73,11 +47,19 @@ func resourceNetworkWatcherFlowLog() *pluginsdk.Resource { "resource_group_name": azure.SchemaResourceGroupName(), + "name": { + Type: pluginsdk.TypeString, + Computed: true, + // TODO 3.0: Make this required, and remove computed. + //Required: true, + //ValidateFunc: validate.NetworkWatcherFlowLogName, + }, + "network_security_group_id": { Type: pluginsdk.TypeString, Required: true, ForceNew: true, - ValidateFunc: azure.ValidateResourceID, + ValidateFunc: validate.NetworkSecurityGroupID, }, "storage_account_id": { @@ -158,6 +140,18 @@ func resourceNetworkWatcherFlowLog() *pluginsdk.Resource { Computed: true, ValidateFunc: validation.IntBetween(1, 2), }, + + "location": { + Type: pluginsdk.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: location.EnhancedValidate, + StateFunc: location.StateFunc, + DiffSuppressFunc: location.DiffSuppressFunc, + }, + + "tags": tags.Schema(), }, } } @@ -175,23 +169,41 @@ func azureRMSuppressFlowLogRetentionPolicyDaysDiff(_, old, _ string, d *pluginsd } func resourceNetworkWatcherFlowLogCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { - client := meta.(*clients.Client).Network.WatcherClient + client := meta.(*clients.Client).Network.FlowLogsClient + subscriptionId := meta.(*clients.Client).Account.SubscriptionId ctx, cancel := 
timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) defer cancel() - networkWatcherName := d.Get("network_watcher_name").(string) resourceGroupName := d.Get("resource_group_name").(string) + networkWatcherName := d.Get("network_watcher_name").(string) networkSecurityGroupID := d.Get("network_security_group_id").(string) - storageAccountID := d.Get("storage_account_id").(string) - enabled := d.Get("enabled").(bool) - - parameters := network.FlowLogInformation{ - TargetResourceID: &networkSecurityGroupID, - FlowLogProperties: &network.FlowLogProperties{ - StorageID: &storageAccountID, - Enabled: &enabled, - RetentionPolicy: expandAzureRmNetworkWatcherFlowLogRetentionPolicy(d), + + // guaranteed via schema validation + nsgId, _ := parse.NetworkSecurityGroupID(networkSecurityGroupID) + id := parse.NewFlowLogID(subscriptionId, resourceGroupName, networkWatcherName, *nsgId) + + loc := d.Get("location").(string) + if loc == "" { + // Get the containing network watcher in order to reuse its location if the "location" is not specified. 
+ watcherClient := meta.(*clients.Client).Network.WatcherClient + resp, err := watcherClient.Get(ctx, id.ResourceGroupName, id.NetworkWatcherName) + if err != nil { + return fmt.Errorf("retrieving %s: %v", parse.NewNetworkWatcherID(id.SubscriptionId, id.ResourceGroupName, id.NetworkWatcherName).ID(), err) + } + if resp.Location != nil { + loc = *resp.Location + } + } + + parameters := network.FlowLog{ + Location: utils.String(location.Normalize(loc)), + FlowLogPropertiesFormat: &network.FlowLogPropertiesFormat{ + TargetResourceID: utils.String(id.NetworkSecurityGroupID()), + StorageID: utils.String(d.Get("storage_account_id").(string)), + Enabled: utils.Bool(d.Get("enabled").(bool)), + RetentionPolicy: expandAzureRmNetworkWatcherFlowLogRetentionPolicy(d), }, + Tags: tags.Expand(d.Get("tags").(map[string]interface{})), } if _, ok := d.GetOk("traffic_analytics"); ok { @@ -203,150 +215,93 @@ func resourceNetworkWatcherFlowLogCreateUpdate(d *pluginsdk.ResourceData, meta i Version: utils.Int32(int32(version.(int))), } - parameters.FlowLogProperties.Format = format + parameters.Format = format } - future, err := client.SetFlowLogConfiguration(ctx, resourceGroupName, networkWatcherName, parameters) + future, err := client.CreateOrUpdate(ctx, id.ResourceGroupName, id.NetworkWatcherName, id.Name(), parameters) if err != nil { - return fmt.Errorf("Error setting Flow Log Configuration for target %q (Network Watcher %q / Resource Group %q): %+v", networkSecurityGroupID, networkWatcherName, resourceGroupName, err) + return fmt.Errorf("Error creating %q: %+v", id, err) } if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { - return fmt.Errorf("Error waiting for completion of setting Flow Log Configuration for target %q (Network Watcher %q / Resource Group %q): %+v", networkSecurityGroupID, networkWatcherName, resourceGroupName, err) + return fmt.Errorf("Error waiting for completion of creating %q: %+v", id, err) } - resp, err := client.Get(ctx, 
resourceGroupName, networkWatcherName) - if err != nil { - return fmt.Errorf("Cannot read Network Watcher %q (Resource Group %q) err: %+v", networkWatcherName, resourceGroupName, err) - } - if resp.ID == nil { - return fmt.Errorf("Network Watcher %q is nil (Resource Group %q)", networkWatcherName, resourceGroupName) - } - - d.SetId(*resp.ID + "/networkSecurityGroupId" + networkSecurityGroupID) + d.SetId(id.ID()) return resourceNetworkWatcherFlowLogRead(d, meta) } func resourceNetworkWatcherFlowLogRead(d *pluginsdk.ResourceData, meta interface{}) error { - client := meta.(*clients.Client).Network.WatcherClient + client := meta.(*clients.Client).Network.FlowLogsClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := ParseNetworkWatcherFlowLogID(d.Id()) + id, err := parse.FlowLogID(d.Id()) if err != nil { return err } // Get current flow log status - statusParameters := network.FlowLogStatusParameters{ - TargetResourceID: &id.NetworkSecurityGroupID, - } - - future, err := client.GetFlowLogStatus(ctx, id.ResourceGroup, id.NetworkWatcherName, statusParameters) + resp, err := client.Get(ctx, id.ResourceGroupName, id.NetworkWatcherName, id.Name()) if err != nil { - if !response.WasNotFound(future.Response()) { - // One of storage account, NSG, or flow log is missing - log.Printf("[INFO] Error getting Flow Log Configuration %q for target %q - removing from state", d.Id(), id.NetworkSecurityGroupID) + if utils.ResponseWasNotFound(resp.Response) { + log.Printf("[DEBUG] %s was not found - removing from state!", id) d.SetId("") return nil } - return fmt.Errorf("Error retrieving Flow Log Configuration for target %q (Network Watcher %q / Resource Group %q): %+v", id.NetworkSecurityGroupID, id.NetworkWatcherName, id.ResourceGroup, err) - } - - if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { - return fmt.Errorf("Error waiting for retrieval of Flow Log Configuration for target %q (Network Watcher %q / Resource 
Group %q): %+v", id.NetworkSecurityGroupID, id.NetworkWatcherName, id.ResourceGroup, err) - } - - fli, err := future.Result(*client) - if err != nil { - return fmt.Errorf("Error retrieving Flow Log Configuration for target %q (Network Watcher %q / Resource Group %q): %+v", id.NetworkSecurityGroupID, id.NetworkWatcherName, id.ResourceGroup, err) + return fmt.Errorf("Error retrieving %q: %+v", id, err) } d.Set("network_watcher_name", id.NetworkWatcherName) - d.Set("resource_group_name", id.ResourceGroup) - - d.Set("network_security_group_id", fli.TargetResourceID) - if err := d.Set("traffic_analytics", flattenAzureRmNetworkWatcherFlowLogTrafficAnalytics(fli.FlowAnalyticsConfiguration)); err != nil { - return fmt.Errorf("Error setting `traffic_analytics`: %+v", err) - } + d.Set("resource_group_name", id.ResourceGroupName) + d.Set("network_security_group_id", id.NetworkSecurityGroupID()) + d.Set("location", location.NormalizeNilable(resp.Location)) + d.Set("name", resp.Name) + + if prop := resp.FlowLogPropertiesFormat; prop != nil { + if err := d.Set("traffic_analytics", flattenAzureRmNetworkWatcherFlowLogTrafficAnalytics(prop.FlowAnalyticsConfiguration)); err != nil { + return fmt.Errorf("Error setting `traffic_analytics`: %+v", err) + } - if props := fli.FlowLogProperties; props != nil { - d.Set("enabled", props.Enabled) + d.Set("enabled", prop.Enabled) - if format := props.Format; format != nil { + if format := prop.Format; format != nil { d.Set("version", format.Version) } // Azure API returns "" when flow log is disabled // Don't overwrite to prevent storage account ID diff when that is the case - if props.StorageID != nil && *props.StorageID != "" { - d.Set("storage_account_id", props.StorageID) + if prop.StorageID != nil && *prop.StorageID != "" { + d.Set("storage_account_id", prop.StorageID) } - if err := d.Set("retention_policy", flattenAzureRmNetworkWatcherFlowLogRetentionPolicy(props.RetentionPolicy)); err != nil { + if err := d.Set("retention_policy", 
flattenAzureRmNetworkWatcherFlowLogRetentionPolicy(prop.RetentionPolicy)); err != nil { return fmt.Errorf("Error setting `retention_policy`: %+v", err) } } - return nil + return tags.FlattenAndSet(d, resp.Tags) } func resourceNetworkWatcherFlowLogDelete(d *pluginsdk.ResourceData, meta interface{}) error { - client := meta.(*clients.Client).Network.WatcherClient + client := meta.(*clients.Client).Network.FlowLogsClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := ParseNetworkWatcherFlowLogID(d.Id()) + id, err := parse.FlowLogID(d.Id()) if err != nil { return err } - // Get current flow log status - statusParameters := network.FlowLogStatusParameters{ - TargetResourceID: &id.NetworkSecurityGroupID, - } - future, err := client.GetFlowLogStatus(ctx, id.ResourceGroup, id.NetworkWatcherName, statusParameters) + future, err := client.Delete(ctx, id.ResourceGroupName, id.NetworkWatcherName, id.Name()) if err != nil { - return fmt.Errorf("getting Flow Log Configuration for target %q (Network Watcher %q / Resource Group %q): %+v", id.NetworkSecurityGroupID, id.NetworkWatcherName, id.ResourceGroup, err) + return fmt.Errorf("deleting %s: %v", id, err) } - if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { - return fmt.Errorf("waiting for retrieval of Flow Log Configuration for target %q (Network Watcher %q / Resource Group %q): %+v", id.NetworkSecurityGroupID, id.NetworkWatcherName, id.ResourceGroup, err) - } - - fli, err := future.Result(*client) - if err != nil { - return fmt.Errorf("retrieving Flow Log Configuration for target %q (Network Watcher %q / Resource Group %q): %+v", id.NetworkSecurityGroupID, id.NetworkWatcherName, id.ResourceGroup, err) - } - - // There is no delete in Azure API. Disabling flow log is effectively a delete in Terraform. 
- if props := fli.FlowLogProperties; props != nil { - if props.Enabled != nil && *props.Enabled { - props.Enabled = utils.Bool(false) - - param := network.FlowLogInformation{ - TargetResourceID: &id.NetworkSecurityGroupID, - FlowLogProperties: &network.FlowLogProperties{ - StorageID: utils.String(*fli.StorageID), - Enabled: utils.Bool(false), - }, - FlowAnalyticsConfiguration: &network.TrafficAnalyticsProperties{ - NetworkWatcherFlowAnalyticsConfiguration: &network.TrafficAnalyticsConfigurationProperties{ - Enabled: utils.Bool(false), - }, - }, - } - setFuture, err := client.SetFlowLogConfiguration(ctx, id.ResourceGroup, id.NetworkWatcherName, param) - if err != nil { - return fmt.Errorf("disabling Flow Log Configuration for target %q (Network Watcher %q / Resource Group %q): %+v", id.NetworkSecurityGroupID, id.NetworkWatcherName, id.ResourceGroup, err) - } - - if err = setFuture.WaitForCompletionRef(ctx, client.Client); err != nil { - return fmt.Errorf("waiting for completion of disabling Flow Log Configuration for target %q (Network Watcher %q / Resource Group %q): %+v", id.NetworkSecurityGroupID, id.NetworkWatcherName, id.ResourceGroup, err) - } - } + if err := future.WaitForCompletionRef(ctx, client.Client); err != nil { + return fmt.Errorf("waiting for deletion of %s: %v", id, err) } return nil diff --git a/azurerm/internal/services/network/network_watcher_flow_log_resource_test.go b/azurerm/internal/services/network/network_watcher_flow_log_resource_test.go index 33c38ec6d025..11db8c6a0753 100644 --- a/azurerm/internal/services/network/network_watcher_flow_log_resource_test.go +++ b/azurerm/internal/services/network/network_watcher_flow_log_resource_test.go @@ -5,11 +5,10 @@ import ( "fmt" "testing" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" - azureNetwork "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -21,7 +20,7 @@ func testAccNetworkWatcherFlowLog_basic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_network_watcher_flow_log", "test") r := NetworkWatcherFlowLogResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.basicConfig(data), Check: acceptance.ComposeTestCheckFunc( @@ -44,7 +43,7 @@ func testAccNetworkWatcherFlowLog_disabled(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_network_watcher_flow_log", "test") r := NetworkWatcherFlowLogResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.disabledConfig(data), Check: acceptance.ComposeTestCheckFunc( @@ -67,7 +66,7 @@ func testAccNetworkWatcherFlowLog_reenabled(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_network_watcher_flow_log", "test") r := NetworkWatcherFlowLogResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.disabledConfig(data), Check: acceptance.ComposeTestCheckFunc( @@ -104,7 +103,7 @@ func testAccNetworkWatcherFlowLog_retentionPolicy(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_network_watcher_flow_log", "test") r := NetworkWatcherFlowLogResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.basicConfig(data), Check: acceptance.ComposeTestCheckFunc( @@ -142,7 +141,7 @@ func 
testAccNetworkWatcherFlowLog_updateStorageAccount(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_network_watcher_flow_log", "test") r := NetworkWatcherFlowLogResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.retentionPolicyConfig(data), Check: acceptance.ComposeTestCheckFunc( @@ -180,7 +179,7 @@ func testAccNetworkWatcherFlowLog_trafficAnalytics(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_network_watcher_flow_log", "test") r := NetworkWatcherFlowLogResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.basicConfig(data), Check: acceptance.ComposeTestCheckFunc( @@ -271,11 +270,28 @@ func testAccNetworkWatcherFlowLog_trafficAnalytics(t *testing.T) { }) } +// TODO 3.0: remove this test as we will validate the length for the `name` property, rather than truncate the name for the users. +func testAccNetworkWatcherFlowLog_longName(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_network_watcher_flow_log", "test") + r := NetworkWatcherFlowLogResource{} + + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ + { + Config: r.longName(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("name").HasValue("Microsoft.NetworkacctestRG-watcher-01234567890123456789012345678901acctestNSG012"), + ), + }, + data.ImportStep(), + }) +} + func testAccNetworkWatcherFlowLog_version(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_network_watcher_flow_log", "test") r := NetworkWatcherFlowLogResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.versionConfig(data, 1), Check: acceptance.ComposeTestCheckFunc( @@ -295,32 +311,55 @@ func testAccNetworkWatcherFlowLog_version(t *testing.T) { }) } +func 
testAccNetworkWatcherFlowLog_location(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_network_watcher_flow_log", "test") + r := NetworkWatcherFlowLogResource{} + + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ + { + Config: r.location(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func testAccNetworkWatcherFlowLog_tags(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_network_watcher_flow_log", "test") + r := NetworkWatcherFlowLogResource{} + + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ + { + Config: r.tags(data, "Test"), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.tags(data, "Prod"), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + func (t NetworkWatcherFlowLogResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { - id, err := azureNetwork.ParseNetworkWatcherFlowLogID(state.ID) + id, err := parse.FlowLogID(state.ID) if err != nil { return nil, err } - // Get current flow log status - statusParameters := network.FlowLogStatusParameters{ - TargetResourceID: &id.NetworkSecurityGroupID, - } - - future, err := clients.Network.WatcherClient.GetFlowLogStatus(ctx, id.ResourceGroup, id.NetworkWatcherName, statusParameters) + resp, err := clients.Network.FlowLogsClient.Get(ctx, id.ResourceGroupName, id.NetworkWatcherName, id.Name()) if err != nil { return nil, fmt.Errorf("reading Network Watcher Flow Log (%s): %+v", id, err) } - if err = future.WaitForCompletionRef(ctx, clients.Network.WatcherClient.Client); err != nil { - return nil, fmt.Errorf("waiting for retrieval of Flow Log Configuration for target %q: %+v", id, err) - } - - fli, err := future.Result(*clients.Network.WatcherClient) - if err 
!= nil { - return nil, fmt.Errorf("retrieving Flow Log Configuration for target %q: %+v", id, err) - } - - return utils.Bool(fli.TargetResourceID != nil), nil + return utils.Bool(resp.ID != nil), nil } func (NetworkWatcherFlowLogResource) prerequisites(data acceptance.TestData) string { @@ -356,7 +395,7 @@ resource "azurerm_storage_account" "test" { account_replication_type = "LRS" enable_https_traffic_only = true } -`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger%1000000) +`, data.RandomIntOfLength(10), data.Locations.Primary, data.RandomIntOfLength(10), data.RandomInteger, data.RandomInteger%1000000) } func (r NetworkWatcherFlowLogResource) basicConfig(data acceptance.TestData) string { @@ -587,3 +626,100 @@ resource "azurerm_network_watcher_flow_log" "test" { } `, r.prerequisites(data), data.RandomInteger, version) } + +func (r NetworkWatcherFlowLogResource) location(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_network_watcher_flow_log" "test" { + network_watcher_name = azurerm_network_watcher.test.name + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + + network_security_group_id = azurerm_network_security_group.test.id + storage_account_id = azurerm_storage_account.test.id + enabled = true + + retention_policy { + enabled = false + days = 0 + } +} +`, r.prerequisites(data)) +} + +func (r NetworkWatcherFlowLogResource) tags(data acceptance.TestData, v string) string { + return fmt.Sprintf(` +%s + +resource "azurerm_network_watcher_flow_log" "test" { + network_watcher_name = azurerm_network_watcher.test.name + resource_group_name = azurerm_resource_group.test.name + + network_security_group_id = azurerm_network_security_group.test.id + storage_account_id = azurerm_storage_account.test.id + enabled = true + + retention_policy { + enabled = false + days = 0 + } + + tags = { + env = "%s" + } +} +`, 
r.prerequisites(data), v) +} + +func (r NetworkWatcherFlowLogResource) longName(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + # 01234567890123456789012345678901234567890123456789 = 40 + name = "acctestRG-watcher-01234567890123456789012345678901" + location = "%s" +} + +resource "azurerm_network_security_group" "test" { + # 01234567890123456789012345678901234567890123456789 = 40 + name = "acctestNSG0123456789012345678901234567890123456789" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name +} + +resource "azurerm_network_watcher" "test" { + name = "acctest-NW-%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name +} + +resource "azurerm_storage_account" "test" { + name = "acctestsa%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + + account_tier = "Standard" + account_kind = "StorageV2" + account_replication_type = "LRS" + enable_https_traffic_only = true +} + +resource "azurerm_network_watcher_flow_log" "test" { + network_watcher_name = azurerm_network_watcher.test.name + resource_group_name = azurerm_resource_group.test.name + + network_security_group_id = azurerm_network_security_group.test.id + storage_account_id = azurerm_storage_account.test.id + enabled = true + + retention_policy { + enabled = false + days = 0 + } +} +`, data.Locations.Primary, data.RandomInteger, data.RandomInteger%1000000) +} diff --git a/azurerm/internal/services/network/network_watcher_resource.go b/azurerm/internal/services/network/network_watcher_resource.go index 810477440f33..0b2a8141302d 100644 --- a/azurerm/internal/services/network/network_watcher_resource.go +++ b/azurerm/internal/services/network/network_watcher_resource.go @@ -4,7 +4,7 @@ import ( "fmt" "time" - 
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/hashicorp/go-azure-helpers/response" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" diff --git a/azurerm/internal/services/network/network_watcher_resource_test.go b/azurerm/internal/services/network/network_watcher_resource_test.go index 9c600090d8cb..65891fcf01cc 100644 --- a/azurerm/internal/services/network/network_watcher_resource_test.go +++ b/azurerm/internal/services/network/network_watcher_resource_test.go @@ -20,6 +20,11 @@ func TestAccNetworkWatcher(t *testing.T) { // NOTE: this is a combined test rather than separate split out tests due to // Azure only being happy about provisioning one per region at once // (which our test suite can't easily workaround) + + // NOTE: Normally these tests can be separated to its own test cases, rather than this big composite one, since + // we are not calling the `t.Parallel()` for each sub-test. However, currently nightly test are using the jen20/teamcity-go-test + // which will invoke a `go test` for each test function, which effectively making them to be in parallel, even if they are intended + // to be run in sequential. 
testCases := map[string]map[string]func(t *testing.T){ "basic": { "basic": testAccNetworkWatcher_basic, @@ -69,7 +74,10 @@ func TestAccNetworkWatcher(t *testing.T) { "retentionPolicy": testAccNetworkWatcherFlowLog_retentionPolicy, "updateStorageAccount": testAccNetworkWatcherFlowLog_updateStorageAccount, "trafficAnalytics": testAccNetworkWatcherFlowLog_trafficAnalytics, + "long_name": testAccNetworkWatcherFlowLog_longName, "version": testAccNetworkWatcherFlowLog_version, + "location": testAccNetworkWatcherFlowLog_location, + "tags": testAccNetworkWatcherFlowLog_tags, }, } @@ -90,7 +98,7 @@ func testAccNetworkWatcher_basic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_network_watcher", "test") r := NetworkWatcherResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.basicConfig(data), Check: acceptance.ComposeTestCheckFunc( @@ -104,7 +112,7 @@ func testAccNetworkWatcher_basic(t *testing.T) { func testAccNetworkWatcher_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_network_watcher", "test") r := NetworkWatcherResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.basicConfig(data), Check: acceptance.ComposeTestCheckFunc( @@ -121,7 +129,7 @@ func testAccNetworkWatcher_requiresImport(t *testing.T) { func testAccNetworkWatcher_complete(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_network_watcher", "test") r := NetworkWatcherResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.completeConfig(data), Check: acceptance.ComposeTestCheckFunc( @@ -136,7 +144,7 @@ func testAccNetworkWatcher_update(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_network_watcher", "test") r := NetworkWatcherResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, 
r, []acceptance.TestStep{ { Config: r.basicConfig(data), Check: acceptance.ComposeTestCheckFunc( @@ -156,7 +164,7 @@ func testAccNetworkWatcher_disappears(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_network_watcher", "test") r := NetworkWatcherResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ data.DisappearsStep(acceptance.DisappearsStepData{ Config: r.basicConfig, TestResource: r, diff --git a/azurerm/internal/services/network/packet_capture_resource.go b/azurerm/internal/services/network/packet_capture_resource.go index 8602400192ab..6c01ddd3a4c0 100644 --- a/azurerm/internal/services/network/packet_capture_resource.go +++ b/azurerm/internal/services/network/packet_capture_resource.go @@ -5,7 +5,7 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/hashicorp/go-azure-helpers/response" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" diff --git a/azurerm/internal/services/network/packet_capture_resource_test.go b/azurerm/internal/services/network/packet_capture_resource_test.go index f9b9d91b3667..5e3de6d70e0f 100644 --- a/azurerm/internal/services/network/packet_capture_resource_test.go +++ b/azurerm/internal/services/network/packet_capture_resource_test.go @@ -20,7 +20,7 @@ func testAccPacketCapture_localDisk(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_packet_capture", "test") r := PacketCaptureResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.localDiskConfig(data), Check: acceptance.ComposeTestCheckFunc( @@ -35,7 +35,7 @@ func testAccPacketCapture_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, 
"azurerm_packet_capture", "test") r := PacketCaptureResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.localDiskConfig(data), Check: acceptance.ComposeTestCheckFunc( @@ -53,7 +53,7 @@ func testAccPacketCapture_storageAccount(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_packet_capture", "test") r := PacketCaptureResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.storageAccountConfig(data), Check: acceptance.ComposeTestCheckFunc( @@ -68,7 +68,7 @@ func testAccPacketCapture_storageAccountAndLocalDisk(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_packet_capture", "test") r := PacketCaptureResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.storageAccountAndLocalDiskConfig(data), Check: acceptance.ComposeTestCheckFunc( @@ -83,7 +83,7 @@ func testAccPacketCapture_withFilters(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_packet_capture", "test") r := PacketCaptureResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ + data.ResourceSequentialTest(t, r, []acceptance.TestStep{ { Config: r.localDiskConfigWithFilters(data), Check: acceptance.ComposeTestCheckFunc( @@ -188,7 +188,7 @@ resource "azurerm_virtual_machine" "test" { resource "azurerm_virtual_machine_extension" "test" { name = "network-watcher" - virtual_machine_id = azurerm_virtual_machine.src.id + virtual_machine_id = azurerm_virtual_machine.test.id publisher = "Microsoft.Azure.NetworkWatcher" type = "NetworkWatcherAgentLinux" type_handler_version = "1.4" diff --git a/azurerm/internal/services/network/parse/express_route_circuit.go b/azurerm/internal/services/network/parse/express_route_circuit.go new file mode 100644 index 000000000000..ba60a9aab8bf --- /dev/null +++ 
b/azurerm/internal/services/network/parse/express_route_circuit.go @@ -0,0 +1,69 @@ +package parse + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "fmt" + "strings" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" +) + +type ExpressRouteCircuitId struct { + SubscriptionId string + ResourceGroup string + Name string +} + +func NewExpressRouteCircuitID(subscriptionId, resourceGroup, name string) ExpressRouteCircuitId { + return ExpressRouteCircuitId{ + SubscriptionId: subscriptionId, + ResourceGroup: resourceGroup, + Name: name, + } +} + +func (id ExpressRouteCircuitId) String() string { + segments := []string{ + fmt.Sprintf("Name %q", id.Name), + fmt.Sprintf("Resource Group %q", id.ResourceGroup), + } + segmentsStr := strings.Join(segments, " / ") + return fmt.Sprintf("%s: (%s)", "Express Route Circuit", segmentsStr) +} + +func (id ExpressRouteCircuitId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/expressRouteCircuits/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.Name) +} + +// ExpressRouteCircuitID parses a ExpressRouteCircuit ID into an ExpressRouteCircuitId struct +func ExpressRouteCircuitID(input string) (*ExpressRouteCircuitId, error) { + id, err := azure.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := ExpressRouteCircuitId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + if resourceId.Name, err = id.PopSegment("expressRouteCircuits"); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} diff --git 
a/azurerm/internal/services/network/parse/express_route_circuit_connection.go b/azurerm/internal/services/network/parse/express_route_circuit_connection.go new file mode 100644 index 000000000000..032f5ac4b219 --- /dev/null +++ b/azurerm/internal/services/network/parse/express_route_circuit_connection.go @@ -0,0 +1,81 @@ +package parse + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "fmt" + "strings" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" +) + +type ExpressRouteCircuitConnectionId struct { + SubscriptionId string + ResourceGroup string + ExpressRouteCircuitName string + PeeringName string + ConnectionName string +} + +func NewExpressRouteCircuitConnectionID(subscriptionId, resourceGroup, expressRouteCircuitName, peeringName, connectionName string) ExpressRouteCircuitConnectionId { + return ExpressRouteCircuitConnectionId{ + SubscriptionId: subscriptionId, + ResourceGroup: resourceGroup, + ExpressRouteCircuitName: expressRouteCircuitName, + PeeringName: peeringName, + ConnectionName: connectionName, + } +} + +func (id ExpressRouteCircuitConnectionId) String() string { + segments := []string{ + fmt.Sprintf("Connection Name %q", id.ConnectionName), + fmt.Sprintf("Peering Name %q", id.PeeringName), + fmt.Sprintf("Express Route Circuit Name %q", id.ExpressRouteCircuitName), + fmt.Sprintf("Resource Group %q", id.ResourceGroup), + } + segmentsStr := strings.Join(segments, " / ") + return fmt.Sprintf("%s: (%s)", "Express Route Circuit Connection", segmentsStr) +} + +func (id ExpressRouteCircuitConnectionId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/expressRouteCircuits/%s/peerings/%s/connections/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.ExpressRouteCircuitName, id.PeeringName, id.ConnectionName) +} + +// ExpressRouteCircuitConnectionID parses a ExpressRouteCircuitConnection ID into an 
ExpressRouteCircuitConnectionId struct +func ExpressRouteCircuitConnectionID(input string) (*ExpressRouteCircuitConnectionId, error) { + id, err := azure.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := ExpressRouteCircuitConnectionId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + if resourceId.ExpressRouteCircuitName, err = id.PopSegment("expressRouteCircuits"); err != nil { + return nil, err + } + if resourceId.PeeringName, err = id.PopSegment("peerings"); err != nil { + return nil, err + } + if resourceId.ConnectionName, err = id.PopSegment("connections"); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} diff --git a/azurerm/internal/services/network/parse/express_route_circuit_connection_test.go b/azurerm/internal/services/network/parse/express_route_circuit_connection_test.go new file mode 100644 index 000000000000..398802d76113 --- /dev/null +++ b/azurerm/internal/services/network/parse/express_route_circuit_connection_test.go @@ -0,0 +1,144 @@ +package parse + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "testing" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/resourceid" +) + +var _ resourceid.Formatter = ExpressRouteCircuitConnectionId{} + +func TestExpressRouteCircuitConnectionIDFormatter(t *testing.T) { + actual := NewExpressRouteCircuitConnectionID("12345678-1234-9876-4563-123456789012", "resGroup1", "circuit1", "peering1", "connection1").ID() + expected := 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/expressRouteCircuits/circuit1/peerings/peering1/connections/connection1" + if actual != expected { + t.Fatalf("Expected %q but got %q", expected, actual) + } +} + +func TestExpressRouteCircuitConnectionID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ExpressRouteCircuitConnectionId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/", + Error: true, + }, + + { + // missing ExpressRouteCircuitName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/", + Error: true, + }, + + { + // missing value for ExpressRouteCircuitName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/expressRouteCircuits/", + Error: true, + }, + + { + // missing PeeringName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/expressRouteCircuits/circuit1/", + Error: true, + }, + + { + // missing value for PeeringName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/expressRouteCircuits/circuit1/peerings/", + Error: true, + }, + + { + // missing ConnectionName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/expressRouteCircuits/circuit1/peerings/peering1/", + Error: true, + }, + + { + // missing value for ConnectionName + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/expressRouteCircuits/circuit1/peerings/peering1/connections/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/expressRouteCircuits/circuit1/peerings/peering1/connections/connection1", + Expected: &ExpressRouteCircuitConnectionId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroup: "resGroup1", + ExpressRouteCircuitName: "circuit1", + PeeringName: "peering1", + ConnectionName: "connection1", + }, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/RESGROUP1/PROVIDERS/MICROSOFT.NETWORK/EXPRESSROUTECIRCUITS/CIRCUIT1/PEERINGS/PEERING1/CONNECTIONS/CONNECTION1", + Error: true, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ExpressRouteCircuitConnectionID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.ExpressRouteCircuitName != v.Expected.ExpressRouteCircuitName { + t.Fatalf("Expected %q but got %q for ExpressRouteCircuitName", v.Expected.ExpressRouteCircuitName, actual.ExpressRouteCircuitName) + } + if actual.PeeringName != v.Expected.PeeringName { + t.Fatalf("Expected %q but got %q for PeeringName", v.Expected.PeeringName, actual.PeeringName) + } + if actual.ConnectionName != v.Expected.ConnectionName { + t.Fatalf("Expected %q but got %q for ConnectionName", 
v.Expected.ConnectionName, actual.ConnectionName) + } + } +} diff --git a/azurerm/internal/services/network/parse/express_route_circuit_peering.go b/azurerm/internal/services/network/parse/express_route_circuit_peering.go new file mode 100644 index 000000000000..eb55c135d97e --- /dev/null +++ b/azurerm/internal/services/network/parse/express_route_circuit_peering.go @@ -0,0 +1,75 @@ +package parse + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "fmt" + "strings" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" +) + +type ExpressRouteCircuitPeeringId struct { + SubscriptionId string + ResourceGroup string + ExpressRouteCircuitName string + PeeringName string +} + +func NewExpressRouteCircuitPeeringID(subscriptionId, resourceGroup, expressRouteCircuitName, peeringName string) ExpressRouteCircuitPeeringId { + return ExpressRouteCircuitPeeringId{ + SubscriptionId: subscriptionId, + ResourceGroup: resourceGroup, + ExpressRouteCircuitName: expressRouteCircuitName, + PeeringName: peeringName, + } +} + +func (id ExpressRouteCircuitPeeringId) String() string { + segments := []string{ + fmt.Sprintf("Peering Name %q", id.PeeringName), + fmt.Sprintf("Express Route Circuit Name %q", id.ExpressRouteCircuitName), + fmt.Sprintf("Resource Group %q", id.ResourceGroup), + } + segmentsStr := strings.Join(segments, " / ") + return fmt.Sprintf("%s: (%s)", "Express Route Circuit Peering", segmentsStr) +} + +func (id ExpressRouteCircuitPeeringId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/expressRouteCircuits/%s/peerings/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.ExpressRouteCircuitName, id.PeeringName) +} + +// ExpressRouteCircuitPeeringID parses a ExpressRouteCircuitPeering ID into an ExpressRouteCircuitPeeringId struct +func ExpressRouteCircuitPeeringID(input string) (*ExpressRouteCircuitPeeringId, error) { + 
id, err := azure.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := ExpressRouteCircuitPeeringId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + if resourceId.ExpressRouteCircuitName, err = id.PopSegment("expressRouteCircuits"); err != nil { + return nil, err + } + if resourceId.PeeringName, err = id.PopSegment("peerings"); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} diff --git a/azurerm/internal/services/network/parse/express_route_circuit_peering_test.go b/azurerm/internal/services/network/parse/express_route_circuit_peering_test.go new file mode 100644 index 000000000000..39b00756af4b --- /dev/null +++ b/azurerm/internal/services/network/parse/express_route_circuit_peering_test.go @@ -0,0 +1,128 @@ +package parse + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "testing" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/resourceid" +) + +var _ resourceid.Formatter = ExpressRouteCircuitPeeringId{} + +func TestExpressRouteCircuitPeeringIDFormatter(t *testing.T) { + actual := NewExpressRouteCircuitPeeringID("12345678-1234-9876-4563-123456789012", "resGroup1", "erCircuit1", "peering1").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/expressRouteCircuits/erCircuit1/peerings/peering1" + if actual != expected { + t.Fatalf("Expected %q but got %q", expected, actual) + } +} + +func TestExpressRouteCircuitPeeringID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected 
*ExpressRouteCircuitPeeringId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/", + Error: true, + }, + + { + // missing ExpressRouteCircuitName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/", + Error: true, + }, + + { + // missing value for ExpressRouteCircuitName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/expressRouteCircuits/", + Error: true, + }, + + { + // missing PeeringName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/expressRouteCircuits/erCircuit1/", + Error: true, + }, + + { + // missing value for PeeringName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/expressRouteCircuits/erCircuit1/peerings/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/expressRouteCircuits/erCircuit1/peerings/peering1", + Expected: &ExpressRouteCircuitPeeringId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroup: "resGroup1", + ExpressRouteCircuitName: "erCircuit1", + PeeringName: "peering1", + }, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/RESGROUP1/PROVIDERS/MICROSOFT.NETWORK/EXPRESSROUTECIRCUITS/ERCIRCUIT1/PEERINGS/PEERING1", + Error: true, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) 
+ + actual, err := ExpressRouteCircuitPeeringID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.ExpressRouteCircuitName != v.Expected.ExpressRouteCircuitName { + t.Fatalf("Expected %q but got %q for ExpressRouteCircuitName", v.Expected.ExpressRouteCircuitName, actual.ExpressRouteCircuitName) + } + if actual.PeeringName != v.Expected.PeeringName { + t.Fatalf("Expected %q but got %q for PeeringName", v.Expected.PeeringName, actual.PeeringName) + } + } +} diff --git a/azurerm/internal/services/network/parse/express_route_circuit_test.go b/azurerm/internal/services/network/parse/express_route_circuit_test.go new file mode 100644 index 000000000000..afa42498692b --- /dev/null +++ b/azurerm/internal/services/network/parse/express_route_circuit_test.go @@ -0,0 +1,112 @@ +package parse + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "testing" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/resourceid" +) + +var _ resourceid.Formatter = ExpressRouteCircuitId{} + +func TestExpressRouteCircuitIDFormatter(t *testing.T) { + actual := NewExpressRouteCircuitID("12345678-1234-9876-4563-123456789012", "resGroup1", "erCircuit1").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/expressRouteCircuits/erCircuit1" + if actual != expected { + t.Fatalf("Expected %q but got %q", expected, actual) + } +} + +func TestExpressRouteCircuitID(t *testing.T) { + 
testData := []struct { + Input string + Error bool + Expected *ExpressRouteCircuitId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/", + Error: true, + }, + + { + // missing Name + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/", + Error: true, + }, + + { + // missing value for Name + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/expressRouteCircuits/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/expressRouteCircuits/erCircuit1", + Expected: &ExpressRouteCircuitId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroup: "resGroup1", + Name: "erCircuit1", + }, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/RESGROUP1/PROVIDERS/MICROSOFT.NETWORK/EXPRESSROUTECIRCUITS/ERCIRCUIT1", + Error: true, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ExpressRouteCircuitID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for 
ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.Name != v.Expected.Name { + t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + } + } +} diff --git a/azurerm/internal/services/network/parse/express_route_connection.go b/azurerm/internal/services/network/parse/express_route_connection.go new file mode 100644 index 000000000000..3ffd2f65972b --- /dev/null +++ b/azurerm/internal/services/network/parse/express_route_connection.go @@ -0,0 +1,75 @@ +package parse + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "fmt" + "strings" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" +) + +type ExpressRouteConnectionId struct { + SubscriptionId string + ResourceGroup string + ExpressRouteGatewayName string + Name string +} + +func NewExpressRouteConnectionID(subscriptionId, resourceGroup, expressRouteGatewayName, name string) ExpressRouteConnectionId { + return ExpressRouteConnectionId{ + SubscriptionId: subscriptionId, + ResourceGroup: resourceGroup, + ExpressRouteGatewayName: expressRouteGatewayName, + Name: name, + } +} + +func (id ExpressRouteConnectionId) String() string { + segments := []string{ + fmt.Sprintf("Name %q", id.Name), + fmt.Sprintf("Express Route Gateway Name %q", id.ExpressRouteGatewayName), + fmt.Sprintf("Resource Group %q", id.ResourceGroup), + } + segmentsStr := strings.Join(segments, " / ") + return fmt.Sprintf("%s: (%s)", "Express Route Connection", segmentsStr) +} + +func (id ExpressRouteConnectionId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/expressRouteGateways/%s/expressRouteConnections/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.ExpressRouteGatewayName, id.Name) +} + +// ExpressRouteConnectionID parses a ExpressRouteConnection ID into an ExpressRouteConnectionId struct +func ExpressRouteConnectionID(input string) 
(*ExpressRouteConnectionId, error) { + id, err := azure.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := ExpressRouteConnectionId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + if resourceId.ExpressRouteGatewayName, err = id.PopSegment("expressRouteGateways"); err != nil { + return nil, err + } + if resourceId.Name, err = id.PopSegment("expressRouteConnections"); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} diff --git a/azurerm/internal/services/network/parse/express_route_connection_test.go b/azurerm/internal/services/network/parse/express_route_connection_test.go new file mode 100644 index 000000000000..30e902fe5c72 --- /dev/null +++ b/azurerm/internal/services/network/parse/express_route_connection_test.go @@ -0,0 +1,128 @@ +package parse + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "testing" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/resourceid" +) + +var _ resourceid.Formatter = ExpressRouteConnectionId{} + +func TestExpressRouteConnectionIDFormatter(t *testing.T) { + actual := NewExpressRouteConnectionID("12345678-1234-9876-4563-123456789012", "resGroup1", "ergw1", "erConnection1").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/expressRouteGateways/ergw1/expressRouteConnections/erConnection1" + if actual != expected { + t.Fatalf("Expected %q but got %q", expected, actual) + } +} + +func TestExpressRouteConnectionID(t *testing.T) { + testData := []struct { + Input string + Error bool + 
Expected *ExpressRouteConnectionId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/", + Error: true, + }, + + { + // missing ExpressRouteGatewayName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/", + Error: true, + }, + + { + // missing value for ExpressRouteGatewayName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/expressRouteGateways/", + Error: true, + }, + + { + // missing Name + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/expressRouteGateways/ergw1/", + Error: true, + }, + + { + // missing value for Name + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/expressRouteGateways/ergw1/expressRouteConnections/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/expressRouteGateways/ergw1/expressRouteConnections/erConnection1", + Expected: &ExpressRouteConnectionId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroup: "resGroup1", + ExpressRouteGatewayName: "ergw1", + Name: "erConnection1", + }, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/RESGROUP1/PROVIDERS/MICROSOFT.NETWORK/EXPRESSROUTEGATEWAYS/ERGW1/EXPRESSROUTECONNECTIONS/ERCONNECTION1", + Error: true, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] 
Testing %q", v.Input) + + actual, err := ExpressRouteConnectionID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.ExpressRouteGatewayName != v.Expected.ExpressRouteGatewayName { + t.Fatalf("Expected %q but got %q for ExpressRouteGatewayName", v.Expected.ExpressRouteGatewayName, actual.ExpressRouteGatewayName) + } + if actual.Name != v.Expected.Name { + t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + } + } +} diff --git a/azurerm/internal/services/network/parse/express_route_gateway.go b/azurerm/internal/services/network/parse/express_route_gateway.go new file mode 100644 index 000000000000..c32da2f41fbf --- /dev/null +++ b/azurerm/internal/services/network/parse/express_route_gateway.go @@ -0,0 +1,69 @@ +package parse + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "fmt" + "strings" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" +) + +type ExpressRouteGatewayId struct { + SubscriptionId string + ResourceGroup string + Name string +} + +func NewExpressRouteGatewayID(subscriptionId, resourceGroup, name string) ExpressRouteGatewayId { + return ExpressRouteGatewayId{ + SubscriptionId: subscriptionId, + ResourceGroup: resourceGroup, + Name: name, + } +} + +func (id ExpressRouteGatewayId) String() string { + segments := []string{ + fmt.Sprintf("Name %q", id.Name), + fmt.Sprintf("Resource Group %q", id.ResourceGroup), + } + segmentsStr := strings.Join(segments, " / ") + return 
fmt.Sprintf("%s: (%s)", "Express Route Gateway", segmentsStr) +} + +func (id ExpressRouteGatewayId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/expressRouteGateways/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.Name) +} + +// ExpressRouteGatewayID parses a ExpressRouteGateway ID into an ExpressRouteGatewayId struct +func ExpressRouteGatewayID(input string) (*ExpressRouteGatewayId, error) { + id, err := azure.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := ExpressRouteGatewayId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + if resourceId.Name, err = id.PopSegment("expressRouteGateways"); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} diff --git a/azurerm/internal/services/network/parse/express_route_gateway_test.go b/azurerm/internal/services/network/parse/express_route_gateway_test.go new file mode 100644 index 000000000000..4e06c9110f3c --- /dev/null +++ b/azurerm/internal/services/network/parse/express_route_gateway_test.go @@ -0,0 +1,112 @@ +package parse + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "testing" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/resourceid" +) + +var _ resourceid.Formatter = ExpressRouteGatewayId{} + +func TestExpressRouteGatewayIDFormatter(t *testing.T) { + actual := NewExpressRouteGatewayID("12345678-1234-9876-4563-123456789012", "resGroup1", "ergw1").ID() + expected := 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/expressRouteGateways/ergw1" + if actual != expected { + t.Fatalf("Expected %q but got %q", expected, actual) + } +} + +func TestExpressRouteGatewayID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ExpressRouteGatewayId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/", + Error: true, + }, + + { + // missing Name + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/", + Error: true, + }, + + { + // missing value for Name + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/expressRouteGateways/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/expressRouteGateways/ergw1", + Expected: &ExpressRouteGatewayId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroup: "resGroup1", + Name: "ergw1", + }, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/RESGROUP1/PROVIDERS/MICROSOFT.NETWORK/EXPRESSROUTEGATEWAYS/ERGW1", + Error: true, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ExpressRouteGatewayID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if 
actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.Name != v.Expected.Name { + t.Fatalf("Expected %q but got %q for Name", v.Expected.Name, actual.Name) + } + } +} diff --git a/azurerm/internal/services/network/parse/flow_log.go b/azurerm/internal/services/network/parse/flow_log.go new file mode 100644 index 000000000000..7f438bf68ffe --- /dev/null +++ b/azurerm/internal/services/network/parse/flow_log.go @@ -0,0 +1,82 @@ +package parse + +import ( + "fmt" + "strings" +) + +type FlowLogId struct { + SubscriptionId string + ResourceGroupName string + NetworkWatcherName string + nsgId NetworkSecurityGroupId +} + +func NewFlowLogID(subscriptionId, resourceGroup, networkWatcherName string, nsgId NetworkSecurityGroupId) FlowLogId { + return FlowLogId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroup, + NetworkWatcherName: networkWatcherName, + nsgId: nsgId, + } +} + +func (id FlowLogId) String() string { + segments := []string{ + fmt.Sprintf("Name %q", id.Name()), + fmt.Sprintf("Network Watcher Name %q", id.NetworkWatcherName), + fmt.Sprintf("Resource Group %q", id.ResourceGroupName), + } + segmentsStr := strings.Join(segments, " / ") + return fmt.Sprintf("%s: (%s)", "Flow Log", segmentsStr) +} + +func (id FlowLogId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/networkWatchers/%s/networkSecurityGroupId" + "%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.NetworkWatcherName, id.nsgId.ID()) +} + +func (id FlowLogId) Name() string { + // The flow log name generated by the "configureFlowLog" endpoint is in below format: + // Microsoft.Network{nsg rg name}{nsg name} + // We follow this rule to 
ensure backward compatibility. + name := fmt.Sprintf("Microsoft.Network%s%s", id.nsgId.ResourceGroup, id.nsgId.Name) + + // TODO 3.0: remove below code and generate the ID parser via go generate + // Check whether the name is Longer than 80, which is the maximum allowed length for the flow log dedicated endpoint. + // If exceeded, we will truncate the name. + // This is needed since we are constructing the name from resource group name and NSG name. It makes no sense to ask + // users to constraint those names in order to make the flow log work. Also the Portal does that behind the scenes. + if len(name) > 80 { + name = name[:80] + } + return name +} + +func (id FlowLogId) NetworkSecurityGroupID() string { + return id.nsgId.ID() +} + +func FlowLogID(id string) (*FlowLogId, error) { + parts := strings.Split(id, "/networkSecurityGroupId") + if len(parts) != 2 { + return nil, fmt.Errorf("Error: Network Watcher Flow Log ID could not be split on `/networkSecurityGroupId`: %s", id) + } + + watcherId, err := NetworkWatcherID(parts[0]) + if err != nil { + return nil, err + } + + nsgId, err := NetworkSecurityGroupID(parts[1]) + if err != nil { + return nil, fmt.Errorf("parsing the networkSecurityGroupId inside the Network Watcher Flow Log ID: %v", err) + } + + return &FlowLogId{ + SubscriptionId: watcherId.SubscriptionId, + ResourceGroupName: watcherId.ResourceGroup, + NetworkWatcherName: watcherId.Name, + nsgId: *nsgId, + }, nil +} diff --git a/azurerm/internal/services/network/parse/nat_gateway_public_ip_prefix_association.go b/azurerm/internal/services/network/parse/nat_gateway_public_ip_prefix_association.go new file mode 100644 index 000000000000..7c73de21e8df --- /dev/null +++ b/azurerm/internal/services/network/parse/nat_gateway_public_ip_prefix_association.go @@ -0,0 +1,34 @@ +package parse + +import ( + "fmt" + "strings" +) + +type NatGatewayPublicIPPrefixAssociationId struct { + NatGateway NatGatewayId + PublicIPPrefixID string +} + +func 
NatGatewayPublicIPPrefixAssociationID(input string) (*NatGatewayPublicIPPrefixAssociationId, error) { + segments := strings.Split(input, "|") + if len(segments) != 2 { + return nil, fmt.Errorf("Expected an ID in the format `{natGatewayID}|{publicIPPrefixID} but got %q", input) + } + + natGatewayId, err := NatGatewayID(segments[0]) + if err != nil { + return nil, fmt.Errorf("parsing NAT Gateway ID %q: %+v", segments[0], err) + } + + // whilst we need the Resource ID, we may as well validate it + publicIPPrefix := segments[1] + if _, err := PublicIpPrefixID(publicIPPrefix); err != nil { + return nil, fmt.Errorf("parsing Public IP Address ID %q: %+v", publicIPPrefix, err) + } + + return &NatGatewayPublicIPPrefixAssociationId{ + NatGateway: *natGatewayId, + PublicIPPrefixID: publicIPPrefix, + }, nil +} diff --git a/azurerm/internal/services/network/parse/nat_gateway_public_ip_prefix_association_test.go b/azurerm/internal/services/network/parse/nat_gateway_public_ip_prefix_association_test.go new file mode 100644 index 000000000000..9fad24ab681e --- /dev/null +++ b/azurerm/internal/services/network/parse/nat_gateway_public_ip_prefix_association_test.go @@ -0,0 +1,78 @@ +package parse + +import ( + "testing" +) + +func TestNatGatewayPublicIPPrefixAssociationID(t *testing.T) { + testData := []struct { + Name string + Input string + Error bool + Expect *NatGatewayPublicIPPrefixAssociationId + }{ + { + Name: "Empty", + Input: "", + Error: true, + }, + { + Name: "One Segment", + Input: "hello", + Error: true, + }, + { + Name: "Two Segments Invalid ID's", + Input: "hello|world", + Error: true, + }, + { + Name: "Missing Nat Gateway Value", + Input: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Network/natGateways", + Error: true, + }, + { + Name: "Nat Gateway ID", + Input: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Network/natGateways/gateway1", + Error: true, + }, + { + Name: 
"Public IP Address ID", + Input: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/publicIPPrefixes/myPublicIPPrefix1", + Error: true, + }, + { + Name: "Nat Gateway / Public IP Association ID", + Input: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Network/natGateways/gateway1|/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/publicIPPrefixes/myPublicIPPrefix1", + Error: false, + Expect: &NatGatewayPublicIPPrefixAssociationId{ + NatGateway: NatGatewayId{ + Name: "gateway1", + ResourceGroup: "group1", + }, + PublicIPPrefixID: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/publicIPPrefixes/myPublicIPPrefix1", + }, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Name) + + actual, err := NatGatewayPublicIPPrefixAssociationID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expected a value but got an error: %s", err) + } + + if actual.NatGateway.Name != v.Expect.NatGateway.Name { + t.Fatalf("Expected %q but got %q for Name", v.Expect.NatGateway.Name, actual.NatGateway.Name) + } + + if actual.NatGateway.ResourceGroup != v.Expect.NatGateway.ResourceGroup { + t.Fatalf("Expected %q but got %q for Resource Group", v.Expect.NatGateway.ResourceGroup, actual.NatGateway.ResourceGroup) + } + } +} diff --git a/azurerm/internal/services/network/parse/virtual_network_dns_servers.go b/azurerm/internal/services/network/parse/virtual_network_dns_servers.go new file mode 100644 index 000000000000..cd1f236b00bc --- /dev/null +++ b/azurerm/internal/services/network/parse/virtual_network_dns_servers.go @@ -0,0 +1,131 @@ +package parse + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "fmt" + "strings" + + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" +) + +type VirtualNetworkDnsServersId struct { + SubscriptionId string + ResourceGroup string + VirtualNetworkName string + DnsServerName string +} + +func NewVirtualNetworkDnsServersID(subscriptionId, resourceGroup, virtualNetworkName, dnsServerName string) VirtualNetworkDnsServersId { + return VirtualNetworkDnsServersId{ + SubscriptionId: subscriptionId, + ResourceGroup: resourceGroup, + VirtualNetworkName: virtualNetworkName, + DnsServerName: dnsServerName, + } +} + +func (id VirtualNetworkDnsServersId) String() string { + segments := []string{ + fmt.Sprintf("Dns Server Name %q", id.DnsServerName), + fmt.Sprintf("Virtual Network Name %q", id.VirtualNetworkName), + fmt.Sprintf("Resource Group %q", id.ResourceGroup), + } + segmentsStr := strings.Join(segments, " / ") + return fmt.Sprintf("%s: (%s)", "Virtual Network Dns Servers", segmentsStr) +} + +func (id VirtualNetworkDnsServersId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/virtualNetworks/%s/dnsServers/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroup, id.VirtualNetworkName, id.DnsServerName) +} + +// VirtualNetworkDnsServersID parses a VirtualNetworkDnsServers ID into an VirtualNetworkDnsServersId struct +func VirtualNetworkDnsServersID(input string) (*VirtualNetworkDnsServersId, error) { + id, err := azure.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := VirtualNetworkDnsServersId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + if resourceId.VirtualNetworkName, err = id.PopSegment("virtualNetworks"); err != nil { + return nil, err + } + if 
resourceId.DnsServerName, err = id.PopSegment("dnsServers"); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} + +// VirtualNetworkDnsServersIDInsensitively parses an VirtualNetworkDnsServers ID into an VirtualNetworkDnsServersId struct, insensitively +// This should only be used to parse an ID for rewriting, the VirtualNetworkDnsServersID +// method should be used instead for validation etc. +// +// Whilst this may seem strange, this enables Terraform have consistent casing +// which works around issues in Core, whilst handling broken API responses. +func VirtualNetworkDnsServersIDInsensitively(input string) (*VirtualNetworkDnsServersId, error) { + id, err := azure.ParseAzureResourceID(input) + if err != nil { + return nil, err + } + + resourceId := VirtualNetworkDnsServersId{ + SubscriptionId: id.SubscriptionID, + ResourceGroup: id.ResourceGroup, + } + + if resourceId.SubscriptionId == "" { + return nil, fmt.Errorf("ID was missing the 'subscriptions' element") + } + + if resourceId.ResourceGroup == "" { + return nil, fmt.Errorf("ID was missing the 'resourceGroups' element") + } + + // find the correct casing for the 'virtualNetworks' segment + virtualNetworksKey := "virtualNetworks" + for key := range id.Path { + if strings.EqualFold(key, virtualNetworksKey) { + virtualNetworksKey = key + break + } + } + if resourceId.VirtualNetworkName, err = id.PopSegment(virtualNetworksKey); err != nil { + return nil, err + } + + // find the correct casing for the 'dnsServers' segment + dnsServersKey := "dnsServers" + for key := range id.Path { + if strings.EqualFold(key, dnsServersKey) { + dnsServersKey = key + break + } + } + if resourceId.DnsServerName, err = id.PopSegment(dnsServersKey); err != nil { + return nil, err + } + + if err := id.ValidateNoEmptySegments(input); err != nil { + return nil, err + } + + return &resourceId, nil +} diff --git 
a/azurerm/internal/services/network/parse/virtual_network_dns_servers_test.go b/azurerm/internal/services/network/parse/virtual_network_dns_servers_test.go new file mode 100644 index 000000000000..532dd3b7dcbf --- /dev/null +++ b/azurerm/internal/services/network/parse/virtual_network_dns_servers_test.go @@ -0,0 +1,264 @@ +package parse + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "testing" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/resourceid" +) + +var _ resourceid.Formatter = VirtualNetworkDnsServersId{} + +func TestVirtualNetworkDnsServersIDFormatter(t *testing.T) { + actual := NewVirtualNetworkDnsServersID("12345678-1234-9876-4563-123456789012", "resGroup1", "network1", "default").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/virtualNetworks/network1/dnsServers/default" + if actual != expected { + t.Fatalf("Expected %q but got %q", expected, actual) + } +} + +func TestVirtualNetworkDnsServersID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *VirtualNetworkDnsServersId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/", + Error: true, + }, + + { + // missing VirtualNetworkName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/", + Error: true, + }, + + { + // missing value for VirtualNetworkName + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/virtualNetworks/", + Error: true, + }, + + { + // missing DnsServerName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/virtualNetworks/network1/", + Error: true, + }, + + { + // missing value for DnsServerName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/virtualNetworks/network1/dnsServers/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/virtualNetworks/network1/dnsServers/default", + Expected: &VirtualNetworkDnsServersId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroup: "resGroup1", + VirtualNetworkName: "network1", + DnsServerName: "default", + }, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/RESGROUP1/PROVIDERS/MICROSOFT.NETWORK/VIRTUALNETWORKS/NETWORK1/DNSSERVERS/DEFAULT", + Error: true, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := VirtualNetworkDnsServersID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.VirtualNetworkName != v.Expected.VirtualNetworkName { + t.Fatalf("Expected %q but got %q for VirtualNetworkName", v.Expected.VirtualNetworkName, actual.VirtualNetworkName) + } + if actual.DnsServerName != 
v.Expected.DnsServerName { + t.Fatalf("Expected %q but got %q for DnsServerName", v.Expected.DnsServerName, actual.DnsServerName) + } + } +} + +func TestVirtualNetworkDnsServersIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *VirtualNetworkDnsServersId + }{ + + { + // empty + Input: "", + Error: true, + }, + + { + // missing SubscriptionId + Input: "/", + Error: true, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Error: true, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/", + Error: true, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/", + Error: true, + }, + + { + // missing VirtualNetworkName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/", + Error: true, + }, + + { + // missing value for VirtualNetworkName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/virtualNetworks/", + Error: true, + }, + + { + // missing DnsServerName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/virtualNetworks/network1/", + Error: true, + }, + + { + // missing value for DnsServerName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/virtualNetworks/network1/dnsServers/", + Error: true, + }, + + { + // valid + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/virtualNetworks/network1/dnsServers/default", + Expected: &VirtualNetworkDnsServersId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroup: "resGroup1", + VirtualNetworkName: "network1", + DnsServerName: "default", + }, + }, + + { + // lower-cased segment names + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/virtualnetworks/network1/dnsservers/default", + Expected: &VirtualNetworkDnsServersId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroup: "resGroup1", + VirtualNetworkName: "network1", + DnsServerName: "default", + }, + }, + + { + // upper-cased segment names + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/VIRTUALNETWORKS/network1/DNSSERVERS/default", + Expected: &VirtualNetworkDnsServersId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroup: "resGroup1", + VirtualNetworkName: "network1", + DnsServerName: "default", + }, + }, + + { + // mixed-cased segment names + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/ViRtUaLnEtWoRkS/network1/DnSsErVeRs/default", + Expected: &VirtualNetworkDnsServersId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroup: "resGroup1", + VirtualNetworkName: "network1", + DnsServerName: "default", + }, + }, + } + + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := VirtualNetworkDnsServersIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %s", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + if actual.ResourceGroup != v.Expected.ResourceGroup { + t.Fatalf("Expected %q but got %q for ResourceGroup", v.Expected.ResourceGroup, actual.ResourceGroup) + } + if actual.VirtualNetworkName != v.Expected.VirtualNetworkName { + t.Fatalf("Expected %q but got %q for VirtualNetworkName", v.Expected.VirtualNetworkName, actual.VirtualNetworkName) + } + if 
actual.DnsServerName != v.Expected.DnsServerName { + t.Fatalf("Expected %q but got %q for DnsServerName", v.Expected.DnsServerName, actual.DnsServerName) + } + } +} diff --git a/azurerm/internal/services/network/point_to_site_vpn_gateway_resource.go b/azurerm/internal/services/network/point_to_site_vpn_gateway_resource.go index 2d69d4308b89..87dfa4b318cc 100644 --- a/azurerm/internal/services/network/point_to_site_vpn_gateway_resource.go +++ b/azurerm/internal/services/network/point_to_site_vpn_gateway_resource.go @@ -5,7 +5,7 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" diff --git a/azurerm/internal/services/network/private_endpoint_connection_data_source.go b/azurerm/internal/services/network/private_endpoint_connection_data_source.go index 609d9670c698..1a7b4dc1436f 100644 --- a/azurerm/internal/services/network/private_endpoint_connection_data_source.go +++ b/azurerm/internal/services/network/private_endpoint_connection_data_source.go @@ -5,11 +5,10 @@ import ( "fmt" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/validate" - + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/validate" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" diff --git a/azurerm/internal/services/network/private_endpoint_resource.go b/azurerm/internal/services/network/private_endpoint_resource.go index 651d1b661ebf..8272d37fa664 100644 --- a/azurerm/internal/services/network/private_endpoint_resource.go +++ b/azurerm/internal/services/network/private_endpoint_resource.go @@ -7,7 +7,7 @@ import ( "strings" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/hashicorp/go-azure-helpers/response" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" diff --git a/azurerm/internal/services/network/private_link_service_data_source.go b/azurerm/internal/services/network/private_link_service_data_source.go index f151118a4740..179a94e2f21e 100644 --- a/azurerm/internal/services/network/private_link_service_data_source.go +++ b/azurerm/internal/services/network/private_link_service_data_source.go @@ -4,12 +4,10 @@ import ( "fmt" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/validate" - - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" - + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" diff --git a/azurerm/internal/services/network/private_link_service_endpoint_connections_data_source.go b/azurerm/internal/services/network/private_link_service_endpoint_connections_data_source.go index f0ddf4a933cc..ab844cf6c741 100644 --- a/azurerm/internal/services/network/private_link_service_endpoint_connections_data_source.go +++ b/azurerm/internal/services/network/private_link_service_endpoint_connections_data_source.go @@ -4,7 +4,7 @@ import ( "fmt" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" diff --git a/azurerm/internal/services/network/private_link_service_resource.go b/azurerm/internal/services/network/private_link_service_resource.go index e3d9ecd437bf..4651b53c4cb8 100644 --- a/azurerm/internal/services/network/private_link_service_resource.go +++ b/azurerm/internal/services/network/private_link_service_resource.go @@ -6,7 +6,7 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/hashicorp/go-azure-helpers/response" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" @@ -100,9 +100,9 @@ func resourcePrivateLinkService() *pluginsdk.Resource { Type: pluginsdk.TypeString, Optional: true, ValidateFunc: validation.StringInSlice([]string{ - string(network.IPv4), + 
string(network.IPVersionIPv4), }, false), - Default: string(network.IPv4), + Default: string(network.IPVersionIPv4), }, "subnet_id": { Type: pluginsdk.TypeString, @@ -122,6 +122,7 @@ func resourcePrivateLinkService() *pluginsdk.Resource { "load_balancer_frontend_ip_configuration_ids": { Type: pluginsdk.TypeSet, Required: true, + ForceNew: true, Elem: &pluginsdk.Schema{ Type: pluginsdk.TypeString, ValidateFunc: azure.ValidateResourceID, @@ -215,7 +216,7 @@ func resourcePrivateLinkServiceCreateUpdate(d *pluginsdk.ResourceData, meta inte stateConf.Timeout = d.Timeout(pluginsdk.TimeoutUpdate) } - if _, err := stateConf.WaitForState(); err != nil { + if _, err := stateConf.WaitForStateContext(ctx); err != nil { return fmt.Errorf("Error waiting for Private Link Service %q (Resource Group %q) to become available: %s", name, resourceGroup, err) } @@ -348,9 +349,9 @@ func expandPrivateLinkServiceIPConfiguration(input []interface{}) *[]network.Pri } if privateIpAddress != "" { - result.PrivateLinkServiceIPConfigurationProperties.PrivateIPAllocationMethod = network.Static + result.PrivateLinkServiceIPConfigurationProperties.PrivateIPAllocationMethod = network.IPAllocationMethodStatic } else { - result.PrivateLinkServiceIPConfigurationProperties.PrivateIPAllocationMethod = network.Dynamic + result.PrivateLinkServiceIPConfigurationProperties.PrivateIPAllocationMethod = network.IPAllocationMethodDynamic } results = append(results, result) diff --git a/azurerm/internal/services/network/public_ip_prefix_resource.go b/azurerm/internal/services/network/public_ip_prefix_resource.go index 867778e459b8..4b508b306dd0 100644 --- a/azurerm/internal/services/network/public_ip_prefix_resource.go +++ b/azurerm/internal/services/network/public_ip_prefix_resource.go @@ -5,10 +5,11 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/location" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" @@ -45,13 +46,31 @@ func resourcePublicIpPrefix() *pluginsdk.Resource { "resource_group_name": azure.SchemaResourceGroupName(), + "availability_zone": { + Type: pluginsdk.TypeString, + Optional: true, + //Default: "Zone-Redundant", + Computed: true, + ForceNew: true, + ConflictsWith: []string{ + "zones", + }, + ValidateFunc: validation.StringInSlice([]string{ + "No-Zone", + "1", + "2", + "3", + "Zone-Redundant", + }, false), + }, + "sku": { Type: pluginsdk.TypeString, Optional: true, ForceNew: true, - Default: string(network.Standard), + Default: string(network.PublicIPPrefixSkuNameStandard), ValidateFunc: validation.StringInSlice([]string{ - string(network.Standard), + string(network.PublicIPPrefixSkuNameStandard), }, false), }, @@ -68,7 +87,22 @@ func resourcePublicIpPrefix() *pluginsdk.Resource { Computed: true, }, - "zones": azure.SchemaSingleZone(), + // TODO - 3.0 make Computed only + "zones": { + Type: pluginsdk.TypeList, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{ + "availability_zone", + }, + Deprecated: "This property has been deprecated in favour of `availability_zone` due to a breaking behavioural change in Azure: https://azure.microsoft.com/en-us/updates/zone-behavior-change/", + MaxItems: 1, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + ValidateFunc: validation.StringIsNotEmpty, + }, + }, "tags": 
tags.Schema(), }, @@ -99,9 +133,28 @@ func resourcePublicIpPrefixCreateUpdate(d *pluginsdk.ResourceData, meta interfac location := azure.NormalizeLocation(d.Get("location").(string)) sku := d.Get("sku").(string) - prefix_length := d.Get("prefix_length").(int) + prefixLength := d.Get("prefix_length").(int) t := d.Get("tags").(map[string]interface{}) - zones := azure.ExpandZones(d.Get("zones").([]interface{})) + + zones := &[]string{"1", "2"} + // TODO - Remove in 3.0 + if deprecatedZonesRaw, ok := d.GetOk("zones"); ok { + deprecatedZones := azure.ExpandZones(deprecatedZonesRaw.([]interface{})) + if deprecatedZones != nil { + zones = deprecatedZones + } + } + + if availabilityZones, ok := d.GetOk("availability_zone"); ok { + switch availabilityZones.(string) { + case "1", "2", "3": + zones = &[]string{availabilityZones.(string)} + case "Zone-Redundant": + zones = &[]string{"1", "2"} + case "No-Zone": + zones = &[]string{} + } + } publicIpPrefix := network.PublicIPPrefix{ Location: &location, @@ -109,7 +162,7 @@ func resourcePublicIpPrefixCreateUpdate(d *pluginsdk.ResourceData, meta interfac Name: network.PublicIPPrefixSkuName(sku), }, PublicIPPrefixPropertiesFormat: &network.PublicIPPrefixPropertiesFormat{ - PrefixLength: utils.Int32(int32(prefix_length)), + PrefixLength: utils.Int32(int32(prefixLength)), }, Tags: tags.Expand(t), Zones: zones, @@ -151,11 +204,24 @@ func resourcePublicIpPrefixRead(d *pluginsdk.ResourceData, meta interface{}) err d.Set("name", id.PublicIPPrefixeName) d.Set("resource_group_name", id.ResourceGroup) - d.Set("zones", resp.Zones) - if location := resp.Location; location != nil { - d.Set("location", azure.NormalizeLocation(*location)) + + availabilityZones := "No-Zone" + zonesDeprecated := make([]string, 0) + if resp.Zones != nil { + if len(*resp.Zones) > 1 { + availabilityZones = "Zone-Redundant" + } + if len(*resp.Zones) == 1 { + zones := *resp.Zones + availabilityZones = zones[0] + zonesDeprecated = zones + } } + 
d.Set("availability_zone", availabilityZones) + d.Set("zones", zonesDeprecated) + d.Set("location", location.NormalizeNilable(resp.Location)) + if sku := resp.Sku; sku != nil { d.Set("sku", string(sku.Name)) } diff --git a/azurerm/internal/services/network/public_ip_prefix_resource_test.go b/azurerm/internal/services/network/public_ip_prefix_resource_test.go index 5d801811f589..7cc6fa8a8edd 100644 --- a/azurerm/internal/services/network/public_ip_prefix_resource_test.go +++ b/azurerm/internal/services/network/public_ip_prefix_resource_test.go @@ -5,11 +5,10 @@ import ( "fmt" "testing" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/parse" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -154,6 +153,39 @@ func TestAccPublicIpPrefix_disappears(t *testing.T) { }) } +func TestAccPublicIpPrefix_availabilityZoneRedundant(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_public_ip_prefix", "test") + r := PublicIPPrefixResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.withAvailabilityZone(data, "Zone-Redundant"), + }, + }) +} + +func TestAccPublicIpPrefix_availabilityZoneSingle(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_public_ip_prefix", "test") + r := PublicIPPrefixResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.withAvailabilityZone(data, "1"), + }, + }) +} + +func TestAccPublicIpPrefix_availabilityZoneSNoZone(t *testing.T) { + data := 
acceptance.BuildTestData(t, "azurerm_public_ip_prefix", "test") + r := PublicIPPrefixResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.withAvailabilityZone(data, "No-Zone"), + }, + }) +} + func (PublicIPPrefixResource) basic(data acceptance.TestData) string { return fmt.Sprintf(` provider "azurerm" { @@ -273,3 +305,23 @@ resource "azurerm_public_ip_prefix" "test" { } `, data.RandomInteger, data.Locations.Primary, data.RandomInteger) } + +func (PublicIPPrefixResource) withAvailabilityZone(data acceptance.TestData, availabilityZone string) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_public_ip_prefix" "test" { + name = "acctestpublicipprefix-%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + availability_zone = "%s" +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, availabilityZone) +} diff --git a/azurerm/internal/services/network/public_ip_resource.go b/azurerm/internal/services/network/public_ip_resource.go index fc347e7b918c..92ce85bb984f 100644 --- a/azurerm/internal/services/network/public_ip_resource.go +++ b/azurerm/internal/services/network/public_ip_resource.go @@ -6,10 +6,11 @@ import ( "strings" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/location" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/parse" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" @@ -55,20 +56,38 @@ func resourcePublicIp() *pluginsdk.Resource { Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ - string(network.Static), - string(network.Dynamic), + string(network.IPAllocationMethodStatic), + string(network.IPAllocationMethodDynamic), + }, false), + }, + + "availability_zone": { + Type: pluginsdk.TypeString, + Optional: true, + //Default: "Zone-Redundant", + Computed: true, + ForceNew: true, + ConflictsWith: []string{ + "zones", + }, + ValidateFunc: validation.StringInSlice([]string{ + "No-Zone", + "1", + "2", + "3", + "Zone-Redundant", }, false), }, "ip_version": { Type: pluginsdk.TypeString, Optional: true, - Default: string(network.IPv4), + Default: string(network.IPVersionIPv4), ForceNew: true, DiffSuppressFunc: suppress.CaseDifference, ValidateFunc: validation.StringInSlice([]string{ - string(network.IPv4), - string(network.IPv6), + string(network.IPVersionIPv4), + string(network.IPVersionIPv6), }, true), }, @@ -128,7 +147,22 @@ func resourcePublicIp() *pluginsdk.Resource { }, }, - "zones": azure.SchemaSingleZone(), + // TODO - 3.0 make Computed only + "zones": { + Type: pluginsdk.TypeList, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{ + "availability_zone", + }, + Deprecated: "This property has been deprecated in favour of `availability_zone` due to a breaking behavioural change in Azure: https://azure.microsoft.com/en-us/updates/zone-behavior-change/", + MaxItems: 1, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + ValidateFunc: validation.StringIsNotEmpty, + }, + }, "tags": tags.Schema(), }, @@ -143,26 +177,6 @@ func resourcePublicIpCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) e log.Printf("[INFO] preparing arguments for AzureRM Public IP creation.") - 
location := azure.NormalizeLocation(d.Get("location").(string)) - sku := d.Get("sku").(string) - t := d.Get("tags").(map[string]interface{}) - zones := azure.ExpandZones(d.Get("zones").([]interface{})) - idleTimeout := d.Get("idle_timeout_in_minutes").(int) - ipVersion := network.IPVersion(d.Get("ip_version").(string)) - ipAllocationMethod := d.Get("allocation_method").(string) - - if strings.EqualFold(sku, "basic") { - if zones != nil { - return fmt.Errorf("Basic SKU does not support Availability Zone scenarios. You need to use Standard SKU public IP for Availability Zone scenarios.") - } - } - - if strings.EqualFold(sku, "standard") { - if !strings.EqualFold(ipAllocationMethod, "static") { - return fmt.Errorf("Static IP allocation must be used when creating Standard SKU public IP addresses.") - } - } - id := parse.NewPublicIpAddressID(subscriptionId, d.Get("resource_group_name").(string), d.Get("name").(string)) if d.IsNewResource() { existing, err := client.Get(ctx, id.ResourceGroup, id.Name, "") @@ -177,6 +191,50 @@ func resourcePublicIpCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) e } } + location := azure.NormalizeLocation(d.Get("location").(string)) + sku := d.Get("sku").(string) + t := d.Get("tags").(map[string]interface{}) + // Default to Zone-Redundant - Legacy behaviour TODO - Switch to `No-Zone` in 3.0 to match service? 
+ zones := &[]string{"1", "2"} + zonesSet := false + // TODO - Remove in 3.0 + if deprecatedZonesRaw, ok := d.GetOk("zones"); ok { + zonesSet = true + deprecatedZones := azure.ExpandZones(deprecatedZonesRaw.([]interface{})) + if deprecatedZones != nil { + zones = deprecatedZones + } + } + + if availabilityZones, ok := d.GetOk("availability_zone"); ok { + zonesSet = true + switch availabilityZones.(string) { + case "1", "2", "3": + zones = &[]string{availabilityZones.(string)} + case "Zone-Redundant": + zones = &[]string{"1", "2"} + case "No-Zone": + zones = &[]string{} + } + } + + if strings.EqualFold(sku, "Basic") { + if zonesSet && len(*zones) > 0 { + return fmt.Errorf("Availability Zones are not available on the `Basic` SKU") + } + zones = &[]string{} + } + + idleTimeout := d.Get("idle_timeout_in_minutes").(int) + ipVersion := network.IPVersion(d.Get("ip_version").(string)) + ipAllocationMethod := d.Get("allocation_method").(string) + + if strings.EqualFold(sku, "standard") { + if !strings.EqualFold(ipAllocationMethod, "static") { + return fmt.Errorf("Static IP allocation must be used when creating Standard SKU public IP addresses.") + } + } + publicIp := network.PublicIPAddress{ Name: utils.String(id.Name), Location: &location, @@ -267,11 +325,24 @@ func resourcePublicIpRead(d *pluginsdk.ResourceData, meta interface{}) error { d.Set("name", id.Name) d.Set("resource_group_name", id.ResourceGroup) - d.Set("zones", resp.Zones) - if location := resp.Location; location != nil { - d.Set("location", azure.NormalizeLocation(*location)) + + availabilityZones := "No-Zone" + zonesDeprecated := make([]string, 0) + if resp.Zones != nil { + if len(*resp.Zones) > 1 { + availabilityZones = "Zone-Redundant" + } + if len(*resp.Zones) == 1 { + zones := *resp.Zones + availabilityZones = zones[0] + zonesDeprecated = zones + } } + d.Set("availability_zone", availabilityZones) + d.Set("zones", zonesDeprecated) + d.Set("location", location.NormalizeNilable(resp.Location)) + if sku := 
resp.Sku; sku != nil { d.Set("sku", string(sku.Name)) } diff --git a/azurerm/internal/services/network/public_ip_resource_test.go b/azurerm/internal/services/network/public_ip_resource_test.go index 0b7c3b54ef3a..861293983e98 100644 --- a/azurerm/internal/services/network/public_ip_resource_test.go +++ b/azurerm/internal/services/network/public_ip_resource_test.go @@ -63,13 +63,52 @@ func TestAccPublicIpStatic_zones(t *testing.T) { data.ResourceTest(t, r, []acceptance.TestStep{ { - Config: r.withZone(data), + Config: r.withZone(data, "1"), Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("ip_address").Exists(), check.That(data.ResourceName).Key("allocation_method").HasValue("Static"), - check.That(data.ResourceName).Key("zones.#").HasValue("1"), - check.That(data.ResourceName).Key("zones.0").HasValue("1"), + check.That(data.ResourceName).Key("zones.#").HasValue("1"), // Deprecated - TODO remove in 3.0 + check.That(data.ResourceName).Key("zones.0").HasValue("1"), // Deprecated - TODO remove in 3.0 + check.That(data.ResourceName).Key("availability_zone").HasValue("1"), + ), + }, + data.ImportStep(), + }) +} + +func TestAccPublicIpStatic_zonesNoZone(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_public_ip", "test") + r := PublicIPResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.withZone(data, "No-Zone"), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("ip_address").Exists(), + check.That(data.ResourceName).Key("allocation_method").HasValue("Static"), + check.That(data.ResourceName).Key("zones.#").HasValue("0"), // Deprecated - TODO remove in 3.0 + check.That(data.ResourceName).Key("availability_zone").HasValue("No-Zone"), + ), + }, + data.ImportStep(), + }) +} + +func TestAccPublicIpStatic_zonesZoneRedundant(t *testing.T) { + data := acceptance.BuildTestData(t, 
"azurerm_public_ip", "test") + r := PublicIPResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.withZone(data, "Zone-Redundant"), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("ip_address").Exists(), + check.That(data.ResourceName).Key("allocation_method").HasValue("Static"), + check.That(data.ResourceName).Key("zones.#").HasValue("0"), // Deprecated Note: Zero here due to legacy behaviour - TODO remove in 3.0 + check.That(data.ResourceName).Key("availability_zone").HasValue("Zone-Redundant"), ), }, data.ImportStep(), @@ -421,7 +460,7 @@ resource "azurerm_public_ip" "import" { `, r.static_basic(data)) } -func (PublicIPResource) withZone(data acceptance.TestData) string { +func (PublicIPResource) withZone(data acceptance.TestData, availabilityZone string) string { return fmt.Sprintf(` provider "azurerm" { features {} @@ -438,9 +477,9 @@ resource "azurerm_public_ip" "test" { resource_group_name = azurerm_resource_group.test.name allocation_method = "Static" sku = "Standard" - zones = ["1"] + availability_zone = "%s" } -`, data.RandomInteger, data.Locations.Primary, data.RandomInteger) +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, availabilityZone) } func (PublicIPResource) basic_withDNSLabel(data acceptance.TestData, dnsNameLabel string) string { @@ -771,9 +810,9 @@ resource "azurerm_public_ip" "test" { resource_group_name = azurerm_resource_group.test.name allocation_method = "Static" - domain_name_label = "k2345678-1-2345678-2-2345678-3-2345678-4-2345678-5-2345678-6-23" + domain_name_label = "%s" } -`, data.RandomInteger, data.Locations.Primary, data.RandomInteger) +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomStringOfLength(63)) } func (PublicIPResource) standard_IpTags(data acceptance.TestData) string { diff --git a/azurerm/internal/services/network/public_ips_data_source.go 
b/azurerm/internal/services/network/public_ips_data_source.go index 3a50d57f65f0..00942c8e1f51 100644 --- a/azurerm/internal/services/network/public_ips_data_source.go +++ b/azurerm/internal/services/network/public_ips_data_source.go @@ -6,7 +6,7 @@ import ( "strings" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" @@ -39,8 +39,8 @@ func dataSourcePublicIPs() *pluginsdk.Resource { Type: pluginsdk.TypeString, Optional: true, ValidateFunc: validation.StringInSlice([]string{ - string(network.Dynamic), - string(network.Static), + string(network.IPAllocationMethodDynamic), + string(network.IPAllocationMethodStatic), }, false), }, diff --git a/azurerm/internal/services/network/registration.go b/azurerm/internal/services/network/registration.go index b95094314050..3c28b8f790a6 100644 --- a/azurerm/internal/services/network/registration.go +++ b/azurerm/internal/services/network/registration.go @@ -46,67 +46,74 @@ func (r Registration) SupportedDataSources() map[string]*pluginsdk.Resource { "azurerm_virtual_network": dataSourceVirtualNetwork(), "azurerm_web_application_firewall_policy": dataWebApplicationFirewallPolicy(), "azurerm_virtual_wan": dataSourceVirtualWan(), + "azurerm_local_network_gateway": dataSourceLocalNetworkGateway(), } } // SupportedResources returns the supported Resources supported by this Service func (r Registration) SupportedResources() map[string]*pluginsdk.Resource { return map[string]*pluginsdk.Resource{ - "azurerm_application_gateway": resourceApplicationGateway(), - "azurerm_application_security_group": resourceApplicationSecurityGroup(), - "azurerm_bastion_host": 
resourceBastionHost(), - "azurerm_express_route_circuit_authorization": resourceExpressRouteCircuitAuthorization(), - "azurerm_express_route_circuit_peering": resourceExpressRouteCircuitPeering(), - "azurerm_express_route_circuit": resourceExpressRouteCircuit(), - "azurerm_express_route_gateway": resourceExpressRouteGateway(), - "azurerm_express_route_port": resourceArmExpressRoutePort(), - "azurerm_ip_group": resourceIpGroup(), - "azurerm_local_network_gateway": resourceLocalNetworkGateway(), - "azurerm_nat_gateway": resourceNatGateway(), - "azurerm_network_connection_monitor": resourceNetworkConnectionMonitor(), - "azurerm_network_ddos_protection_plan": resourceNetworkDDoSProtectionPlan(), - "azurerm_network_interface": resourceNetworkInterface(), + "azurerm_application_gateway": resourceApplicationGateway(), + "azurerm_application_security_group": resourceApplicationSecurityGroup(), + "azurerm_bastion_host": resourceBastionHost(), + "azurerm_express_route_circuit_connection": resourceExpressRouteCircuitConnection(), + "azurerm_express_route_circuit_authorization": resourceExpressRouteCircuitAuthorization(), + "azurerm_express_route_circuit_peering": resourceExpressRouteCircuitPeering(), + "azurerm_express_route_circuit": resourceExpressRouteCircuit(), + "azurerm_express_route_connection": resourceExpressRouteConnection(), + "azurerm_express_route_gateway": resourceExpressRouteGateway(), + "azurerm_express_route_port": resourceArmExpressRoutePort(), + "azurerm_ip_group": resourceIpGroup(), + "azurerm_local_network_gateway": resourceLocalNetworkGateway(), + "azurerm_nat_gateway": resourceNatGateway(), + "azurerm_nat_gateway_public_ip_association": resourceNATGatewayPublicIpAssociation(), + "azurerm_nat_gateway_public_ip_prefix_association": resourceNATGatewayPublicIpPrefixAssociation(), + "azurerm_network_connection_monitor": resourceNetworkConnectionMonitor(), + "azurerm_network_ddos_protection_plan": resourceNetworkDDoSProtectionPlan(), + 
"azurerm_network_interface": resourceNetworkInterface(), + "azurerm_network_interface_application_gateway_backend_address_pool_association": resourceNetworkInterfaceApplicationGatewayBackendAddressPoolAssociation(), "azurerm_network_interface_application_security_group_association": resourceNetworkInterfaceApplicationSecurityGroupAssociation(), "azurerm_network_interface_backend_address_pool_association": resourceNetworkInterfaceBackendAddressPoolAssociation(), "azurerm_network_interface_nat_rule_association": resourceNetworkInterfaceNatRuleAssociation(), "azurerm_network_interface_security_group_association": resourceNetworkInterfaceSecurityGroupAssociation(), - "azurerm_network_packet_capture": resourceNetworkPacketCapture(), - "azurerm_network_profile": resourceNetworkProfile(), - "azurerm_packet_capture": resourcePacketCapture(), - "azurerm_point_to_site_vpn_gateway": resourcePointToSiteVPNGateway(), - "azurerm_private_endpoint": resourcePrivateEndpoint(), - "azurerm_private_link_service": resourcePrivateLinkService(), - "azurerm_public_ip": resourcePublicIp(), - "azurerm_nat_gateway_public_ip_association": resourceNATGatewayPublicIpAssociation(), - "azurerm_public_ip_prefix": resourcePublicIpPrefix(), - "azurerm_network_security_group": resourceNetworkSecurityGroup(), - "azurerm_network_security_rule": resourceNetworkSecurityRule(), - "azurerm_network_watcher_flow_log": resourceNetworkWatcherFlowLog(), - "azurerm_network_watcher": resourceNetworkWatcher(), - "azurerm_route_filter": resourceRouteFilter(), - "azurerm_route_table": resourceRouteTable(), - "azurerm_route": resourceRoute(), - "azurerm_virtual_hub_security_partner_provider": resourceVirtualHubSecurityPartnerProvider(), - "azurerm_subnet_service_endpoint_storage_policy": resourceSubnetServiceEndpointStoragePolicy(), - "azurerm_subnet_network_security_group_association": resourceSubnetNetworkSecurityGroupAssociation(), - "azurerm_subnet_route_table_association": resourceSubnetRouteTableAssociation(), 
- "azurerm_subnet_nat_gateway_association": resourceSubnetNatGatewayAssociation(), - "azurerm_subnet": resourceSubnet(), - "azurerm_virtual_hub": resourceVirtualHub(), - "azurerm_virtual_hub_bgp_connection": resourceVirtualHubBgpConnection(), - "azurerm_virtual_hub_connection": resourceVirtualHubConnection(), - "azurerm_virtual_hub_ip": resourceVirtualHubIP(), - "azurerm_virtual_hub_route_table": resourceVirtualHubRouteTable(), - "azurerm_virtual_network_gateway_connection": resourceVirtualNetworkGatewayConnection(), - "azurerm_virtual_network_gateway": resourceVirtualNetworkGateway(), - "azurerm_virtual_network_peering": resourceVirtualNetworkPeering(), - "azurerm_virtual_network": resourceVirtualNetwork(), - "azurerm_virtual_wan": resourceVirtualWan(), - "azurerm_vpn_gateway": resourceVPNGateway(), - "azurerm_vpn_gateway_connection": resourceVPNGatewayConnection(), - "azurerm_vpn_server_configuration": resourceVPNServerConfiguration(), - "azurerm_vpn_site": resourceVpnSite(), - "azurerm_web_application_firewall_policy": resourceWebApplicationFirewallPolicy(), + + "azurerm_network_packet_capture": resourceNetworkPacketCapture(), + "azurerm_network_profile": resourceNetworkProfile(), + "azurerm_packet_capture": resourcePacketCapture(), + "azurerm_point_to_site_vpn_gateway": resourcePointToSiteVPNGateway(), + "azurerm_private_endpoint": resourcePrivateEndpoint(), + "azurerm_private_link_service": resourcePrivateLinkService(), + "azurerm_public_ip": resourcePublicIp(), + "azurerm_public_ip_prefix": resourcePublicIpPrefix(), + "azurerm_network_security_group": resourceNetworkSecurityGroup(), + "azurerm_network_security_rule": resourceNetworkSecurityRule(), + "azurerm_network_watcher_flow_log": resourceNetworkWatcherFlowLog(), + "azurerm_network_watcher": resourceNetworkWatcher(), + "azurerm_route_filter": resourceRouteFilter(), + "azurerm_route_table": resourceRouteTable(), + "azurerm_route": resourceRoute(), + "azurerm_virtual_hub_security_partner_provider": 
resourceVirtualHubSecurityPartnerProvider(), + "azurerm_subnet_service_endpoint_storage_policy": resourceSubnetServiceEndpointStoragePolicy(), + "azurerm_subnet_network_security_group_association": resourceSubnetNetworkSecurityGroupAssociation(), + "azurerm_subnet_route_table_association": resourceSubnetRouteTableAssociation(), + "azurerm_subnet_nat_gateway_association": resourceSubnetNatGatewayAssociation(), + "azurerm_subnet": resourceSubnet(), + "azurerm_virtual_hub": resourceVirtualHub(), + "azurerm_virtual_hub_bgp_connection": resourceVirtualHubBgpConnection(), + "azurerm_virtual_hub_connection": resourceVirtualHubConnection(), + "azurerm_virtual_hub_ip": resourceVirtualHubIP(), + "azurerm_virtual_hub_route_table": resourceVirtualHubRouteTable(), + "azurerm_virtual_network_dns_servers": resourceVirtualNetworkDnsServers(), + "azurerm_virtual_network_gateway_connection": resourceVirtualNetworkGatewayConnection(), + "azurerm_virtual_network_gateway": resourceVirtualNetworkGateway(), + "azurerm_virtual_network_peering": resourceVirtualNetworkPeering(), + "azurerm_virtual_network": resourceVirtualNetwork(), + "azurerm_virtual_wan": resourceVirtualWan(), + "azurerm_vpn_gateway": resourceVPNGateway(), + "azurerm_vpn_gateway_connection": resourceVPNGatewayConnection(), + "azurerm_vpn_server_configuration": resourceVPNServerConfiguration(), + "azurerm_vpn_site": resourceVpnSite(), + "azurerm_web_application_firewall_policy": resourceWebApplicationFirewallPolicy(), } } diff --git a/azurerm/internal/services/network/resourceids.go b/azurerm/internal/services/network/resourceids.go index f7d1b3aa92a9..46fd6d6e52f0 100644 --- a/azurerm/internal/services/network/resourceids.go +++ b/azurerm/internal/services/network/resourceids.go @@ -13,6 +13,7 @@ package network //go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=RouteTable 
-id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/routeTables/routeTable1 //go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=Subnet -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/virtualNetworks/network1/subnets/subnet1 -rewrite=true //go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=VirtualNetwork -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/virtualNetworks/network1 -rewrite=true +//go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=VirtualNetworkDnsServers -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/virtualNetworks/network1/dnsServers/default -rewrite=true //go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=ApplicationGatewayWebApplicationFirewallPolicy -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies/applicationGatewayWebApplicationFirewallPolicy1 // Bastion @@ -60,3 +61,10 @@ package network // Virtual Network Gateway //go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=VirtualNetworkGateway -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/virtualNetworkGateways/gw1 //go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=VirtualNetworkGatewayIpConfiguration -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/virtualNetworkGateways/gw1/ipConfigurations/cfg1 + +// Express Route Connection +//go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=ExpressRouteCircuit 
-id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/expressRouteCircuits/erCircuit1 +//go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=ExpressRouteCircuitPeering -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/expressRouteCircuits/erCircuit1/peerings/peering1 +//go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=ExpressRouteGateway -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/expressRouteGateways/ergw1 +//go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=ExpressRouteCircuitConnection -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/expressRouteCircuits/circuit1/peerings/peering1/connections/connection1 +//go:generate go run ../../tools/generator-resource-id/main.go -path=./ -name=ExpressRouteConnection -id=/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/expressRouteGateways/ergw1/expressRouteConnections/erConnection1 diff --git a/azurerm/internal/services/network/route_filter_data_source.go b/azurerm/internal/services/network/route_filter_data_source.go index 90d415d16d9b..a50bf1d9446d 100644 --- a/azurerm/internal/services/network/route_filter_data_source.go +++ b/azurerm/internal/services/network/route_filter_data_source.go @@ -4,7 +4,7 @@ import ( "fmt" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" diff --git 
a/azurerm/internal/services/network/route_filter_resource.go b/azurerm/internal/services/network/route_filter_resource.go index 700a3a6d5e98..9108ed9ed398 100644 --- a/azurerm/internal/services/network/route_filter_resource.go +++ b/azurerm/internal/services/network/route_filter_resource.go @@ -5,7 +5,7 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/hashicorp/go-azure-helpers/response" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" @@ -66,7 +66,7 @@ func resourceRouteFilter() *pluginsdk.Resource { Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ - string(network.Allow), + string(network.AccessAllow), }, false), }, diff --git a/azurerm/internal/services/network/route_resource.go b/azurerm/internal/services/network/route_resource.go index 384aea5015d0..e4f157095fe1 100644 --- a/azurerm/internal/services/network/route_resource.go +++ b/azurerm/internal/services/network/route_resource.go @@ -4,15 +4,13 @@ import ( "fmt" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/validate" - - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" - + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/locks" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/parse" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/suppress" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" diff --git a/azurerm/internal/services/network/route_table_data_source.go b/azurerm/internal/services/network/route_table_data_source.go index e7542a5e9dfe..d79a29bc2942 100644 --- a/azurerm/internal/services/network/route_table_data_source.go +++ b/azurerm/internal/services/network/route_table_data_source.go @@ -4,7 +4,7 @@ import ( "fmt" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" diff --git a/azurerm/internal/services/network/route_table_resource.go b/azurerm/internal/services/network/route_table_resource.go index 0ab990fe705f..969fc0b0d39b 100644 --- a/azurerm/internal/services/network/route_table_resource.go +++ b/azurerm/internal/services/network/route_table_resource.go @@ -5,15 +5,13 @@ import ( "log" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/validate" - - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" - + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/hashicorp/go-azure-helpers/response" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/suppress" diff --git a/azurerm/internal/services/network/subnet_data_source.go b/azurerm/internal/services/network/subnet_data_source.go index 76b1bcffe9a7..10e90bd1c468 100644 --- a/azurerm/internal/services/network/subnet_data_source.go +++ b/azurerm/internal/services/network/subnet_data_source.go @@ -110,8 +110,8 @@ func dataSourceSubnetRead(d *pluginsdk.ResourceData, meta interface{}) error { d.Set("address_prefixes", utils.FlattenStringSlice(props.AddressPrefixes)) } - d.Set("enforce_private_link_endpoint_network_policies", flattenSubnetPrivateLinkNetworkPolicy(props.PrivateEndpointNetworkPolicies)) - d.Set("enforce_private_link_service_network_policies", flattenSubnetPrivateLinkNetworkPolicy(props.PrivateLinkServiceNetworkPolicies)) + d.Set("enforce_private_link_endpoint_network_policies", flattenSubnetPrivateLinkNetworkPolicy(string(props.PrivateEndpointNetworkPolicies))) + d.Set("enforce_private_link_service_network_policies", flattenSubnetPrivateLinkNetworkPolicy(string(props.PrivateLinkServiceNetworkPolicies))) networkSecurityGroupId := "" if props.NetworkSecurityGroup != nil && props.NetworkSecurityGroup.ID != nil { diff --git a/azurerm/internal/services/network/subnet_nat_gateway_association_resource.go b/azurerm/internal/services/network/subnet_nat_gateway_association_resource.go index b18e4c9e893d..3a83442a923d 100644 --- a/azurerm/internal/services/network/subnet_nat_gateway_association_resource.go +++ 
b/azurerm/internal/services/network/subnet_nat_gateway_association_resource.go @@ -5,7 +5,7 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" diff --git a/azurerm/internal/services/network/subnet_network_security_group_association_resource.go b/azurerm/internal/services/network/subnet_network_security_group_association_resource.go index ed7d9dff4f14..da8def427515 100644 --- a/azurerm/internal/services/network/subnet_network_security_group_association_resource.go +++ b/azurerm/internal/services/network/subnet_network_security_group_association_resource.go @@ -5,7 +5,7 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" @@ -76,12 +76,12 @@ func resourceSubnetNetworkSecurityGroupAssociationCreate(d *pluginsdk.ResourceDa virtualNetworkName := parsedSubnetId.Path["virtualNetworks"] resourceGroup := parsedSubnetId.ResourceGroup - locks.ByName(subnetName, SubnetResourceName) - defer locks.UnlockByName(subnetName, SubnetResourceName) - locks.ByName(virtualNetworkName, VirtualNetworkResourceName) defer locks.UnlockByName(virtualNetworkName, VirtualNetworkResourceName) + locks.ByName(subnetName, SubnetResourceName) + defer locks.UnlockByName(subnetName, SubnetResourceName) + subnet, err := client.Get(ctx, resourceGroup, 
virtualNetworkName, subnetName, "") if err != nil { if utils.ResponseWasNotFound(subnet.Response) { diff --git a/azurerm/internal/services/network/subnet_resource.go b/azurerm/internal/services/network/subnet_resource.go index ef293b77f89a..5a2c68414af8 100644 --- a/azurerm/internal/services/network/subnet_resource.go +++ b/azurerm/internal/services/network/subnet_resource.go @@ -6,7 +6,7 @@ import ( "strings" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" @@ -221,8 +221,8 @@ func resourceSubnetCreate(d *pluginsdk.ResourceData, meta interface{}) error { // Network policies like network security groups are not supported by private endpoints. 
privateEndpointNetworkPolicies := d.Get("enforce_private_link_endpoint_network_policies").(bool) privateLinkServiceNetworkPolicies := d.Get("enforce_private_link_service_network_policies").(bool) - properties.PrivateEndpointNetworkPolicies = expandSubnetPrivateLinkNetworkPolicy(privateEndpointNetworkPolicies) - properties.PrivateLinkServiceNetworkPolicies = expandSubnetPrivateLinkNetworkPolicy(privateLinkServiceNetworkPolicies) + properties.PrivateEndpointNetworkPolicies = network.VirtualNetworkPrivateEndpointNetworkPolicies(expandSubnetPrivateLinkNetworkPolicy(privateEndpointNetworkPolicies)) + properties.PrivateLinkServiceNetworkPolicies = network.VirtualNetworkPrivateLinkServiceNetworkPolicies(expandSubnetPrivateLinkNetworkPolicy(privateLinkServiceNetworkPolicies)) serviceEndpointsRaw := d.Get("service_endpoints").([]interface{}) properties.ServiceEndpoints = expandSubnetServiceEndpoints(serviceEndpointsRaw) @@ -302,12 +302,12 @@ func resourceSubnetUpdate(d *pluginsdk.ResourceData, meta interface{}) error { if d.HasChange("enforce_private_link_endpoint_network_policies") { v := d.Get("enforce_private_link_endpoint_network_policies").(bool) - props.PrivateEndpointNetworkPolicies = expandSubnetPrivateLinkNetworkPolicy(v) + props.PrivateEndpointNetworkPolicies = network.VirtualNetworkPrivateEndpointNetworkPolicies(expandSubnetPrivateLinkNetworkPolicy(v)) } if d.HasChange("enforce_private_link_service_network_policies") { v := d.Get("enforce_private_link_service_network_policies").(bool) - props.PrivateLinkServiceNetworkPolicies = expandSubnetPrivateLinkNetworkPolicy(v) + props.PrivateLinkServiceNetworkPolicies = network.VirtualNetworkPrivateLinkServiceNetworkPolicies(expandSubnetPrivateLinkNetworkPolicy(v)) } if d.HasChange("service_endpoints") { @@ -377,8 +377,8 @@ func resourceSubnetRead(d *pluginsdk.ResourceData, meta interface{}) error { return fmt.Errorf("Error flattening `delegation`: %+v", err) } - d.Set("enforce_private_link_endpoint_network_policies", 
flattenSubnetPrivateLinkNetworkPolicy(props.PrivateEndpointNetworkPolicies)) - d.Set("enforce_private_link_service_network_policies", flattenSubnetPrivateLinkNetworkPolicy(props.PrivateLinkServiceNetworkPolicies)) + d.Set("enforce_private_link_endpoint_network_policies", flattenSubnetPrivateLinkNetworkPolicy(string(props.PrivateEndpointNetworkPolicies))) + d.Set("enforce_private_link_service_network_policies", flattenSubnetPrivateLinkNetworkPolicy(string(props.PrivateLinkServiceNetworkPolicies))) serviceEndpoints := flattenSubnetServiceEndpoints(props.ServiceEndpoints) if err := d.Set("service_endpoints", serviceEndpoints); err != nil { @@ -521,26 +521,22 @@ func flattenSubnetDelegation(delegations *[]network.Delegation) []interface{} { // TODO: confirm this logic below -func expandSubnetPrivateLinkNetworkPolicy(enabled bool) *string { +func expandSubnetPrivateLinkNetworkPolicy(enabled bool) string { // This is strange logic, but to get the schema to make sense for the end user // I exposed it with the same name that the Azure CLI does to be consistent // between the tool sets, which means true == Disabled. if enabled { - return utils.String("Disabled") + return "Disabled" } - return utils.String("Enabled") + return "Enabled" } -func flattenSubnetPrivateLinkNetworkPolicy(input *string) bool { +func flattenSubnetPrivateLinkNetworkPolicy(input string) bool { // This is strange logic, but to get the schema to make sense for the end user // I exposed it with the same name that the Azure CLI does to be consistent // between the tool sets, which means true == Disabled. 
- if input == nil { - return false - } - - return strings.EqualFold(*input, "Disabled") + return strings.EqualFold(input, "Disabled") } func expandSubnetServiceEndpointPolicies(input []interface{}) *[]network.ServiceEndpointPolicy { diff --git a/azurerm/internal/services/network/subnet_route_table_association_resource.go b/azurerm/internal/services/network/subnet_route_table_association_resource.go index cfae806f8cee..ee9f3bb2f297 100644 --- a/azurerm/internal/services/network/subnet_route_table_association_resource.go +++ b/azurerm/internal/services/network/subnet_route_table_association_resource.go @@ -5,7 +5,7 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" diff --git a/azurerm/internal/services/network/subnet_service_endpoint_storage_policy_resource.go b/azurerm/internal/services/network/subnet_service_endpoint_storage_policy_resource.go index e3779cef97b9..927ea5bde6db 100644 --- a/azurerm/internal/services/network/subnet_service_endpoint_storage_policy_resource.go +++ b/azurerm/internal/services/network/subnet_service_endpoint_storage_policy_resource.go @@ -5,7 +5,7 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" diff --git a/azurerm/internal/services/network/validate/express_route_circuit_connection_id.go 
b/azurerm/internal/services/network/validate/express_route_circuit_connection_id.go new file mode 100644 index 000000000000..d0d72159d3a1 --- /dev/null +++ b/azurerm/internal/services/network/validate/express_route_circuit_connection_id.go @@ -0,0 +1,23 @@ +package validate + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "fmt" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/parse" +) + +func ExpressRouteCircuitConnectionID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := parse.ExpressRouteCircuitConnectionID(v); err != nil { + errors = append(errors, err) + } + + return +} diff --git a/azurerm/internal/services/network/validate/express_route_circuit_connection_id_test.go b/azurerm/internal/services/network/validate/express_route_circuit_connection_id_test.go new file mode 100644 index 000000000000..be46de3beee6 --- /dev/null +++ b/azurerm/internal/services/network/validate/express_route_circuit_connection_id_test.go @@ -0,0 +1,100 @@ +package validate + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import "testing" + +func TestExpressRouteCircuitConnectionID(t *testing.T) { + cases := []struct { + Input string + Valid bool + }{ + + { + // empty + Input: "", + Valid: false, + }, + + { + // missing SubscriptionId + Input: "/", + Valid: false, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Valid: false, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/", + Valid: false, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/", + Valid: false, + }, + + { + // missing ExpressRouteCircuitName + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/", + Valid: false, + }, + + { + // missing value for ExpressRouteCircuitName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/expressRouteCircuits/", + Valid: false, + }, + + { + // missing PeeringName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/expressRouteCircuits/circuit1/", + Valid: false, + }, + + { + // missing value for PeeringName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/expressRouteCircuits/circuit1/peerings/", + Valid: false, + }, + + { + // missing ConnectionName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/expressRouteCircuits/circuit1/peerings/peering1/", + Valid: false, + }, + + { + // missing value for ConnectionName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/expressRouteCircuits/circuit1/peerings/peering1/connections/", + Valid: false, + }, + + { + // valid + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/expressRouteCircuits/circuit1/peerings/peering1/connections/connection1", + Valid: true, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/RESGROUP1/PROVIDERS/MICROSOFT.NETWORK/EXPRESSROUTECIRCUITS/CIRCUIT1/PEERINGS/PEERING1/CONNECTIONS/CONNECTION1", + Valid: false, + }, + } + for _, tc := range cases { + t.Logf("[DEBUG] Testing Value %s", tc.Input) + _, errors := ExpressRouteCircuitConnectionID(tc.Input, "test") + valid := len(errors) == 0 + + if tc.Valid != valid { + t.Fatalf("Expected %t but got %t", tc.Valid, valid) + } + } +} diff --git 
a/azurerm/internal/services/network/validate/express_route_circuit_connection_name.go b/azurerm/internal/services/network/validate/express_route_circuit_connection_name.go new file mode 100644 index 000000000000..099c07792c03 --- /dev/null +++ b/azurerm/internal/services/network/validate/express_route_circuit_connection_name.go @@ -0,0 +1,21 @@ +package validate + +import ( + "fmt" + "regexp" +) + +func ExpressRouteCircuitConnectionName(i interface{}, k string) (warnings []string, errors []error) { + v, ok := i.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected type of %s to be string", k)) + return + } + + if !regexp.MustCompile(`^(([a-zA-Z0-9])|([a-zA-Z0-9][a-zA-Z0-9_.-]{0,78}[a-zA-Z0-9_]))$`).MatchString(v) { + errors = append(errors, fmt.Errorf("%q must be between 1 and 80 characters in length and must begin with a letter or number, end with a letter, number or underscore, and may contain only letters, numbers, underscores, periods, or hyphens", k)) + return + } + + return +} diff --git a/azurerm/internal/services/network/validate/express_route_circuit_connection_test.go b/azurerm/internal/services/network/validate/express_route_circuit_connection_test.go new file mode 100644 index 000000000000..bff150935ea7 --- /dev/null +++ b/azurerm/internal/services/network/validate/express_route_circuit_connection_test.go @@ -0,0 +1,86 @@ +package validate + +import ( + "strings" + "testing" +) + +func TestExpressRouteCircuitConnectionName(t *testing.T) { + testCases := []struct { + Input string + Expected bool + }{ + { + Input: "", + Expected: false, + }, + { + Input: "a", + Expected: true, + }, + { + Input: "2", + Expected: true, + }, + { + Input: "_", + Expected: false, + }, + { + Input: "a_", + Expected: true, + }, + { + Input: "2_", + Expected: true, + }, + { + Input: "_a", + Expected: false, + }, + { + Input: "1a", + Expected: true, + }, + { + Input: "a2", + Expected: true, + }, + { + Input: "a-", + Expected: false, + }, + { + Input: "a-b", + 
Expected: true, + }, + { + Input: "a.b", + Expected: true, + }, + { + Input: "Test", + Expected: true, + }, + { + Input: strings.Repeat("s", 79), + Expected: true, + }, + { + Input: strings.Repeat("s", 80), + Expected: true, + }, + { + Input: strings.Repeat("s", 81), + Expected: false, + }, + } + + for _, v := range testCases { + _, errors := ExpressRouteCircuitConnectionName(v.Input, "name") + result := len(errors) == 0 + if result != v.Expected { + t.Fatalf("Expected the result to be %t but got %t (and %d errors)", v.Expected, result, len(errors)) + } + } +} diff --git a/azurerm/internal/services/network/validate/express_route_circuit_id.go b/azurerm/internal/services/network/validate/express_route_circuit_id.go new file mode 100644 index 000000000000..8801427b12bc --- /dev/null +++ b/azurerm/internal/services/network/validate/express_route_circuit_id.go @@ -0,0 +1,23 @@ +package validate + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "fmt" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/parse" +) + +func ExpressRouteCircuitID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := parse.ExpressRouteCircuitID(v); err != nil { + errors = append(errors, err) + } + + return +} diff --git a/azurerm/internal/services/network/validate/express_route_circuit_id_test.go b/azurerm/internal/services/network/validate/express_route_circuit_id_test.go new file mode 100644 index 000000000000..bccd0e0d0717 --- /dev/null +++ b/azurerm/internal/services/network/validate/express_route_circuit_id_test.go @@ -0,0 +1,76 @@ +package validate + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import "testing" + +func TestExpressRouteCircuitID(t *testing.T) { + cases := []struct { + Input 
string + Valid bool + }{ + + { + // empty + Input: "", + Valid: false, + }, + + { + // missing SubscriptionId + Input: "/", + Valid: false, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Valid: false, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/", + Valid: false, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/", + Valid: false, + }, + + { + // missing Name + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/", + Valid: false, + }, + + { + // missing value for Name + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/expressRouteCircuits/", + Valid: false, + }, + + { + // valid + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/expressRouteCircuits/erCircuit1", + Valid: true, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/RESGROUP1/PROVIDERS/MICROSOFT.NETWORK/EXPRESSROUTECIRCUITS/ERCIRCUIT1", + Valid: false, + }, + } + for _, tc := range cases { + t.Logf("[DEBUG] Testing Value %s", tc.Input) + _, errors := ExpressRouteCircuitID(tc.Input, "test") + valid := len(errors) == 0 + + if tc.Valid != valid { + t.Fatalf("Expected %t but got %t", tc.Valid, valid) + } + } +} diff --git a/azurerm/internal/services/network/validate/express_route_circuit_peering_id.go b/azurerm/internal/services/network/validate/express_route_circuit_peering_id.go new file mode 100644 index 000000000000..15a68871c874 --- /dev/null +++ b/azurerm/internal/services/network/validate/express_route_circuit_peering_id.go @@ -0,0 +1,23 @@ +package validate + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "fmt" + + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/parse" +) + +func ExpressRouteCircuitPeeringID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := parse.ExpressRouteCircuitPeeringID(v); err != nil { + errors = append(errors, err) + } + + return +} diff --git a/azurerm/internal/services/network/validate/express_route_circuit_peering_id_test.go b/azurerm/internal/services/network/validate/express_route_circuit_peering_id_test.go new file mode 100644 index 000000000000..cb7df71488d0 --- /dev/null +++ b/azurerm/internal/services/network/validate/express_route_circuit_peering_id_test.go @@ -0,0 +1,88 @@ +package validate + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import "testing" + +func TestExpressRouteCircuitPeeringID(t *testing.T) { + cases := []struct { + Input string + Valid bool + }{ + + { + // empty + Input: "", + Valid: false, + }, + + { + // missing SubscriptionId + Input: "/", + Valid: false, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Valid: false, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/", + Valid: false, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/", + Valid: false, + }, + + { + // missing ExpressRouteCircuitName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/", + Valid: false, + }, + + { + // missing value for ExpressRouteCircuitName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/expressRouteCircuits/", + Valid: false, + }, + + { + // missing PeeringName + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/expressRouteCircuits/erCircuit1/", + Valid: false, + }, + + { + // missing value for PeeringName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/expressRouteCircuits/erCircuit1/peerings/", + Valid: false, + }, + + { + // valid + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/expressRouteCircuits/erCircuit1/peerings/peering1", + Valid: true, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/RESGROUP1/PROVIDERS/MICROSOFT.NETWORK/EXPRESSROUTECIRCUITS/ERCIRCUIT1/PEERINGS/PEERING1", + Valid: false, + }, + } + for _, tc := range cases { + t.Logf("[DEBUG] Testing Value %s", tc.Input) + _, errors := ExpressRouteCircuitPeeringID(tc.Input, "test") + valid := len(errors) == 0 + + if tc.Valid != valid { + t.Fatalf("Expected %t but got %t", tc.Valid, valid) + } + } +} diff --git a/azurerm/internal/services/network/validate/express_route_connection_id.go b/azurerm/internal/services/network/validate/express_route_connection_id.go new file mode 100644 index 000000000000..2ca191dd2d5d --- /dev/null +++ b/azurerm/internal/services/network/validate/express_route_connection_id.go @@ -0,0 +1,23 @@ +package validate + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "fmt" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/parse" +) + +func ExpressRouteConnectionID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := parse.ExpressRouteConnectionID(v); err != nil { + errors = append(errors, err) + } + + return +} diff --git 
a/azurerm/internal/services/network/validate/express_route_connection_id_test.go b/azurerm/internal/services/network/validate/express_route_connection_id_test.go new file mode 100644 index 000000000000..aebf1e14a24a --- /dev/null +++ b/azurerm/internal/services/network/validate/express_route_connection_id_test.go @@ -0,0 +1,88 @@ +package validate + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import "testing" + +func TestExpressRouteConnectionID(t *testing.T) { + cases := []struct { + Input string + Valid bool + }{ + + { + // empty + Input: "", + Valid: false, + }, + + { + // missing SubscriptionId + Input: "/", + Valid: false, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Valid: false, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/", + Valid: false, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/", + Valid: false, + }, + + { + // missing ExpressRouteGatewayName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/", + Valid: false, + }, + + { + // missing value for ExpressRouteGatewayName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/expressRouteGateways/", + Valid: false, + }, + + { + // missing Name + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/expressRouteGateways/ergw1/", + Valid: false, + }, + + { + // missing value for Name + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/expressRouteGateways/ergw1/expressRouteConnections/", + Valid: false, + }, + + { + // valid + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/expressRouteGateways/ergw1/expressRouteConnections/erConnection1", + Valid: true, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/RESGROUP1/PROVIDERS/MICROSOFT.NETWORK/EXPRESSROUTEGATEWAYS/ERGW1/EXPRESSROUTECONNECTIONS/ERCONNECTION1", + Valid: false, + }, + } + for _, tc := range cases { + t.Logf("[DEBUG] Testing Value %s", tc.Input) + _, errors := ExpressRouteConnectionID(tc.Input, "test") + valid := len(errors) == 0 + + if tc.Valid != valid { + t.Fatalf("Expected %t but got %t", tc.Valid, valid) + } + } +} diff --git a/azurerm/internal/services/network/validate/express_route_connection_name.go b/azurerm/internal/services/network/validate/express_route_connection_name.go new file mode 100644 index 000000000000..b3e82cfb349c --- /dev/null +++ b/azurerm/internal/services/network/validate/express_route_connection_name.go @@ -0,0 +1,16 @@ +package validate + +import ( + "fmt" + "regexp" +) + +func ExpressRouteConnectionName(v interface{}, k string) (warnings []string, errors []error) { + value := v.(string) + + if matched := regexp.MustCompile(`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9_.-]{0,78}[a-zA-Z0-9_])$`).Match([]byte(value)); !matched { + errors = append(errors, fmt.Errorf("%q must be between 1 and 80 characters in length, begin with a letter or number, end with a letter, number or underscore, and may contain only letters, numbers, underscores, periods or hyphens", k)) + } + + return warnings, errors +} diff --git a/azurerm/internal/services/network/validate/express_route_connection_name_test.go b/azurerm/internal/services/network/validate/express_route_connection_name_test.go new file mode 100644 index 000000000000..3cc1329154ac --- /dev/null +++ b/azurerm/internal/services/network/validate/express_route_connection_name_test.go @@ -0,0 +1,57 @@ +package validate + +import ( + "strings" + "testing" +) + 
+func TestExpressRouteConnectionName(t *testing.T) { + testCases := []struct { + Input string + Expected bool + }{ + { + Input: "", + Expected: false, + }, + { + Input: strings.Repeat("s", 79), + Expected: true, + }, + { + Input: strings.Repeat("s", 80), + Expected: true, + }, + { + Input: strings.Repeat("s", 81), + Expected: false, + }, + { + Input: "_", + Expected: false, + }, + { + Input: "a", + Expected: true, + }, + { + Input: "a_", + Expected: true, + }, + { + Input: "ab", + Expected: true, + }, + { + Input: "abc", + Expected: true, + }, + } + for _, v := range testCases { + _, errors := ExpressRouteConnectionName(v.Input, "name") + result := len(errors) == 0 + if result != v.Expected { + t.Fatalf("Expected the result to be %t but got %t (and %d errors)", v.Expected, result, len(errors)) + } + } +} diff --git a/azurerm/internal/services/network/validate/express_route_gateway_id.go b/azurerm/internal/services/network/validate/express_route_gateway_id.go new file mode 100644 index 000000000000..51d29b695994 --- /dev/null +++ b/azurerm/internal/services/network/validate/express_route_gateway_id.go @@ -0,0 +1,23 @@ +package validate + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "fmt" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/parse" +) + +func ExpressRouteGatewayID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := parse.ExpressRouteGatewayID(v); err != nil { + errors = append(errors, err) + } + + return +} diff --git a/azurerm/internal/services/network/validate/express_route_gateway_id_test.go b/azurerm/internal/services/network/validate/express_route_gateway_id_test.go new file mode 100644 index 000000000000..01c18098ea4f --- /dev/null +++ 
b/azurerm/internal/services/network/validate/express_route_gateway_id_test.go @@ -0,0 +1,76 @@ +package validate + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import "testing" + +func TestExpressRouteGatewayID(t *testing.T) { + cases := []struct { + Input string + Valid bool + }{ + + { + // empty + Input: "", + Valid: false, + }, + + { + // missing SubscriptionId + Input: "/", + Valid: false, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Valid: false, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/", + Valid: false, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/", + Valid: false, + }, + + { + // missing Name + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/", + Valid: false, + }, + + { + // missing value for Name + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/expressRouteGateways/", + Valid: false, + }, + + { + // valid + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/expressRouteGateways/ergw1", + Valid: true, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/RESGROUP1/PROVIDERS/MICROSOFT.NETWORK/EXPRESSROUTEGATEWAYS/ERGW1", + Valid: false, + }, + } + for _, tc := range cases { + t.Logf("[DEBUG] Testing Value %s", tc.Input) + _, errors := ExpressRouteGatewayID(tc.Input, "test") + valid := len(errors) == 0 + + if tc.Valid != valid { + t.Fatalf("Expected %t but got %t", tc.Valid, valid) + } + } +} diff --git a/azurerm/internal/services/network/validate/network_watcher_flow_log_name.go b/azurerm/internal/services/network/validate/network_watcher_flow_log_name.go new file mode 100644 index 
000000000000..8ffe8309e57f --- /dev/null +++ b/azurerm/internal/services/network/validate/network_watcher_flow_log_name.go @@ -0,0 +1,15 @@ +package validate + +import ( + "fmt" + "regexp" +) + +func NetworkWatcherFlowLogName(v interface{}, k string) (warnings []string, errors []error) { + value := v.(string) + if !regexp.MustCompile(`^[^\W_]([\w]{0,79}$|[\w]{0,78}[\w\-.]$)`).MatchString(value) { + errors = append(errors, fmt.Errorf("the name can be up to 80 characters long. It must begin with a word character, and it must end with a word character or with '_'. The name may contain word characters or '.', '-', '_'. %q: %q", k, value)) + } + + return warnings, errors +} diff --git a/azurerm/internal/services/network/validate/virtual_network_dns_servers_id.go b/azurerm/internal/services/network/validate/virtual_network_dns_servers_id.go new file mode 100644 index 000000000000..d5e227dce26e --- /dev/null +++ b/azurerm/internal/services/network/validate/virtual_network_dns_servers_id.go @@ -0,0 +1,23 @@ +package validate + +// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten + +import ( + "fmt" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/parse" +) + +func VirtualNetworkDnsServersID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := parse.VirtualNetworkDnsServersID(v); err != nil { + errors = append(errors, err) + } + + return +} diff --git a/azurerm/internal/services/network/validate/virtual_network_dns_servers_id_test.go b/azurerm/internal/services/network/validate/virtual_network_dns_servers_id_test.go new file mode 100644 index 000000000000..93a1da35e69f --- /dev/null +++ b/azurerm/internal/services/network/validate/virtual_network_dns_servers_id_test.go @@ -0,0 +1,88 @@ +package validate + +// NOTE: this file is generated 
via 'go:generate' - manual changes will be overwritten + +import "testing" + +func TestVirtualNetworkDnsServersID(t *testing.T) { + cases := []struct { + Input string + Valid bool + }{ + + { + // empty + Input: "", + Valid: false, + }, + + { + // missing SubscriptionId + Input: "/", + Valid: false, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Valid: false, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/", + Valid: false, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/", + Valid: false, + }, + + { + // missing VirtualNetworkName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/", + Valid: false, + }, + + { + // missing value for VirtualNetworkName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/virtualNetworks/", + Valid: false, + }, + + { + // missing DnsServerName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/virtualNetworks/network1/", + Valid: false, + }, + + { + // missing value for DnsServerName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/virtualNetworks/network1/dnsServers/", + Valid: false, + }, + + { + // valid + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Network/virtualNetworks/network1/dnsServers/default", + Valid: true, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/RESGROUP1/PROVIDERS/MICROSOFT.NETWORK/VIRTUALNETWORKS/NETWORK1/DNSSERVERS/DEFAULT", + Valid: false, + }, + } + for _, tc := range cases { + t.Logf("[DEBUG] Testing Value %s", tc.Input) + _, errors := VirtualNetworkDnsServersID(tc.Input, "test") 
+ valid := len(errors) == 0 + + if tc.Valid != valid { + t.Fatalf("Expected %t but got %t", tc.Valid, valid) + } + } +} diff --git a/azurerm/internal/services/network/virtual_hub_bgp_connection_resource.go b/azurerm/internal/services/network/virtual_hub_bgp_connection_resource.go index da7114d30e1b..70252e1e1b28 100644 --- a/azurerm/internal/services/network/virtual_hub_bgp_connection_resource.go +++ b/azurerm/internal/services/network/virtual_hub_bgp_connection_resource.go @@ -5,7 +5,7 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/locks" diff --git a/azurerm/internal/services/network/virtual_hub_connection_resource.go b/azurerm/internal/services/network/virtual_hub_connection_resource.go index a42826ffe59a..1e8126fc7e28 100644 --- a/azurerm/internal/services/network/virtual_hub_connection_resource.go +++ b/azurerm/internal/services/network/virtual_hub_connection_resource.go @@ -5,7 +5,7 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/locks" diff --git a/azurerm/internal/services/network/virtual_hub_data_source.go b/azurerm/internal/services/network/virtual_hub_data_source.go index 8829641f4d45..cb121a8a7f02 100644 --- a/azurerm/internal/services/network/virtual_hub_data_source.go +++ 
b/azurerm/internal/services/network/virtual_hub_data_source.go @@ -4,10 +4,9 @@ import ( "fmt" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/validate" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" diff --git a/azurerm/internal/services/network/virtual_hub_ip_resource.go b/azurerm/internal/services/network/virtual_hub_ip_resource.go index d1c76ccb0626..792904ec8049 100644 --- a/azurerm/internal/services/network/virtual_hub_ip_resource.go +++ b/azurerm/internal/services/network/virtual_hub_ip_resource.go @@ -5,7 +5,7 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/locks" @@ -67,10 +67,10 @@ func resourceVirtualHubIP() *pluginsdk.Resource { "private_ip_allocation_method": { Type: pluginsdk.TypeString, Optional: true, - Default: network.Dynamic, + Default: network.IPAllocationMethodDynamic, ValidateFunc: validation.StringInSlice([]string{ - string(network.Dynamic), - string(network.Static), + string(network.IPAllocationMethodDynamic), + string(network.IPAllocationMethodStatic), }, false), }, diff --git a/azurerm/internal/services/network/virtual_hub_resource.go 
b/azurerm/internal/services/network/virtual_hub_resource.go index ee5505cff8b2..348bbee76d89 100644 --- a/azurerm/internal/services/network/virtual_hub_resource.go +++ b/azurerm/internal/services/network/virtual_hub_resource.go @@ -6,7 +6,7 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/hashicorp/go-azure-helpers/response" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" @@ -180,7 +180,7 @@ func resourceVirtualHubCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) ContinuousTargetOccurence: 3, Timeout: time.Until(timeout), } - respRaw, err := stateConf.WaitForState() + respRaw, err := stateConf.WaitForStateContext(ctx) if err != nil { return fmt.Errorf("waiting for Virtual Hub %q (Host Group Name %q) provisioning route: %+v", name, resourceGroup, err) } diff --git a/azurerm/internal/services/network/virtual_hub_resource_test.go b/azurerm/internal/services/network/virtual_hub_resource_test.go index c1c862726ad4..804dbe94bcca 100644 --- a/azurerm/internal/services/network/virtual_hub_resource_test.go +++ b/azurerm/internal/services/network/virtual_hub_resource_test.go @@ -5,11 +5,10 @@ import ( "fmt" "testing" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/parse" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) diff --git a/azurerm/internal/services/network/virtual_hub_route_table_resource.go b/azurerm/internal/services/network/virtual_hub_route_table_resource.go index 119776026898..7c7b19da20f2 100644 --- a/azurerm/internal/services/network/virtual_hub_route_table_resource.go +++ b/azurerm/internal/services/network/virtual_hub_route_table_resource.go @@ -5,7 +5,7 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" diff --git a/azurerm/internal/services/network/virtual_hub_security_partner_provider_resource.go b/azurerm/internal/services/network/virtual_hub_security_partner_provider_resource.go index ae44de4230d0..20de4a6e8761 100644 --- a/azurerm/internal/services/network/virtual_hub_security_partner_provider_resource.go +++ b/azurerm/internal/services/network/virtual_hub_security_partner_provider_resource.go @@ -5,7 +5,7 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" @@ -54,9 +54,9 @@ func resourceVirtualHubSecurityPartnerProvider() *pluginsdk.Resource { Required: true, ForceNew: true, ValidateFunc: validation.StringInSlice([]string{ - string(network.ZScaler), - string(network.IBoss), - string(network.Checkpoint), + 
string(network.SecurityProviderNameZScaler), + string(network.SecurityProviderNameIBoss), + string(network.SecurityProviderNameCheckpoint), }, false), }, diff --git a/azurerm/internal/services/network/virtual_network_data_source.go b/azurerm/internal/services/network/virtual_network_data_source.go index 6414dbd7d5c8..6ad2b1a78aed 100644 --- a/azurerm/internal/services/network/virtual_network_data_source.go +++ b/azurerm/internal/services/network/virtual_network_data_source.go @@ -4,7 +4,7 @@ import ( "fmt" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/parse" diff --git a/azurerm/internal/services/network/virtual_network_dns_servers_resource.go b/azurerm/internal/services/network/virtual_network_dns_servers_resource.go new file mode 100644 index 000000000000..8d5c6be3a683 --- /dev/null +++ b/azurerm/internal/services/network/virtual_network_dns_servers_resource.go @@ -0,0 +1,177 @@ +package network + +import ( + "fmt" + "log" + "time" + + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/locks" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func resourceVirtualNetworkDnsServers() *pluginsdk.Resource { + return &pluginsdk.Resource{ + Create: resourceVirtualNetworkDnsServersCreateUpdate, + Read: resourceVirtualNetworkDnsServersRead, + Update: resourceVirtualNetworkDnsServersCreateUpdate, + Delete: resourceVirtualNetworkDnsServersDelete, + // TODO: replace this with an importer which validates the ID during import + Importer: pluginsdk.DefaultImporter(), + + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), + }, + + Schema: map[string]*pluginsdk.Schema{ + "virtual_network_id": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.VirtualNetworkID, + }, + + "dns_servers": { + Type: pluginsdk.TypeList, + Optional: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + ValidateFunc: validation.StringIsNotEmpty, + }, + }, + }, + } +} + +func resourceVirtualNetworkDnsServersCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).Network.VnetClient + ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) + defer cancel() + + vnetId, err := parse.VirtualNetworkID(d.Get("virtual_network_id").(string)) + if err != nil { + return err + } + + // This is a virtual resource so the last segment is hardcoded + id := parse.NewVirtualNetworkDnsServersID(vnetId.SubscriptionId, vnetId.ResourceGroup, vnetId.Name, "default") + + vnet, err := client.Get(ctx, id.ResourceGroup, id.VirtualNetworkName, "") + if err != nil { + if utils.ResponseWasNotFound(vnet.Response) { + return fmt.Errorf("%s could not be found: %s", vnetId, err) + } + return 
fmt.Errorf("reading %s: %s", vnetId, err) + } + + locks.ByName(id.VirtualNetworkName, VirtualNetworkResourceName) + defer locks.UnlockByName(id.VirtualNetworkName, VirtualNetworkResourceName) + + if vnet.VirtualNetworkPropertiesFormat == nil { + return fmt.Errorf("%s was returned without any properties", vnetId) + } + if vnet.VirtualNetworkPropertiesFormat.DhcpOptions == nil { + vnet.VirtualNetworkPropertiesFormat.DhcpOptions = &network.DhcpOptions{} + } + + vnet.VirtualNetworkPropertiesFormat.DhcpOptions.DNSServers = utils.ExpandStringSlice(d.Get("dns_servers").([]interface{})) + + future, err := client.CreateOrUpdate(ctx, id.ResourceGroup, id.VirtualNetworkName, vnet) + if err != nil { + return fmt.Errorf("updating %s: %+v", id, err) + } + + if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { + return fmt.Errorf("waiting for update of %s: %+v", id, err) + } + + d.SetId(id.ID()) + return resourceVirtualNetworkDnsServersRead(d, meta) +} + +func resourceVirtualNetworkDnsServersRead(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).Network.VnetClient + ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.VirtualNetworkDnsServersID(d.Id()) + if err != nil { + return err + } + + resp, err := client.Get(ctx, id.ResourceGroup, id.VirtualNetworkName, "") + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + d.SetId("") + return nil + } + return fmt.Errorf("retrieving %s: %+v", *id, err) + } + + vnetId := parse.NewVirtualNetworkID(id.SubscriptionId, id.ResourceGroup, id.VirtualNetworkName) + d.Set("virtual_network_id", vnetId.ID()) + + if props := resp.VirtualNetworkPropertiesFormat; props != nil { + if err := d.Set("dns_servers", flattenVirtualNetworkDNSServers(props.DhcpOptions)); err != nil { + return fmt.Errorf("setting `dns_servers`: %+v", err) + } + } + + return nil +} + +func resourceVirtualNetworkDnsServersDelete(d *pluginsdk.ResourceData, 
meta interface{}) error { + client := meta.(*clients.Client).Network.VnetClient + ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := parse.VirtualNetworkDnsServersID(d.Id()) + if err != nil { + return err + } + + vnetId := parse.NewVirtualNetworkID(id.SubscriptionId, id.ResourceGroup, id.VirtualNetworkName) + + vnet, err := client.Get(ctx, id.ResourceGroup, id.VirtualNetworkName, "") + if err != nil { + if utils.ResponseWasNotFound(vnet.Response) { + log.Printf("[INFO] Virtual Network %q does not exist - removing %s from state", vnetId.ID(), id) + return nil + } + return fmt.Errorf("reading %s: %s", vnetId, err) + } + + locks.ByName(id.VirtualNetworkName, VirtualNetworkResourceName) + defer locks.UnlockByName(id.VirtualNetworkName, VirtualNetworkResourceName) + + if vnet.VirtualNetworkPropertiesFormat == nil { + return fmt.Errorf("%s was returned without any properties", vnetId) + } + if vnet.VirtualNetworkPropertiesFormat.DhcpOptions == nil { + log.Printf("[INFO] dhcpOptions for %s was nil, dnsServers already deleted - removing %s from state", vnetId.ID(), id) + return nil + } + + vnet.VirtualNetworkPropertiesFormat.DhcpOptions.DNSServers = utils.ExpandStringSlice(make([]interface{}, 0)) + + future, err := client.CreateOrUpdate(ctx, id.ResourceGroup, id.VirtualNetworkName, vnet) + if err != nil { + return fmt.Errorf("deleting %s: %+v", id, err) + } + + if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { + return fmt.Errorf("waiting to delete %s: %+v", id, err) + } + + return nil +} diff --git a/azurerm/internal/services/network/virtual_network_dns_servers_resource_test.go b/azurerm/internal/services/network/virtual_network_dns_servers_resource_test.go new file mode 100644 index 000000000000..2bde1fa7addc --- /dev/null +++ b/azurerm/internal/services/network/virtual_network_dns_servers_resource_test.go @@ -0,0 +1,78 @@ +package network_test + +import ( + "context" + "fmt" + "testing" + + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/parse" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +type VirtualNetworkDnsServersResource struct{} + +func TestAccVirtualNetworkDnsServers_basic(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_virtual_network_dns_servers", "test") + r := VirtualNetworkDnsServersResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func (t VirtualNetworkDnsServersResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { + id, err := parse.VirtualNetworkDnsServersID(state.ID) + if err != nil { + return nil, err + } + + resp, err := clients.Network.VnetClient.Get(ctx, id.ResourceGroup, id.VirtualNetworkName, "") + if err != nil { + return nil, fmt.Errorf("reading %s: %+v", *id, err) + } + + exists := resp.ID != nil && resp.VirtualNetworkPropertiesFormat != nil && resp.VirtualNetworkPropertiesFormat.DhcpOptions != nil && + resp.VirtualNetworkPropertiesFormat.DhcpOptions.DNSServers != nil && len(*resp.VirtualNetworkPropertiesFormat.DhcpOptions.DNSServers) > 0 + + return utils.Bool(exists), nil +} + +func (VirtualNetworkDnsServersResource) basic(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_virtual_network" 
"test" { + name = "acctestvirtnet%d" + address_space = ["10.0.0.0/16"] + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + + subnet { + name = "subnet1" + address_prefix = "10.0.1.0/24" + } +} + +resource "azurerm_virtual_network_dns_servers" "test" { + virtual_network_id = azurerm_virtual_network.test.id + dns_servers = ["10.7.7.2", "10.7.7.7", "10.7.7.1"] +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger) +} diff --git a/azurerm/internal/services/network/virtual_network_gateway_connection_data_source.go b/azurerm/internal/services/network/virtual_network_gateway_connection_data_source.go index 409c201ff21e..8c440f403bcc 100644 --- a/azurerm/internal/services/network/virtual_network_gateway_connection_data_source.go +++ b/azurerm/internal/services/network/virtual_network_gateway_connection_data_source.go @@ -4,14 +4,13 @@ import ( "fmt" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) diff --git a/azurerm/internal/services/network/virtual_network_gateway_connection_data_source_test.go b/azurerm/internal/services/network/virtual_network_gateway_connection_data_source_test.go index 
bebec1b646d8..f88bb7726fc4 100644 --- a/azurerm/internal/services/network/virtual_network_gateway_connection_data_source_test.go +++ b/azurerm/internal/services/network/virtual_network_gateway_connection_data_source_test.go @@ -4,7 +4,7 @@ import ( "fmt" "testing" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" ) @@ -22,7 +22,7 @@ func TestAccDataSourceVirtualNetworkGatewayConnection_sitetosite(t *testing.T) { Config: r.sitetosite(data), Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).Key("shared_key").HasValue(sharedKey), - check.That(data.ResourceName).Key("type").HasValue(string(network.IPsec)), + check.That(data.ResourceName).Key("type").HasValue(string(network.VirtualNetworkGatewayConnectionTypeIPsec)), ), }, }) @@ -41,8 +41,8 @@ func TestAccDataSourceVirtualNetworkGatewayConnection_vnettovnet(t *testing.T) { Check: acceptance.ComposeTestCheckFunc( acceptance.TestCheckResourceAttr(data1.ResourceName, "shared_key", sharedKey), acceptance.TestCheckResourceAttr(data2.ResourceName, "shared_key", sharedKey), - acceptance.TestCheckResourceAttr(data1.ResourceName, "type", string(network.Vnet2Vnet)), - acceptance.TestCheckResourceAttr(data2.ResourceName, "type", string(network.Vnet2Vnet)), + acceptance.TestCheckResourceAttr(data1.ResourceName, "type", string(network.VirtualNetworkGatewayConnectionTypeVnet2Vnet)), + acceptance.TestCheckResourceAttr(data2.ResourceName, "type", string(network.VirtualNetworkGatewayConnectionTypeVnet2Vnet)), ), }, }) @@ -58,10 +58,10 @@ func TestAccDataSourceVirtualNetworkGatewayConnection_ipsecpolicy(t *testing.T) Config: r.ipsecpolicy(data), Check: acceptance.ComposeTestCheckFunc( 
check.That(data.ResourceName).Key("shared_key").HasValue(sharedKey), - check.That(data.ResourceName).Key("type").HasValue(string(network.IPsec)), + check.That(data.ResourceName).Key("type").HasValue(string(network.VirtualNetworkGatewayConnectionTypeIPsec)), check.That(data.ResourceName).Key("routing_weight").HasValue("20"), check.That(data.ResourceName).Key("ipsec_policy.0.dh_group").HasValue(string(network.DhGroupDHGroup14)), - check.That(data.ResourceName).Key("ipsec_policy.0.ike_encryption").HasValue(string(network.AES256)), + check.That(data.ResourceName).Key("ipsec_policy.0.ike_encryption").HasValue(string(network.IkeEncryptionAES256)), check.That(data.ResourceName).Key("ipsec_policy.0.ike_integrity").HasValue(string(network.IkeIntegritySHA256)), check.That(data.ResourceName).Key("ipsec_policy.0.ipsec_encryption").HasValue(string(network.IpsecEncryptionAES256)), check.That(data.ResourceName).Key("ipsec_policy.0.ipsec_integrity").HasValue(string(network.IpsecIntegritySHA256)), diff --git a/azurerm/internal/services/network/virtual_network_gateway_connection_resource.go b/azurerm/internal/services/network/virtual_network_gateway_connection_resource.go index 6ec67a8af851..9209308b3eee 100644 --- a/azurerm/internal/services/network/virtual_network_gateway_connection_resource.go +++ b/azurerm/internal/services/network/virtual_network_gateway_connection_resource.go @@ -5,12 +5,11 @@ import ( "log" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/parse" - - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/network/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/suppress" @@ -53,9 +52,9 @@ func resourceVirtualNetworkGatewayConnection() *pluginsdk.Resource { Required: true, ForceNew: true, ValidateFunc: validation.StringInSlice([]string{ - string(network.ExpressRoute), - string(network.IPsec), - string(network.Vnet2Vnet), + string(network.VirtualNetworkGatewayConnectionTypeExpressRoute), + string(network.VirtualNetworkGatewayConnectionTypeIPsec), + string(network.VirtualNetworkGatewayConnectionTypeVnet2Vnet), }, true), DiffSuppressFunc: suppress.CaseDifference, }, @@ -143,8 +142,8 @@ func resourceVirtualNetworkGatewayConnection() *pluginsdk.Resource { Computed: true, ForceNew: true, ValidateFunc: validation.StringInSlice([]string{ - string(network.IKEv1), - string(network.IKEv2), + string(network.VirtualNetworkGatewayConnectionProtocolIKEv1), + string(network.VirtualNetworkGatewayConnectionProtocolIKEv2), }, false), }, @@ -199,13 +198,13 @@ func resourceVirtualNetworkGatewayConnection() *pluginsdk.Resource { Required: true, DiffSuppressFunc: suppress.CaseDifference, ValidateFunc: validation.StringInSlice([]string{ - string(network.AES128), - string(network.AES192), - string(network.AES256), - string(network.DES), - string(network.DES3), - string(network.GCMAES128), - string(network.GCMAES256), + string(network.IkeEncryptionAES128), + string(network.IkeEncryptionAES192), + string(network.IkeEncryptionAES256), + string(network.IkeEncryptionDES), + string(network.IkeEncryptionDES3), + string(network.IkeEncryptionGCMAES128), + string(network.IkeEncryptionGCMAES256), }, true), }, @@ -583,19 +582,19 @@ func getVirtualNetworkGatewayConnectionProperties(d *pluginsdk.ResourceData) (*n 
props.IpsecPolicies = expandVirtualNetworkGatewayConnectionIpsecPolicies(v.([]interface{})) } - if props.ConnectionType == network.ExpressRoute { + if props.ConnectionType == network.VirtualNetworkGatewayConnectionTypeExpressRoute { if props.Peer == nil || props.Peer.ID == nil { return nil, fmt.Errorf("`express_route_circuit_id` must be specified when `type` is set to `ExpressRoute`") } } - if props.ConnectionType == network.IPsec { + if props.ConnectionType == network.VirtualNetworkGatewayConnectionTypeIPsec { if props.LocalNetworkGateway2 == nil || props.LocalNetworkGateway2.ID == nil { return nil, fmt.Errorf("`local_network_gateway_id` must be specified when `type` is set to `IPsec`") } } - if props.ConnectionType == network.Vnet2Vnet { + if props.ConnectionType == network.VirtualNetworkGatewayConnectionTypeVnet2Vnet { if props.VirtualNetworkGateway2 == nil || props.VirtualNetworkGateway2.ID == nil { return nil, fmt.Errorf("`peer_virtual_network_gateway_id` must be specified when `type` is set to `Vnet2Vnet`") } diff --git a/azurerm/internal/services/network/virtual_network_gateway_data_source.go b/azurerm/internal/services/network/virtual_network_gateway_data_source.go index 019acb26c09f..9b6a369edae4 100644 --- a/azurerm/internal/services/network/virtual_network_gateway_data_source.go +++ b/azurerm/internal/services/network/virtual_network_gateway_data_source.go @@ -4,7 +4,7 @@ import ( "fmt" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" diff --git a/azurerm/internal/services/network/virtual_network_gateway_resource.go b/azurerm/internal/services/network/virtual_network_gateway_resource.go index 
2c9c98d4c218..cdabdf65742b 100644 --- a/azurerm/internal/services/network/virtual_network_gateway_resource.go +++ b/azurerm/internal/services/network/virtual_network_gateway_resource.go @@ -7,7 +7,7 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" @@ -66,11 +66,11 @@ func resourceVirtualNetworkGateway() *pluginsdk.Resource { Type: pluginsdk.TypeString, Optional: true, ForceNew: true, - Default: string(network.RouteBased), + Default: string(network.VpnTypeRouteBased), DiffSuppressFunc: suppress.CaseDifference, ValidateFunc: validation.StringInSlice([]string{ - string(network.RouteBased), - string(network.PolicyBased), + string(network.VpnTypeRouteBased), + string(network.VpnTypePolicyBased), }, true), }, @@ -139,10 +139,10 @@ func resourceVirtualNetworkGateway() *pluginsdk.Resource { Type: pluginsdk.TypeString, Optional: true, ValidateFunc: validation.StringInSlice([]string{ - string(network.Static), - string(network.Dynamic), + string(network.IPAllocationMethodStatic), + string(network.IPAllocationMethodDynamic), }, false), - Default: string(network.Dynamic), + Default: string(network.IPAllocationMethodDynamic), }, "subnet_id": { @@ -289,9 +289,9 @@ func resourceVirtualNetworkGateway() *pluginsdk.Resource { Elem: &pluginsdk.Schema{ Type: pluginsdk.TypeString, ValidateFunc: validation.StringInSlice([]string{ - string(network.IkeV2), - string(network.OpenVPN), - string(network.SSTP), + string(network.VpnClientProtocolIkeV2), + string(network.VpnClientProtocolOpenVPN), + string(network.VpnClientProtocolSSTP), }, true), }, }, @@ -596,21 +596,21 @@ func getVirtualNetworkGatewayProperties(id 
parse.VirtualNetworkGatewayId, d *plu } // Sku validation for policy-based VPN gateways - if props.GatewayType == network.VirtualNetworkGatewayTypeVpn && props.VpnType == network.PolicyBased { + if props.GatewayType == network.VirtualNetworkGatewayTypeVpn && props.VpnType == network.VpnTypePolicyBased { if ok, err := evaluateSchemaValidateFunc(string(props.Sku.Name), "sku", validateVirtualNetworkGatewayPolicyBasedVpnSku()); !ok { return nil, err } } // Sku validation for route-based VPN gateways of first geneneration - if props.GatewayType == network.VirtualNetworkGatewayTypeVpn && props.VpnType == network.RouteBased && props.VpnGatewayGeneration == network.VpnGatewayGenerationGeneration1 { + if props.GatewayType == network.VirtualNetworkGatewayTypeVpn && props.VpnType == network.VpnTypeRouteBased && props.VpnGatewayGeneration == network.VpnGatewayGenerationGeneration1 { if ok, err := evaluateSchemaValidateFunc(string(props.Sku.Name), "sku", validateVirtualNetworkGatewayRouteBasedVpnSkuGeneration1()); !ok { return nil, err } } // Sku validation for route-based VPN gateways of second geneneration - if props.GatewayType == network.VirtualNetworkGatewayTypeVpn && props.VpnType == network.RouteBased && props.VpnGatewayGeneration == network.VpnGatewayGenerationGeneration2 { + if props.GatewayType == network.VirtualNetworkGatewayTypeVpn && props.VpnType == network.VpnTypeRouteBased && props.VpnGatewayGeneration == network.VpnGatewayGenerationGeneration2 { if ok, err := evaluateSchemaValidateFunc(string(props.Sku.Name), "sku", validateVirtualNetworkGatewayRouteBasedVpnSkuGeneration2()); !ok { return nil, err } diff --git a/azurerm/internal/services/network/virtual_network_peering_resource.go b/azurerm/internal/services/network/virtual_network_peering_resource.go index b7a1083269ff..80f909f1ff50 100644 --- a/azurerm/internal/services/network/virtual_network_peering_resource.go +++ b/azurerm/internal/services/network/virtual_network_peering_resource.go @@ -7,7 +7,7 @@ 
import ( "sync" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" @@ -219,6 +219,7 @@ func getVirtualNetworkPeeringProperties(d *pluginsdk.ResourceData) *network.Virt } } +//lintignore:R006 func retryVnetPeeringsClientCreateUpdate(d *pluginsdk.ResourceData, resGroup string, vnetName string, name string, peer network.VirtualNetworkPeering, meta interface{}) func() *pluginsdk.RetryError { return func() *pluginsdk.RetryError { vnetPeeringsClient := meta.(*clients.Client).Network.VnetPeeringsClient diff --git a/azurerm/internal/services/network/virtual_network_peering_resource_test.go b/azurerm/internal/services/network/virtual_network_peering_resource_test.go index ce4e80c59049..e2165299e1ff 100644 --- a/azurerm/internal/services/network/virtual_network_peering_resource_test.go +++ b/azurerm/internal/services/network/virtual_network_peering_resource_test.go @@ -6,12 +6,11 @@ import ( "testing" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) type VirtualNetworkPeeringResource struct { diff --git a/azurerm/internal/services/network/virtual_network_resource.go 
b/azurerm/internal/services/network/virtual_network_resource.go index 1264e6c4c38a..55c459e14d12 100644 --- a/azurerm/internal/services/network/virtual_network_resource.go +++ b/azurerm/internal/services/network/virtual_network_resource.go @@ -8,7 +8,7 @@ import ( "net/http" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" @@ -91,6 +91,7 @@ func resourceVirtualNetwork() *pluginsdk.Resource { "dns_servers": { Type: pluginsdk.TypeList, Optional: true, + Computed: true, Elem: &pluginsdk.Schema{ Type: pluginsdk.TypeString, ValidateFunc: validation.StringIsNotEmpty, diff --git a/azurerm/internal/services/network/virtual_wan_data_source.go b/azurerm/internal/services/network/virtual_wan_data_source.go index 88cfc3182dc1..401f45e93193 100644 --- a/azurerm/internal/services/network/virtual_wan_data_source.go +++ b/azurerm/internal/services/network/virtual_wan_data_source.go @@ -4,8 +4,7 @@ import ( "fmt" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" - + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" diff --git a/azurerm/internal/services/network/virtual_wan_resource.go b/azurerm/internal/services/network/virtual_wan_resource.go index 9fd1413c0abe..e728e02eaa56 100644 --- a/azurerm/internal/services/network/virtual_wan_resource.go +++ b/azurerm/internal/services/network/virtual_wan_resource.go @@ -5,7 +5,7 
@@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/hashicorp/go-azure-helpers/response" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" diff --git a/azurerm/internal/services/network/vpn_gateway_connection_resource.go b/azurerm/internal/services/network/vpn_gateway_connection_resource.go index 3787b6e33b9b..3fcb00e10dfc 100644 --- a/azurerm/internal/services/network/vpn_gateway_connection_resource.go +++ b/azurerm/internal/services/network/vpn_gateway_connection_resource.go @@ -5,7 +5,7 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/hashicorp/go-azure-helpers/response" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" @@ -120,10 +120,10 @@ func resourceVPNGatewayConnection() *pluginsdk.Resource { Type: pluginsdk.TypeString, Optional: true, ValidateFunc: validation.StringInSlice([]string{ - string(network.IKEv1), - string(network.IKEv2), + string(network.VirtualNetworkGatewayConnectionProtocolIKEv1), + string(network.VirtualNetworkGatewayConnectionProtocolIKEv2), }, false), - Default: string(network.IKEv2), + Default: string(network.VirtualNetworkGatewayConnectionProtocolIKEv2), }, "bandwidth_mbps": { @@ -194,13 +194,13 @@ func resourceVPNGatewayConnection() *pluginsdk.Resource { Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ - string(network.DES), - string(network.DES3), - string(network.AES128), - string(network.AES192), - string(network.AES256), - string(network.GCMAES128), - string(network.GCMAES256), + 
string(network.IkeEncryptionDES), + string(network.IkeEncryptionDES3), + string(network.IkeEncryptionAES128), + string(network.IkeEncryptionAES192), + string(network.IkeEncryptionAES256), + string(network.IkeEncryptionGCMAES128), + string(network.IkeEncryptionGCMAES256), }, false), }, diff --git a/azurerm/internal/services/network/vpn_gateway_resource.go b/azurerm/internal/services/network/vpn_gateway_resource.go index 733e25c127a0..12b4cab6e12d 100644 --- a/azurerm/internal/services/network/vpn_gateway_resource.go +++ b/azurerm/internal/services/network/vpn_gateway_resource.go @@ -6,7 +6,7 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/hashicorp/go-azure-helpers/response" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" @@ -406,7 +406,7 @@ func waitForCompletion(d *pluginsdk.ResourceData, ctx context.Context, client *n stateConf.Timeout = d.Timeout(pluginsdk.TimeoutUpdate) } - if _, err := stateConf.WaitForState(); err != nil { + if _, err := stateConf.WaitForStateContext(ctx); err != nil { return fmt.Errorf("waiting for creation of Virtual Hub %q (Resource Group %q): %+v", name, resourceGroup, err) } @@ -497,10 +497,10 @@ func vpnGatewayWaitForCreatedRefreshFunc(ctx context.Context, client *network.Vp log.Printf("[DEBUG] VPN Gateway %q (Resource Group %q) is %q..", name, resourceGroup, string(resp.VpnGatewayProperties.ProvisioningState)) switch resp.VpnGatewayProperties.ProvisioningState { - case network.Succeeded: + case network.ProvisioningStateSucceeded: return "available", "available", nil - case network.Failed: + case network.ProvisioningStateFailed: return "error", "error", fmt.Errorf("VPN Gateway %q (Resource Group %q) is in provisioningState `Failed`", name, resourceGroup) default: diff --git 
a/azurerm/internal/services/network/vpn_server_configuration_resource.go b/azurerm/internal/services/network/vpn_server_configuration_resource.go index caf18a6c7fdb..58f066dd0225 100644 --- a/azurerm/internal/services/network/vpn_server_configuration_resource.go +++ b/azurerm/internal/services/network/vpn_server_configuration_resource.go @@ -5,7 +5,7 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-07-01/network" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-11-01/network" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" @@ -51,16 +51,11 @@ func resourceVPNServerConfiguration() *pluginsdk.Resource { Elem: &pluginsdk.Schema{ Type: pluginsdk.TypeString, ValidateFunc: validation.StringInSlice([]string{ - string(network.AAD), - string(network.Certificate), - string(network.Radius), + string(network.VpnAuthenticationTypeAAD), + string(network.VpnAuthenticationTypeCertificate), + string(network.VpnAuthenticationTypeRadius), }, false), }, - - // StatusCode=400 -- Original Error: Code="MultipleVpnAuthenticationTypesNotSupprtedOnVpnServerConfiguration" - // Message="VpnServerConfiguration XXX/acctestrg-191125124621329676 supports single VpnAuthenticationType at a time. - // Customer has specified 3 number of VpnAuthenticationTypes." 
- MaxItems: 1, }, // Optional @@ -149,13 +144,13 @@ func resourceVPNServerConfiguration() *pluginsdk.Resource { Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ - string(network.AES128), - string(network.AES192), - string(network.AES256), - string(network.DES), - string(network.DES3), - string(network.GCMAES128), - string(network.GCMAES256), + string(network.IkeEncryptionAES128), + string(network.IkeEncryptionAES192), + string(network.IkeEncryptionAES256), + string(network.IkeEncryptionDES), + string(network.IkeEncryptionDES3), + string(network.IkeEncryptionGCMAES128), + string(network.IkeEncryptionGCMAES256), }, false), }, @@ -437,13 +432,13 @@ func resourceVPNServerConfigurationCreateUpdate(d *pluginsdk.ResourceData, meta authType := network.VpnAuthenticationType(v.(string)) switch authType { - case network.AAD: + case network.VpnAuthenticationTypeAAD: supportsAAD = true - case network.Certificate: + case network.VpnAuthenticationTypeCertificate: supportsCertificates = true - case network.Radius: + case network.VpnAuthenticationTypeRadius: supportsRadius = true default: diff --git a/azurerm/internal/services/network/vpn_server_configuration_resource_test.go b/azurerm/internal/services/network/vpn_server_configuration_resource_test.go index 045da61ae1e7..dd80d2e1e8d4 100644 --- a/azurerm/internal/services/network/vpn_server_configuration_resource_test.go +++ b/azurerm/internal/services/network/vpn_server_configuration_resource_test.go @@ -174,6 +174,21 @@ func TestAccVPNServerConfiguration_tags(t *testing.T) { }) } +func TestAccVPNServerConfiguration_multipleAuthTypes(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_vpn_server_configuration", "test") + r := VPNServerConfigurationResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.multipleAuthTypes(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) 
+} + func (t VPNServerConfigurationResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := parse.VpnServerConfigurationID(state.ID) if err != nil { @@ -520,6 +535,53 @@ EOF `, r.template(data), data.RandomInteger) } +func (r VPNServerConfigurationResource) multipleAuthTypes(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +data "azurerm_subscription" "current" {} + +resource "azurerm_vpn_server_configuration" "test" { + name = "acctestVPNSC-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + vpn_authentication_types = ["AAD", "Certificate"] + + azure_active_directory_authentication { + audience = "00000000-abcd-abcd-abcd-999999999999" + issuer = "https://sts.windows.net/${data.azurerm_subscription.current.tenant_id}/" + tenant = "https://login.microsoftonline.com/${data.azurerm_subscription.current.tenant_id}" + } + + client_root_certificate { + name = "DigiCert-Federated-ID-Root-CA" + public_cert_data = < 128 { + errors = append(errors, fmt.Errorf("length should be equal to or less than %d, got %q", 128, v)) + return + } + + if !regexp.MustCompile(`^[a-zA-Z0-9-_]+$`).MatchString(v) { + errors = append(errors, fmt.Errorf("%q must only contains numbers, characters and `-`, `_`, got %v", k, v)) + return + } + return +} diff --git a/azurerm/internal/services/postgres/validate/flexible_server_firewall_rule_name_test.go b/azurerm/internal/services/postgres/validate/flexible_server_firewall_rule_name_test.go new file mode 100644 index 000000000000..07fb0b0da77f --- /dev/null +++ b/azurerm/internal/services/postgres/validate/flexible_server_firewall_rule_name_test.go @@ -0,0 +1,65 @@ +package validate + +import ( + "strings" + "testing" +) + +func TestFlexibleServerFirewallRuleName(t *testing.T) { + tests := []struct { + name string + input string + valid bool + }{ + { + name: "Empty", + input: "", + valid: false, + }, + { + name: 
"Invalid Characters", + input: "flexible%", + valid: false, + }, + { + name: "One character", + input: "a", + valid: true, + }, + { + name: "End with `_`", + input: "test_", + valid: true, + }, + { + name: "Start with `-`", + input: "_test", + valid: true, + }, + { + name: "Valid", + input: "flexible-6-test", + valid: true, + }, + { + name: "Valid2", + input: "flex6ible6-6-te6st", + valid: true, + }, + { + name: "too long", + input: strings.Repeat("a", 129), + valid: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _, err := FlexibleServerFirewallRuleName(tt.input, "name") + valid := err == nil + if valid != tt.valid { + t.Errorf("Expected valid status %t but got %t for input %s", tt.valid, valid, tt.input) + } + }) + } +} diff --git a/azurerm/internal/services/powerbi/client/client.go b/azurerm/internal/services/powerbi/client/client.go index eab8f0e51c92..c35ec36342d2 100644 --- a/azurerm/internal/services/powerbi/client/client.go +++ b/azurerm/internal/services/powerbi/client/client.go @@ -1,7 +1,7 @@ package client import ( - "github.com/Azure/azure-sdk-for-go/services/powerbidedicated/mgmt/2017-10-01/powerbidedicated" + "github.com/Azure/azure-sdk-for-go/services/powerbidedicated/mgmt/2021-01-01/powerbidedicated" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/common" ) diff --git a/azurerm/internal/services/powerbi/powerbi_embedded_resource.go b/azurerm/internal/services/powerbi/powerbi_embedded_resource.go index 80a75c81735c..e73d17616aa2 100644 --- a/azurerm/internal/services/powerbi/powerbi_embedded_resource.go +++ b/azurerm/internal/services/powerbi/powerbi_embedded_resource.go @@ -5,7 +5,7 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/powerbidedicated/mgmt/2017-10-01/powerbidedicated" + "github.com/Azure/azure-sdk-for-go/services/powerbidedicated/mgmt/2021-01-01/powerbidedicated" "github.com/hashicorp/go-azure-helpers/response" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" @@ -72,6 +72,17 @@ func resourcePowerBIEmbedded() *pluginsdk.Resource { }, }, + "mode": { + Type: pluginsdk.TypeString, + Optional: true, + Default: string(powerbidedicated.ModeGen1), + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{ + string(powerbidedicated.ModeGen1), + string(powerbidedicated.ModeGen2), + }, false), + }, + "tags": tags.Schema(), }, } @@ -98,6 +109,7 @@ func resourcePowerBIEmbeddedCreate(d *pluginsdk.ResourceData, meta interface{}) location := azure.NormalizeLocation(d.Get("location").(string)) administrators := d.Get("administrators").(*pluginsdk.Set).List() skuName := d.Get("sku_name").(string) + mode := d.Get("mode").(string) t := d.Get("tags").(map[string]interface{}) parameters := powerbidedicated.DedicatedCapacity{ @@ -106,8 +118,9 @@ func resourcePowerBIEmbeddedCreate(d *pluginsdk.ResourceData, meta interface{}) Administration: &powerbidedicated.DedicatedCapacityAdministrators{ Members: utils.ExpandStringSlice(administrators), }, + Mode: powerbidedicated.Mode(mode), }, - Sku: &powerbidedicated.ResourceSku{ + Sku: &powerbidedicated.CapacitySku{ Name: utils.String(skuName), }, Tags: tags.Expand(t), @@ -162,6 +175,8 @@ func resourcePowerBIEmbeddedRead(d *pluginsdk.ResourceData, meta interface{}) er if err := d.Set("administrators", utils.FlattenStringSlice(props.Administration.Members)); err != nil { return fmt.Errorf("Error setting `administration`: %+v", err) } + + d.Set("mode", props.Mode) } skuName := "" @@ -182,6 +197,7 @@ func resourcePowerBIEmbeddedUpdate(d *pluginsdk.ResourceData, meta interface{}) resourceGroup := d.Get("resource_group_name").(string) administrators := d.Get("administrators").(*pluginsdk.Set).List() skuName := d.Get("sku_name").(string) + mode := d.Get("mode").(string) t := d.Get("tags").(map[string]interface{}) parameters := 
powerbidedicated.DedicatedCapacityUpdateParameters{ @@ -189,8 +205,9 @@ func resourcePowerBIEmbeddedUpdate(d *pluginsdk.ResourceData, meta interface{}) Administration: &powerbidedicated.DedicatedCapacityAdministrators{ Members: utils.ExpandStringSlice(administrators), }, + Mode: powerbidedicated.Mode(mode), }, - Sku: &powerbidedicated.ResourceSku{ + Sku: &powerbidedicated.CapacitySku{ Name: utils.String(skuName), }, Tags: tags.Expand(t), diff --git a/azurerm/internal/services/powerbi/powerbi_embedded_resource_test.go b/azurerm/internal/services/powerbi/powerbi_embedded_resource_test.go index 0a0fdeabfa00..d4b65f2255ec 100644 --- a/azurerm/internal/services/powerbi/powerbi_embedded_resource_test.go +++ b/azurerm/internal/services/powerbi/powerbi_embedded_resource_test.go @@ -31,70 +31,81 @@ func TestAccPowerBIEmbedded_basic(t *testing.T) { }) } -func TestAccPowerBIEmbedded_requiresImport(t *testing.T) { +func TestAccPowerBIEmbedded_complete(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_powerbi_embedded", "test") r := PowerBIEmbeddedResource{} data.ResourceTest(t, r, []acceptance.TestStep{ { - Config: r.basic(data), + Config: r.complete(data), Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, - { - Config: r.requiresImport(data), - ExpectError: acceptance.RequiresImportError("azurerm_powerbi_embedded"), - }, + data.ImportStep(), }) } -func TestAccPowerBIEmbedded_complete(t *testing.T) { +func TestAccPowerBIEmbedded_update(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_powerbi_embedded", "test") r := PowerBIEmbeddedResource{} data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), { Config: r.complete(data), Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), - 
check.That(data.ResourceName).Key("sku_name").HasValue("A2"), - check.That(data.ResourceName).Key("tags.ENV").HasValue("Test"), + ), + }, + data.ImportStep(), + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), ), }, data.ImportStep(), }) } -func TestAccPowerBIEmbedded_update(t *testing.T) { +func TestAccPowerBIEmbedded_gen2(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_powerbi_embedded", "test") r := PowerBIEmbeddedResource{} data.ResourceTest(t, r, []acceptance.TestStep{ { - Config: r.basic(data), + Config: r.gen2(data), Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), - check.That(data.ResourceName).Key("sku_name").HasValue("A1"), + check.That(data.ResourceName).Key("mode").HasValue("Gen2"), ), }, data.ImportStep(), + }) +} + +func TestAccPowerBIEmbedded_requiresImport(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_powerbi_embedded", "test") + r := PowerBIEmbeddedResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ { - Config: r.complete(data), + Config: r.basic(data), Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), - check.That(data.ResourceName).Key("sku_name").HasValue("A2"), ), }, - data.ImportStep(), { - Config: r.basic(data), - Check: acceptance.ComposeTestCheckFunc( - check.That(data.ResourceName).ExistsInAzure(r), - check.That(data.ResourceName).Key("sku_name").HasValue("A1"), - ), + Config: r.requiresImport(data), + ExpectError: acceptance.RequiresImportError("azurerm_powerbi_embedded"), }, - data.ImportStep(), }) } @@ -112,65 +123,78 @@ func (PowerBIEmbeddedResource) Exists(ctx context.Context, clients *clients.Clie return utils.Bool(resp.DedicatedCapacityProperties != nil), nil } -func (PowerBIEmbeddedResource) basic(data acceptance.TestData) string { - template := PowerBIEmbeddedResource{}.template(data) +func (PowerBIEmbeddedResource) template(data 
acceptance.TestData) string { return fmt.Sprintf(` -%s +provider "azurerm" { + features {} +} -resource "azurerm_powerbi_embedded" "test" { - name = "acctestpowerbi%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - sku_name = "A1" - administrators = ["${data.azurerm_client_config.test.object_id}"] +resource "azurerm_resource_group" "test" { + name = "acctestRG-powerbi-%[1]d" + location = "%[2]s" } -`, template, data.RandomInteger) + +data "azurerm_client_config" "test" {} +`, data.RandomInteger, data.Locations.Primary) } -func (r PowerBIEmbeddedResource) requiresImport(data acceptance.TestData) string { +func (r PowerBIEmbeddedResource) basic(data acceptance.TestData) string { return fmt.Sprintf(` -%s +%[1]s -resource "azurerm_powerbi_embedded" "import" { - name = "${azurerm_powerbi_embedded.test.name}" - location = "${azurerm_powerbi_embedded.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" +resource "azurerm_powerbi_embedded" "test" { + name = "acctestpowerbi%[2]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name sku_name = "A1" - administrators = ["${data.azurerm_client_config.test.object_id}"] + administrators = [data.azurerm_client_config.test.object_id] } -`, r.basic(data)) +`, r.template(data), data.RandomInteger) } -func (PowerBIEmbeddedResource) complete(data acceptance.TestData) string { - template := PowerBIEmbeddedResource{}.template(data) +func (r PowerBIEmbeddedResource) complete(data acceptance.TestData) string { return fmt.Sprintf(` -%s +%[1]s resource "azurerm_powerbi_embedded" "test" { - name = "acctestpowerbi%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" + name = "acctestpowerbi%[2]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name sku_name = "A2" - 
administrators = ["${data.azurerm_client_config.test.object_id}"] + administrators = [data.azurerm_client_config.test.object_id] tags = { ENV = "Test" } } -`, template, data.RandomInteger) +`, r.template(data), data.RandomInteger) } -func (PowerBIEmbeddedResource) template(data acceptance.TestData) string { +func (r PowerBIEmbeddedResource) gen2(data acceptance.TestData) string { return fmt.Sprintf(` -provider "azurerm" { - features {} -} +%[1]s -resource "azurerm_resource_group" "test" { - name = "acctestRG-powerbi-%d" - location = "%s" +resource "azurerm_powerbi_embedded" "test" { + name = "acctestpowerbi%[2]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + sku_name = "A1" + administrators = [data.azurerm_client_config.test.object_id] + mode = "Gen2" +} +`, r.template(data), data.RandomInteger) } -data "azurerm_client_config" "test" {} -`, data.RandomInteger, data.Locations.Primary) +func (r PowerBIEmbeddedResource) requiresImport(data acceptance.TestData) string { + return fmt.Sprintf(` +%[1]s + +resource "azurerm_powerbi_embedded" "import" { + name = azurerm_powerbi_embedded.test.name + location = azurerm_powerbi_embedded.test.location + resource_group_name = azurerm_resource_group.test.name + sku_name = "A1" + administrators = [data.azurerm_client_config.test.object_id] +} +`, r.basic(data)) } diff --git a/azurerm/internal/services/privatedns/private_dns_zone_resource_test.go b/azurerm/internal/services/privatedns/private_dns_zone_resource_test.go index c83a807b0da9..88273f32af7e 100644 --- a/azurerm/internal/services/privatedns/private_dns_zone_resource_test.go +++ b/azurerm/internal/services/privatedns/private_dns_zone_resource_test.go @@ -5,13 +5,12 @@ import ( "fmt" "testing" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/privatedns/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) type PrivateDnsZoneResource struct { diff --git a/azurerm/internal/services/privatedns/private_dns_zone_virtual_network_link_resource.go b/azurerm/internal/services/privatedns/private_dns_zone_virtual_network_link_resource.go index 9b224aa2da96..baab31bcb952 100644 --- a/azurerm/internal/services/privatedns/private_dns_zone_virtual_network_link_resource.go +++ b/azurerm/internal/services/privatedns/private_dns_zone_virtual_network_link_resource.go @@ -7,7 +7,6 @@ import ( "github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns" "github.com/hashicorp/go-azure-helpers/response" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" @@ -208,7 +207,7 @@ func resourcePrivateDnsZoneVirtualNetworkLinkDelete(d *pluginsdk.ResourceData, m Timeout: d.Timeout(pluginsdk.TimeoutDelete), } - if _, err := stateConf.WaitForState(); err != nil { + if _, err := stateConf.WaitForStateContext(ctx); err != nil { return fmt.Errorf("waiting for deletion of Virtual Network Link %q (Private DNS Zone %q / Resource Group %q): %+v", id.Name, id.PrivateDnsZoneName, id.ResourceGroup, err) } diff --git a/azurerm/internal/services/privatedns/private_dns_zone_virtual_network_link_resource_test.go b/azurerm/internal/services/privatedns/private_dns_zone_virtual_network_link_resource_test.go index 68cd8b10c291..59b7e66c9d54 100644 --- 
a/azurerm/internal/services/privatedns/private_dns_zone_virtual_network_link_resource_test.go +++ b/azurerm/internal/services/privatedns/private_dns_zone_virtual_network_link_resource_test.go @@ -5,11 +5,10 @@ import ( "fmt" "testing" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/privatedns/parse" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/privatedns/parse" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) diff --git a/azurerm/internal/services/recoveryservices/backup_container_storage_account_resource.go b/azurerm/internal/services/recoveryservices/backup_container_storage_account_resource.go index e0c7d817c597..1719ba61448f 100644 --- a/azurerm/internal/services/recoveryservices/backup_container_storage_account_resource.go +++ b/azurerm/internal/services/recoveryservices/backup_container_storage_account_resource.go @@ -219,7 +219,7 @@ func resourceBackupProtectionContainerStorageAccountWaitForOperation(ctx context } log.Printf("[DEBUG] Waiting for backup container operation %q (Vault %q) to complete", operationID, vaultName) - resp, err := state.WaitForState() + resp, err := state.WaitForStateContext(ctx) if err != nil { return resp.(backup.OperationStatus), err } diff --git a/azurerm/internal/services/recoveryservices/backup_policy_file_share_resource.go b/azurerm/internal/services/recoveryservices/backup_policy_file_share_resource.go index 47bd9030e4d2..313b1303f551 100644 --- a/azurerm/internal/services/recoveryservices/backup_policy_file_share_resource.go +++ 
b/azurerm/internal/services/recoveryservices/backup_policy_file_share_resource.go @@ -8,13 +8,12 @@ import ( "strings" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/recoveryservices/validate" - "github.com/Azure/azure-sdk-for-go/services/recoveryservices/mgmt/2019-05-13/backup" "github.com/Azure/go-autorest/autorest/date" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/recoveryservices/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/set" @@ -239,7 +238,7 @@ func resourceBackupProtectionPolicyFileShare() *pluginsdk.Resource { // if daily, we need daily retention // if weekly daily cannot be set, and we need weekly - CustomizeDiff: func(diff *pluginsdk.ResourceDiff, v interface{}) error { + CustomizeDiff: func(ctx context.Context, diff *pluginsdk.ResourceDiff, v interface{}) error { _, hasDaily := diff.GetOk("retention_daily") _, hasWeekly := diff.GetOk("retention_weekly") @@ -694,7 +693,7 @@ func resourceBackupProtectionPolicyFileShareWaitForUpdate(ctx context.Context, c state.Timeout = d.Timeout(pluginsdk.TimeoutUpdate) } - resp, err := state.WaitForState() + resp, err := state.WaitForStateContext(ctx) if err != nil { return resp.(backup.ProtectionPolicyResource), fmt.Errorf("Error waiting for the Recovery Service Protection Policy %q to update (Resource Group %q): %+v", policyName, resourceGroup, err) } @@ -712,7 +711,7 @@ func resourceBackupProtectionPolicyFileShareWaitForDeletion(ctx context.Context, Timeout: 
d.Timeout(pluginsdk.TimeoutDelete), } - resp, err := state.WaitForState() + resp, err := state.WaitForStateContext(ctx) if err != nil { return resp.(backup.ProtectionPolicyResource), fmt.Errorf("Error waiting for the Recovery Service Protection Policy %q to be missing (Resource Group %q): %+v", policyName, resourceGroup, err) } diff --git a/azurerm/internal/services/recoveryservices/backup_policy_vm_data_source.go b/azurerm/internal/services/recoveryservices/backup_policy_vm_data_source.go index c6c210cbbae6..be4bf038bb3e 100644 --- a/azurerm/internal/services/recoveryservices/backup_policy_vm_data_source.go +++ b/azurerm/internal/services/recoveryservices/backup_policy_vm_data_source.go @@ -6,10 +6,9 @@ import ( "strings" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/recoveryservices/validate" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/recoveryservices/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" diff --git a/azurerm/internal/services/recoveryservices/backup_policy_vm_resource.go b/azurerm/internal/services/recoveryservices/backup_policy_vm_resource.go index 4a981ff85084..dfde21f1966c 100644 --- a/azurerm/internal/services/recoveryservices/backup_policy_vm_resource.go +++ b/azurerm/internal/services/recoveryservices/backup_policy_vm_resource.go @@ -322,7 +322,7 @@ func resourceBackupProtectionPolicyVMCreateUpdate(d *pluginsdk.ResourceData, met // Less than 7 daily backups is no longer supported for create/update if (d.IsNewResource() || d.HasChange("retention_daily.0.count")) && 
(d.Get("retention_daily.0.count").(int) > 1 && d.Get("retention_daily.0.count").(int) < 7) { - return fmt.Errorf("The Azure API has recently changed behaviour so that provisioning a `count` for the `retention_daily` field can no longer be less than 7 days for new/updates to existing Backup Policies. Please ensure that `count` is less than 7, currently %d", d.Get("retention_daily.0.count").(int)) + return fmt.Errorf("The Azure API has recently changed behaviour so that provisioning a `count` for the `retention_daily` field can no longer be less than 7 days for new/updates to existing Backup Policies. Please ensure that `count` is greater than 7, currently %d", d.Get("retention_daily.0.count").(int)) } vmProtectionPolicyProperties := &backup.AzureIaaSVMProtectionPolicy{ @@ -738,7 +738,7 @@ func resourceBackupProtectionPolicyVMWaitForUpdate(ctx context.Context, client * state.Timeout = d.Timeout(pluginsdk.TimeoutUpdate) } - resp, err := state.WaitForState() + resp, err := state.WaitForStateContext(ctx) if err != nil { return resp.(backup.ProtectionPolicyResource), fmt.Errorf("Error waiting for the Azure Backup Protection Policy %q to be true (Resource Group %q) to provision: %+v", policyName, resourceGroup, err) } @@ -756,7 +756,7 @@ func resourceBackupProtectionPolicyVMWaitForDeletion(ctx context.Context, client Timeout: d.Timeout(pluginsdk.TimeoutDelete), } - resp, err := state.WaitForState() + resp, err := state.WaitForStateContext(ctx) if err != nil { return resp.(backup.ProtectionPolicyResource), fmt.Errorf("Error waiting for the Azure Backup Protection Policy %q to be false (Resource Group %q) to provision: %+v", policyName, resourceGroup, err) } diff --git a/azurerm/internal/services/recoveryservices/backup_protected_file_share_resource.go b/azurerm/internal/services/recoveryservices/backup_protected_file_share_resource.go index 1e5437441b5a..125a221db2c5 100644 --- a/azurerm/internal/services/recoveryservices/backup_protected_file_share_resource.go +++ 
b/azurerm/internal/services/recoveryservices/backup_protected_file_share_resource.go @@ -309,7 +309,7 @@ func resourceBackupProtectedFileShareWaitForOperation(ctx context.Context, clien } log.Printf("[DEBUG] Waiting for backup operation %s (Vault %s) to complete", operationID, vaultName) - resp, err := state.WaitForState() + resp, err := state.WaitForStateContext(ctx) if err != nil { return resp.(backup.OperationStatus), err } diff --git a/azurerm/internal/services/recoveryservices/backup_protected_vm_resource.go b/azurerm/internal/services/recoveryservices/backup_protected_vm_resource.go index 08815373953c..8d3715e72ab4 100644 --- a/azurerm/internal/services/recoveryservices/backup_protected_vm_resource.go +++ b/azurerm/internal/services/recoveryservices/backup_protected_vm_resource.go @@ -220,7 +220,7 @@ func resourceRecoveryServicesBackupProtectedVMWaitForStateCreateUpdate(ctx conte state.Timeout = d.Timeout(pluginsdk.TimeoutUpdate) } - resp, err := state.WaitForState() + resp, err := state.WaitForStateContext(ctx) if err != nil { i, _ := resp.(backup.ProtectedItemResource) return i, fmt.Errorf("Error waiting for the Azure Backup Protected VM %q to be true (Resource Group %q) to provision: %+v", protectedItemName, resourceGroup, err) @@ -239,7 +239,7 @@ func resourceRecoveryServicesBackupProtectedVMWaitForDeletion(ctx context.Contex Timeout: d.Timeout(pluginsdk.TimeoutDelete), } - resp, err := state.WaitForState() + resp, err := state.WaitForStateContext(ctx) if err != nil { i, _ := resp.(backup.ProtectedItemResource) return i, fmt.Errorf("Error waiting for the Azure Backup Protected VM %q to be false (Resource Group %q) to provision: %+v", protectedItemName, resourceGroup, err) diff --git a/azurerm/internal/services/recoveryservices/recovery_services_vault_resource.go b/azurerm/internal/services/recoveryservices/recovery_services_vault_resource.go index 2a57f7123da2..1e2f9d3084a3 100644 --- 
a/azurerm/internal/services/recoveryservices/recovery_services_vault_resource.go +++ b/azurerm/internal/services/recoveryservices/recovery_services_vault_resource.go @@ -6,13 +6,12 @@ import ( "strings" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/recoveryservices/validate" - "github.com/Azure/azure-sdk-for-go/services/recoveryservices/mgmt/2016-06-01/recoveryservices" "github.com/Azure/azure-sdk-for-go/services/recoveryservices/mgmt/2019-05-13/backup" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/recoveryservices/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/suppress" @@ -174,7 +173,7 @@ func resourceRecoveryServicesVaultCreateUpdate(d *pluginsdk.ResourceData, meta i stateConf.Timeout = d.Timeout(pluginsdk.TimeoutUpdate) } - if _, err := stateConf.WaitForState(); err != nil { + if _, err := stateConf.WaitForStateContext(ctx); err != nil { return fmt.Errorf("Error waiting for on update for Recovery Service Vault %q (Resource Group %q): %+v", name, resourceGroup, err) } diff --git a/azurerm/internal/services/recoveryservices/site_recovery_fabric_resource_test.go b/azurerm/internal/services/recoveryservices/site_recovery_fabric_resource_test.go index 5707fa075283..03feabaa36cc 100644 --- a/azurerm/internal/services/recoveryservices/site_recovery_fabric_resource_test.go +++ b/azurerm/internal/services/recoveryservices/site_recovery_fabric_resource_test.go @@ -6,12 +6,11 @@ import ( "testing" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) type SiteRecoveryFabricResource struct { diff --git a/azurerm/internal/services/recoveryservices/site_recovery_network_mapping_resource.go b/azurerm/internal/services/recoveryservices/site_recovery_network_mapping_resource.go index 9641c1d42f3b..580b82588fdd 100644 --- a/azurerm/internal/services/recoveryservices/site_recovery_network_mapping_resource.go +++ b/azurerm/internal/services/recoveryservices/site_recovery_network_mapping_resource.go @@ -5,12 +5,11 @@ import ( "net/http" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/recoveryservices/validate" - "github.com/Azure/azure-sdk-for-go/services/recoveryservices/mgmt/2018-07-10/siterecovery" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/recoveryservices/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/suppress" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" diff --git 
a/azurerm/internal/services/recoveryservices/site_recovery_protection_container_mapping_resource.go b/azurerm/internal/services/recoveryservices/site_recovery_protection_container_mapping_resource.go index f6e8f16701f0..47aafbc4a0c9 100644 --- a/azurerm/internal/services/recoveryservices/site_recovery_protection_container_mapping_resource.go +++ b/azurerm/internal/services/recoveryservices/site_recovery_protection_container_mapping_resource.go @@ -4,12 +4,11 @@ import ( "fmt" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/recoveryservices/validate" - "github.com/Azure/azure-sdk-for-go/services/recoveryservices/mgmt/2018-07-10/siterecovery" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/recoveryservices/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/suppress" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" diff --git a/azurerm/internal/services/recoveryservices/site_recovery_protection_container_mapping_resource_test.go b/azurerm/internal/services/recoveryservices/site_recovery_protection_container_mapping_resource_test.go index 2382e7183fd6..ae3f85633273 100644 --- a/azurerm/internal/services/recoveryservices/site_recovery_protection_container_mapping_resource_test.go +++ b/azurerm/internal/services/recoveryservices/site_recovery_protection_container_mapping_resource_test.go @@ -6,12 +6,11 @@ import ( "testing" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" 
- "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) type SiteRecoveryProtectionContainerMappingResource struct { diff --git a/azurerm/internal/services/recoveryservices/site_recovery_replicated_vm_resource.go b/azurerm/internal/services/recoveryservices/site_recovery_replicated_vm_resource.go index 730f1e3bb193..10ed87bf3575 100644 --- a/azurerm/internal/services/recoveryservices/site_recovery_replicated_vm_resource.go +++ b/azurerm/internal/services/recoveryservices/site_recovery_replicated_vm_resource.go @@ -2,18 +2,18 @@ package recoveryservices import ( "bytes" + "context" "fmt" "log" "strings" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/recoveryservices/validate" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" "github.com/Azure/azure-sdk-for-go/services/recoveryservices/mgmt/2018-07-10/siterecovery" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/recoveryservices/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/suppress" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/validation" @@ -164,6 +164,13 @@ func resourceSiteRecoveryReplicatedVM() *pluginsdk.Resource { }, 
true), DiffSuppressFunc: suppress.CaseDifference, }, + "target_disk_encryption_set_id": { + Type: pluginsdk.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: azure.ValidateResourceID, + DiffSuppressFunc: suppress.CaseDifference, + }, }, }, }, @@ -253,6 +260,7 @@ func resourceSiteRecoveryReplicatedItemCreate(d *pluginsdk.ResourceData, meta in recoveryResourceGroupId := diskInput["target_resource_group_id"].(string) targetReplicaDiskType := diskInput["target_replica_disk_type"].(string) targetDiskType := diskInput["target_disk_type"].(string) + targetEncryptionDiskSetID := diskInput["target_disk_encryption_set_id"].(string) managedDisks = append(managedDisks, siterecovery.A2AVMManagedDiskInputDetails{ DiskID: &diskId, @@ -260,6 +268,7 @@ func resourceSiteRecoveryReplicatedItemCreate(d *pluginsdk.ResourceData, meta in RecoveryResourceGroupID: &recoveryResourceGroupId, RecoveryReplicaDiskAccountType: &targetReplicaDiskType, RecoveryTargetDiskAccountType: &targetDiskType, + RecoveryDiskEncryptionSetID: &targetEncryptionDiskSetID, }) } @@ -300,8 +309,11 @@ func resourceSiteRecoveryReplicatedItemUpdate(d *pluginsdk.ResourceData, meta in vaultName := d.Get("recovery_vault_name").(string) client := meta.(*clients.Client).RecoveryServices.ReplicationMigrationItemsClient(resGroup, vaultName) + ctx, cancel := timeouts.ForUpdate(meta.(*clients.Client).StopContext, d) + defer cancel() + // We are only allowed to update the configuration once the VM is fully protected - state, err := waitForReplicationToBeHealthy(d, meta) + state, err := waitForReplicationToBeHealthy(ctx, d, meta) if err != nil { return err } @@ -319,9 +331,6 @@ func resourceSiteRecoveryReplicatedItemUpdate(d *pluginsdk.ResourceData, meta in targetAvailabilitySetID = nil } - ctx, cancel := timeouts.ForUpdate(meta.(*clients.Client).StopContext, d) - defer cancel() - vmNics := []siterecovery.VMNicInputDetails{} for _, raw := range d.Get("network_interface").(*pluginsdk.Set).List() { vmNicInput := 
raw.(map[string]interface{}) @@ -448,11 +457,41 @@ func resourceSiteRecoveryReplicatedItemRead(d *pluginsdk.ResourceData, meta inte disksOutput := make([]interface{}, 0) for _, disk := range *a2aDetails.ProtectedManagedDisks { diskOutput := make(map[string]interface{}) - diskOutput["disk_id"] = *disk.DiskID - diskOutput["staging_storage_account_id"] = *disk.PrimaryStagingAzureStorageAccountID - diskOutput["target_resource_group_id"] = *disk.RecoveryResourceGroupID - diskOutput["target_replica_disk_type"] = *disk.RecoveryReplicaDiskAccountType - diskOutput["target_disk_type"] = *disk.RecoveryTargetDiskAccountType + diskId := "" + if disk.DiskID != nil { + diskId = *disk.DiskID + } + diskOutput["disk_id"] = diskId + + primaryStagingAzureStorageAccountID := "" + if disk.PrimaryStagingAzureStorageAccountID != nil { + primaryStagingAzureStorageAccountID = *disk.PrimaryStagingAzureStorageAccountID + } + diskOutput["staging_storage_account_id"] = primaryStagingAzureStorageAccountID + + recoveryResourceGroupID := "" + if disk.RecoveryResourceGroupID != nil { + recoveryResourceGroupID = *disk.RecoveryResourceGroupID + } + diskOutput["target_resource_group_id"] = recoveryResourceGroupID + + recoveryReplicaDiskAccountType := "" + if disk.RecoveryReplicaDiskAccountType != nil { + recoveryReplicaDiskAccountType = *disk.RecoveryReplicaDiskAccountType + } + diskOutput["target_replica_disk_type"] = recoveryReplicaDiskAccountType + + recoveryTargetDiskAccountType := "" + if disk.RecoveryTargetDiskAccountType != nil { + recoveryTargetDiskAccountType = *disk.RecoveryTargetDiskAccountType + } + diskOutput["target_disk_type"] = recoveryTargetDiskAccountType + + recoveryEncryptionSetId := "" + if disk.RecoveryDiskEncryptionSetID != nil { + recoveryEncryptionSetId = *disk.RecoveryDiskEncryptionSetID + } + diskOutput["target_disk_encryption_set_id"] = recoveryEncryptionSetId disksOutput = append(disksOutput, diskOutput) } @@ -529,7 +568,7 @@ func resourceSiteRecoveryReplicatedVMDiskHash(v 
interface{}) int { return pluginsdk.HashString(buf.String()) } -func waitForReplicationToBeHealthy(d *pluginsdk.ResourceData, meta interface{}) (*siterecovery.ReplicationProtectedItem, error) { +func waitForReplicationToBeHealthy(ctx context.Context, d *pluginsdk.ResourceData, meta interface{}) (*siterecovery.ReplicationProtectedItem, error) { log.Printf("Waiting for Site Recover to replicate VM.") stateConf := &pluginsdk.StateChangeConf{ Target: []string{"Protected"}, @@ -539,7 +578,7 @@ func waitForReplicationToBeHealthy(d *pluginsdk.ResourceData, meta interface{}) stateConf.Timeout = d.Timeout(pluginsdk.TimeoutUpdate) - result, err := stateConf.WaitForState() + result, err := stateConf.WaitForStateContext(ctx) if err != nil { return nil, fmt.Errorf("Error waiting for site recovery to replicate vm: %+v", err) } diff --git a/azurerm/internal/services/recoveryservices/site_recovery_replicated_vm_resource_test.go b/azurerm/internal/services/recoveryservices/site_recovery_replicated_vm_resource_test.go index e9ded0f488ca..706cb2fe151b 100644 --- a/azurerm/internal/services/recoveryservices/site_recovery_replicated_vm_resource_test.go +++ b/azurerm/internal/services/recoveryservices/site_recovery_replicated_vm_resource_test.go @@ -6,12 +6,11 @@ import ( "testing" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) type SiteRecoveryReplicatedVmResource struct { @@ -32,6 +31,21 @@ func TestAccSiteRecoveryReplicatedVm_basic(t *testing.T) 
{ }) } +func TestAccSiteRecoveryReplicatedVm_des(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_site_recovery_replicated_vm", "test") + r := SiteRecoveryReplicatedVmResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.des(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + func (SiteRecoveryReplicatedVmResource) basic(data acceptance.TestData) string { return fmt.Sprintf(` provider "azurerm" { @@ -255,6 +269,409 @@ resource "azurerm_site_recovery_replicated_vm" "test" { `, data.RandomInteger, data.Locations.Primary, data.Locations.Secondary) } +func (SiteRecoveryReplicatedVmResource) des(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features { + key_vault { + purge_soft_delete_on_destroy = false + } + } +} + +data "azurerm_client_config" "current" {} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-recovery-%[1]d-1" + location = "%[2]s" +} + +resource "azurerm_resource_group" "test2" { + name = "acctestRG-recovery-%[1]d-2" + location = "%[3]s" +} + + +resource "azurerm_key_vault" "test" { + name = "kv%[1]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + tenant_id = data.azurerm_client_config.current.tenant_id + sku_name = "standard" + enabled_for_disk_encryption = true + purge_protection_enabled = true +} + + +resource "azurerm_key_vault_access_policy" "service-principal" { + key_vault_id = azurerm_key_vault.test.id + tenant_id = data.azurerm_client_config.current.tenant_id + object_id = data.azurerm_client_config.current.object_id + + key_permissions = [ + "create", + "delete", + "get", + "purge", + "update", + ] + + secret_permissions = [ + "get", + "delete", + "set", + ] +} + +resource "azurerm_key_vault_key" "test" { + name = "examplekey" + key_vault_id = azurerm_key_vault.test.id + key_type = "RSA" + key_size = 2048 + + 
key_opts = [ + "decrypt", + "encrypt", + "sign", + "unwrapKey", + "verify", + "wrapKey", + ] + + depends_on = ["azurerm_key_vault_access_policy.service-principal"] +} + +resource "azurerm_disk_encryption_set" "test" { + name = "acctestdes-%[1]d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + key_vault_key_id = azurerm_key_vault_key.test.id + + identity { + type = "SystemAssigned" + } +} + +resource "azurerm_key_vault_access_policy" "disk-encryption" { + key_vault_id = azurerm_key_vault.test.id + + key_permissions = [ + "get", + "wrapkey", + "unwrapkey", + ] + + tenant_id = azurerm_disk_encryption_set.test.identity.0.tenant_id + object_id = azurerm_disk_encryption_set.test.identity.0.principal_id +} + +resource "azurerm_role_assignment" "disk-encryption-read-keyvault" { + scope = azurerm_key_vault.test.id + role_definition_name = "Reader" + principal_id = azurerm_disk_encryption_set.test.identity.0.principal_id +} + +resource "azurerm_managed_disk" "test" { + name = "acctestd-%[1]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + storage_account_type = "Standard_LRS" + create_option = "Empty" + disk_size_gb = 1 + disk_encryption_set_id = azurerm_disk_encryption_set.test.id + + depends_on = [ + "azurerm_role_assignment.disk-encryption-read-keyvault", + "azurerm_key_vault_access_policy.disk-encryption", + ] +} + +resource "azurerm_virtual_network" "test" { + name = "acctvn-%[1]d" + address_space = ["10.0.0.0/16"] + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name +} + +resource "azurerm_subnet" "test" { + name = "acctsub-%[1]d" + resource_group_name = azurerm_resource_group.test.name + virtual_network_name = azurerm_virtual_network.test.name + address_prefix = "10.0.2.0/24" +} + +resource "azurerm_network_interface" "test" { + name = "acctni-%[1]d" + location = 
azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + + ip_configuration { + name = "testconfiguration1" + subnet_id = azurerm_subnet.test.id + private_ip_address_allocation = "Dynamic" + } +} + + +resource "azurerm_virtual_machine" "test" { + name = "acctvm-%[1]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + network_interface_ids = [azurerm_network_interface.test.id] + vm_size = "Standard_D1_v2" + + storage_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "16.04-LTS" + version = "latest" + } + + storage_os_disk { + name = "osd-%[1]d" + caching = "ReadWrite" + create_option = "FromImage" + disk_size_gb = "50" + managed_disk_type = "Standard_LRS" + } + + storage_data_disk { + name = "acctmd-%[1]d" + create_option = "Empty" + disk_size_gb = "1" + lun = 0 + managed_disk_type = "Standard_LRS" + } + + storage_data_disk { + name = azurerm_managed_disk.test.name + create_option = "Attach" + disk_size_gb = "1" + lun = 1 + managed_disk_id = azurerm_managed_disk.test.id + } + + os_profile { + computer_name = "hn%[1]d" + admin_username = "testadmin" + admin_password = "Password1234!" 
+ } + + os_profile_linux_config { + disable_password_authentication = false + } + + tags = { + environment = "Production" + cost-center = "Ops" + } +} + +resource "azurerm_recovery_services_vault" "test" { + name = "acctest-vault-%[1]d" + location = azurerm_resource_group.test2.location + resource_group_name = azurerm_resource_group.test2.name + sku = "Standard" + + soft_delete_enabled = false +} + +resource "azurerm_site_recovery_fabric" "test1" { + resource_group_name = azurerm_resource_group.test2.name + recovery_vault_name = azurerm_recovery_services_vault.test.name + name = "acctest-fabric1-%[1]d" + location = azurerm_resource_group.test.location +} + +resource "azurerm_site_recovery_fabric" "test2" { + resource_group_name = azurerm_resource_group.test2.name + recovery_vault_name = azurerm_recovery_services_vault.test.name + name = "acctest-fabric2-%[1]d" + location = azurerm_resource_group.test2.location + depends_on = [azurerm_site_recovery_fabric.test1] +} + +resource "azurerm_site_recovery_protection_container" "test1" { + resource_group_name = azurerm_resource_group.test2.name + recovery_vault_name = azurerm_recovery_services_vault.test.name + recovery_fabric_name = azurerm_site_recovery_fabric.test1.name + name = "acctest-protection-cont1-%[1]d" +} + +resource "azurerm_site_recovery_protection_container" "test2" { + resource_group_name = azurerm_resource_group.test2.name + recovery_vault_name = azurerm_recovery_services_vault.test.name + recovery_fabric_name = azurerm_site_recovery_fabric.test2.name + name = "acctest-protection-cont2-%[1]d" +} + +resource "azurerm_site_recovery_replication_policy" "test" { + resource_group_name = azurerm_resource_group.test2.name + recovery_vault_name = azurerm_recovery_services_vault.test.name + name = "acctest-policy-%[1]d" + recovery_point_retention_in_minutes = 24 * 60 + application_consistent_snapshot_frequency_in_minutes = 4 * 60 +} + +resource "azurerm_site_recovery_protection_container_mapping" "test" { + 
resource_group_name = azurerm_resource_group.test2.name + recovery_vault_name = azurerm_recovery_services_vault.test.name + recovery_fabric_name = azurerm_site_recovery_fabric.test1.name + recovery_source_protection_container_name = azurerm_site_recovery_protection_container.test1.name + recovery_target_protection_container_id = azurerm_site_recovery_protection_container.test2.id + recovery_replication_policy_id = azurerm_site_recovery_replication_policy.test.id + name = "mapping-%[1]d" +} + +resource "azurerm_virtual_network" "test2" { + name = "net-%[1]d-2" + resource_group_name = azurerm_resource_group.test2.name + address_space = ["192.168.2.0/24"] + location = azurerm_site_recovery_fabric.test2.location +} + +resource "azurerm_subnet" "test2_1" { + name = "acctest-snet-%[1]d-2" + resource_group_name = "${azurerm_resource_group.test2.name}" + virtual_network_name = "${azurerm_virtual_network.test2.name}" + address_prefixes = ["192.168.2.0/27"] +} + +resource "azurerm_subnet" "test2_2" { + name = "snet-%[1]d-3" + resource_group_name = "${azurerm_resource_group.test2.name}" + virtual_network_name = "${azurerm_virtual_network.test2.name}" + address_prefixes = ["192.168.2.32/27"] +} + +resource "azurerm_site_recovery_network_mapping" "test" { + resource_group_name = azurerm_resource_group.test2.name + recovery_vault_name = azurerm_recovery_services_vault.test.name + name = "mapping-%[1]d" + source_recovery_fabric_name = azurerm_site_recovery_fabric.test1.name + target_recovery_fabric_name = azurerm_site_recovery_fabric.test2.name + source_network_id = azurerm_virtual_network.test.id + target_network_id = azurerm_virtual_network.test2.id +} + +resource "azurerm_public_ip" "test-recovery" { + name = "pubip%[1]d-recovery" + allocation_method = "Static" + location = azurerm_resource_group.test2.location + resource_group_name = azurerm_resource_group.test2.name + sku = "Basic" +} + +resource "azurerm_key_vault" "test2" { + name = "kv%[1]d2" + location = 
azurerm_resource_group.test2.location + resource_group_name = azurerm_resource_group.test2.name + tenant_id = data.azurerm_client_config.current.tenant_id + sku_name = "standard" + enabled_for_disk_encryption = true + purge_protection_enabled = true +} + +resource "azurerm_key_vault_access_policy" "service-principal2" { + key_vault_id = azurerm_key_vault.test2.id + tenant_id = data.azurerm_client_config.current.tenant_id + object_id = data.azurerm_client_config.current.object_id + + key_permissions = [ + "create", + "delete", + "get", + "purge", + "update", + ] + + secret_permissions = [ + "get", + "delete", + "set", + ] +} + +resource "azurerm_key_vault_key" "test2" { + name = "examplekey" + key_vault_id = azurerm_key_vault.test2.id + key_type = "RSA" + key_size = 2048 + + key_opts = [ + "decrypt", + "encrypt", + "sign", + "unwrapKey", + "verify", + "wrapKey", + ] + + depends_on = ["azurerm_key_vault_access_policy.service-principal2"] +} + +resource "azurerm_disk_encryption_set" "test2" { + name = "acctestdes-%[1]d2" + resource_group_name = azurerm_resource_group.test2.name + location = azurerm_resource_group.test2.location + key_vault_key_id = azurerm_key_vault_key.test2.id + + identity { + type = "SystemAssigned" + } +} + +resource "azurerm_key_vault_access_policy" "disk-encryption2" { + key_vault_id = azurerm_key_vault.test2.id + + key_permissions = [ + "get", + "wrapkey", + "unwrapkey", + ] + + tenant_id = azurerm_disk_encryption_set.test2.identity.0.tenant_id + object_id = azurerm_disk_encryption_set.test2.identity.0.principal_id +} + +resource "azurerm_storage_account" "test" { + name = "acct%[1]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + account_tier = "Standard" + account_replication_type = "LRS" +} + +resource "azurerm_site_recovery_replicated_vm" "test" { + name = "repl-%[1]d" + resource_group_name = azurerm_resource_group.test2.name + recovery_vault_name = 
azurerm_recovery_services_vault.test.name + source_vm_id = azurerm_virtual_machine.test.id + source_recovery_fabric_name = azurerm_site_recovery_fabric.test1.name + recovery_replication_policy_id = azurerm_site_recovery_replication_policy.test.id + source_recovery_protection_container_name = azurerm_site_recovery_protection_container.test1.name + + target_resource_group_id = azurerm_resource_group.test2.id + target_recovery_fabric_id = azurerm_site_recovery_fabric.test2.id + target_recovery_protection_container_id = azurerm_site_recovery_protection_container.test2.id + + managed_disk { + disk_id = azurerm_virtual_machine.test.storage_os_disk[0].managed_disk_id + staging_storage_account_id = azurerm_storage_account.test.id + target_resource_group_id = azurerm_resource_group.test2.id + target_disk_type = "Premium_LRS" + target_replica_disk_type = "Premium_LRS" + target_disk_encryption_set_id = azurerm_disk_encryption_set.test2.id + } + + depends_on = [ + azurerm_site_recovery_protection_container_mapping.test, + azurerm_site_recovery_network_mapping.test, + ] +} +`, data.RandomInteger, data.Locations.Primary, data.Locations.Secondary) +} + func (t SiteRecoveryReplicatedVmResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := azure.ParseAzureResourceID(state.ID) if err != nil { diff --git a/azurerm/internal/services/recoveryservices/site_recovery_replication_policy_resource_test.go b/azurerm/internal/services/recoveryservices/site_recovery_replication_policy_resource_test.go index de2b4bcf4a81..8dd91f2f9cb0 100644 --- a/azurerm/internal/services/recoveryservices/site_recovery_replication_policy_resource_test.go +++ b/azurerm/internal/services/recoveryservices/site_recovery_replication_policy_resource_test.go @@ -6,12 +6,11 @@ import ( "testing" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" - 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/pluginsdk" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) type SiteRecoveryReplicationPolicyResource struct { diff --git a/azurerm/internal/services/redis/redis_cache_data_source.go b/azurerm/internal/services/redis/redis_cache_data_source.go index 8c75404029bc..ffe765e97389 100644 --- a/azurerm/internal/services/redis/redis_cache_data_source.go +++ b/azurerm/internal/services/redis/redis_cache_data_source.go @@ -163,6 +163,12 @@ func dataSourceRedisCache() *pluginsdk.Resource { Type: pluginsdk.TypeString, Computed: true, }, + + "maintenance_window": { + Type: pluginsdk.TypeString, + Computed: true, + }, + "start_hour_utc": { Type: pluginsdk.TypeInt, Computed: true, diff --git a/azurerm/internal/services/redis/redis_cache_resource.go b/azurerm/internal/services/redis/redis_cache_resource.go index 669a2b28fee8..f0771a29c982 100644 --- a/azurerm/internal/services/redis/redis_cache_resource.go +++ b/azurerm/internal/services/redis/redis_cache_resource.go @@ -12,6 +12,7 @@ import ( "github.com/hashicorp/go-azure-helpers/response" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + azValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/location" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/locks" @@ -223,6 +224,14 @@ func resourceRedisCache() *pluginsdk.Resource 
{ DiffSuppressFunc: suppress.CaseDifference, ValidateFunc: validation.IsDayOfTheWeek(true), }, + + "maintenance_window": { + Type: pluginsdk.TypeString, + Optional: true, + Default: "PT5H", + ValidateFunc: azValidate.ISO8601Duration, + }, + "start_hour_utc": { Type: pluginsdk.TypeInt, Optional: true, @@ -395,7 +404,7 @@ func resourceRedisCacheCreate(d *pluginsdk.ResourceData, meta interface{}) error Timeout: d.Timeout(pluginsdk.TimeoutCreate), } - if _, err = stateConf.WaitForState(); err != nil { + if _, err = stateConf.WaitForStateContext(ctx); err != nil { return fmt.Errorf("waiting for Redis Cache %q (Resource Group %q) to become available: %s", id.RediName, id.ResourceGroup, err) } @@ -486,7 +495,7 @@ func resourceRedisCacheUpdate(d *pluginsdk.ResourceData, meta interface{}) error Timeout: d.Timeout(pluginsdk.TimeoutUpdate), } - if _, err = stateConf.WaitForState(); err != nil { + if _, err = stateConf.WaitForStateContext(ctx); err != nil { return fmt.Errorf("waiting for Redis Cache %q (Resource Group %q) to become available: %+v", id.RediName, id.ResourceGroup, err) } @@ -746,11 +755,13 @@ func expandRedisPatchSchedule(d *pluginsdk.ResourceData) *redis.PatchSchedule { for _, scheduleValue := range scheduleValues { vals := scheduleValue.(map[string]interface{}) dayOfWeek := vals["day_of_week"].(string) + maintenanceWindow := vals["maintenance_window"].(string) startHourUtc := vals["start_hour_utc"].(int) entry := redis.ScheduleEntry{ - DayOfWeek: redis.DayOfWeek(dayOfWeek), - StartHourUtc: utils.Int32(int32(startHourUtc)), + DayOfWeek: redis.DayOfWeek(dayOfWeek), + MaintenanceWindow: utils.String(maintenanceWindow), + StartHourUtc: utils.Int32(int32(startHourUtc)), } entries = append(entries, entry) } @@ -875,6 +886,7 @@ func flattenRedisPatchSchedules(schedule redis.PatchSchedule) []interface{} { output := make(map[string]interface{}) output["day_of_week"] = string(entry.DayOfWeek) + output["maintenance_window"] = *entry.MaintenanceWindow 
output["start_hour_utc"] = int(*entry.StartHourUtc) outputs = append(outputs, output) diff --git a/azurerm/internal/services/redis/redis_cache_resource_test.go b/azurerm/internal/services/redis/redis_cache_resource_test.go index 8b49749321c4..9dc860c06126 100644 --- a/azurerm/internal/services/redis/redis_cache_resource_test.go +++ b/azurerm/internal/services/redis/redis_cache_resource_test.go @@ -781,8 +781,9 @@ resource "azurerm_redis_cache" "test" { } patch_schedule { - day_of_week = "Tuesday" - start_hour_utc = 8 + day_of_week = "Tuesday" + start_hour_utc = 8 + maintenance_window = "PT7H" } } `, data.RandomInteger, data.Locations.Primary, data.RandomInteger) diff --git a/azurerm/internal/services/redis/redis_linked_server_resource.go b/azurerm/internal/services/redis/redis_linked_server_resource.go index 57f43bd767ea..9aa00b2a0e30 100644 --- a/azurerm/internal/services/redis/redis_linked_server_resource.go +++ b/azurerm/internal/services/redis/redis_linked_server_resource.go @@ -131,7 +131,7 @@ func resourceRedisLinkedServerCreate(d *pluginsdk.ResourceData, meta interface{} Timeout: d.Timeout(pluginsdk.TimeoutCreate), } - if _, err = stateConf.WaitForState(); err != nil { + if _, err = stateConf.WaitForStateContext(ctx); err != nil { return fmt.Errorf("waiting for Linked Server %q (Redis Cache %q / Resource Group %q) to become available: %+v", resourceId.Name, resourceId.RediName, resourceId.ResourceGroup, err) } @@ -211,7 +211,7 @@ func resourceRedisLinkedServerDelete(d *pluginsdk.ResourceData, meta interface{} Timeout: d.Timeout(pluginsdk.TimeoutDelete), } - if _, err := stateConf.WaitForState(); err != nil { + if _, err := stateConf.WaitForStateContext(ctx); err != nil { return fmt.Errorf("waiting for Linked Server %q (Redis Cache %q / Resource Group %q) to be deleted: %+v", id.Name, id.RediName, id.ResourceGroup, err) } diff --git a/azurerm/internal/services/redisenterprise/redis_enterprise_cluster_resource.go 
b/azurerm/internal/services/redisenterprise/redis_enterprise_cluster_resource.go index d833ef447f6b..faaf93f39b0a 100644 --- a/azurerm/internal/services/redisenterprise/redis_enterprise_cluster_resource.go +++ b/azurerm/internal/services/redisenterprise/redis_enterprise_cluster_resource.go @@ -175,7 +175,7 @@ func resourceRedisEnterpriseClusterCreate(d *pluginsdk.ResourceData, meta interf Timeout: d.Timeout(pluginsdk.TimeoutCreate), } - if _, err = stateConf.WaitForState(); err != nil { + if _, err = stateConf.WaitForStateContext(ctx); err != nil { return fmt.Errorf("waiting for Redis Enterprise Cluster (Name %q / Resource Group %q) to become available: %+v", resourceId.RedisEnterpriseName, resourceId.ResourceGroup, err) } diff --git a/azurerm/internal/services/relay/relay_hybrid_connection_resource.go b/azurerm/internal/services/relay/relay_hybrid_connection_resource.go index 8a1bb5e1e6ec..e31af0228a58 100644 --- a/azurerm/internal/services/relay/relay_hybrid_connection_resource.go +++ b/azurerm/internal/services/relay/relay_hybrid_connection_resource.go @@ -8,7 +8,6 @@ import ( "github.com/Azure/azure-sdk-for-go/services/relay/mgmt/2017-04-01/relay" "github.com/hashicorp/go-azure-helpers/response" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" @@ -169,7 +168,7 @@ func resourceArmRelayHybridConnectionDelete(d *pluginsdk.ResourceData, meta inte Timeout: d.Timeout(pluginsdk.TimeoutDelete), } - if _, err := stateConf.WaitForState(); err != nil { + if _, err := stateConf.WaitForStateContext(ctx); err != nil { return fmt.Errorf("waiting for Relay Hybrid Connection %q (Namespace %q Resource Group %q) to be deleted: %+v", id.Name, id.NamespaceName, id.ResourceGroup, err) } diff --git a/azurerm/internal/services/relay/relay_namespace_resource.go 
b/azurerm/internal/services/relay/relay_namespace_resource.go index b5f03ab9271b..d6d14fcfc485 100644 --- a/azurerm/internal/services/relay/relay_namespace_resource.go +++ b/azurerm/internal/services/relay/relay_namespace_resource.go @@ -8,7 +8,6 @@ import ( "github.com/Azure/azure-sdk-for-go/services/relay/mgmt/2017-04-01/relay" "github.com/hashicorp/go-azure-helpers/response" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" @@ -216,7 +215,7 @@ func resourceRelayNamespaceDelete(d *pluginsdk.ResourceData, meta interface{}) e Timeout: d.Timeout(pluginsdk.TimeoutDelete), } - if _, err := stateConf.WaitForState(); err != nil { + if _, err := stateConf.WaitForStateContext(ctx); err != nil { return fmt.Errorf("Error waiting for Relay Namespace %q (Resource Group %q) to be deleted: %s", id.Name, id.ResourceGroup, err) } diff --git a/azurerm/internal/services/resource/client/client.go b/azurerm/internal/services/resource/client/client.go index e4bbcf54d77b..93b6411c8836 100644 --- a/azurerm/internal/services/resource/client/client.go +++ b/azurerm/internal/services/resource/client/client.go @@ -13,6 +13,7 @@ type Client struct { GroupsClient *resources.GroupsClient LocksClient *locks.ManagementLocksClient ProvidersClient *providers.ProvidersClient + ResourceProvidersClient *resources.ProvidersClient ResourcesClient *resources.Client TemplateSpecsVersionsClient *templatespecs.VersionsClient } @@ -31,6 +32,10 @@ func NewClient(o *common.ClientOptions) *Client { providersClient := providers.NewProvidersClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) o.ConfigureClient(&providersClient.Client, o.ResourceManagerAuthorizer) + // add a secondary ProvidersClient to use latest resources sdk + resourceProvidersClient := 
resources.NewProvidersClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) + o.ConfigureClient(&resourceProvidersClient.Client, o.ResourceManagerAuthorizer) + resourcesClient := resources.NewClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) o.ConfigureClient(&resourcesClient.Client, o.ResourceManagerAuthorizer) @@ -42,6 +47,7 @@ func NewClient(o *common.ClientOptions) *Client { DeploymentsClient: &deploymentsClient, LocksClient: &locksClient, ProvidersClient: &providersClient, + ResourceProvidersClient: &resourceProvidersClient, ResourcesClient: &resourcesClient, TemplateSpecsVersionsClient: &templatespecsVersionsClient, } diff --git a/azurerm/internal/services/resource/management_group_template_deployment_resource_test.go b/azurerm/internal/services/resource/management_group_template_deployment_resource_test.go index 291416efc81a..7d5bf6290509 100644 --- a/azurerm/internal/services/resource/management_group_template_deployment_resource_test.go +++ b/azurerm/internal/services/resource/management_group_template_deployment_resource_test.go @@ -97,7 +97,7 @@ resource "azurerm_management_group_template_deployment" "test" { template_content = <