diff --git a/azurerm/resource_arm_virtual_machine_scale_set.go b/azurerm/resource_arm_virtual_machine_scale_set.go index 005d0691eede..29d32cb97b7b 100644 --- a/azurerm/resource_arm_virtual_machine_scale_set.go +++ b/azurerm/resource_arm_virtual_machine_scale_set.go @@ -4,6 +4,7 @@ import ( "bytes" "fmt" "log" + "strings" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-06-01/compute" @@ -119,6 +120,57 @@ func resourceArmVirtualMachineScaleSet() *schema.Resource { string(compute.Manual), string(compute.Rolling), }, true), + DiffSuppressFunc: ignoreCaseDiffSuppressFunc, + }, + + "health_probe_id": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: azure.ValidateResourceID, + }, + + "automatic_os_upgrade": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + + "rolling_upgrade_policy": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_batch_instance_percent": { + Type: schema.TypeInt, + Optional: true, + Default: 20, + ValidateFunc: validation.IntBetween(5, 100), + }, + + "max_unhealthy_instance_percent": { + Type: schema.TypeInt, + Optional: true, + Default: 20, + ValidateFunc: validation.IntBetween(5, 100), + }, + + "max_unhealthy_upgraded_instance_percent": { + Type: schema.TypeInt, + Optional: true, + Default: 20, + ValidateFunc: validation.IntBetween(5, 100), + }, + + "pause_time_between_batches": { + Type: schema.TypeString, + Optional: true, + Default: "PT0S", + ValidateFunc: validateIso8601Duration(), + }, + }, + }, + DiffSuppressFunc: azureRmVirtualMachineScaleSetSuppressRollingUpgradePolicyDiff, }, "overprovision": { @@ -659,6 +711,8 @@ func resourceArmVirtualMachineScaleSet() *schema.Resource { "tags": tagsSchema(), }, + + CustomizeDiff: azureRmVirtualMachineScaleSetCustomizeDiff, } } @@ -712,14 +766,17 @@ func resourceArmVirtualMachineScaleSetCreate(d *schema.ResourceData, meta interf return err } - updatePolicy := 
d.Get("upgrade_policy_mode").(string) + upgradePolicy := d.Get("upgrade_policy_mode").(string) + automaticOsUpgrade := d.Get("automatic_os_upgrade").(bool) overprovision := d.Get("overprovision").(bool) singlePlacementGroup := d.Get("single_placement_group").(bool) priority := d.Get("priority").(string) scaleSetProps := compute.VirtualMachineScaleSetProperties{ UpgradePolicy: &compute.UpgradePolicy{ - Mode: compute.UpgradeMode(updatePolicy), + Mode: compute.UpgradeMode(upgradePolicy), + AutomaticOSUpgrade: utils.Bool(automaticOsUpgrade), + RollingUpgradePolicy: expandAzureRmRollingUpgradePolicy(d), }, VirtualMachineProfile: &compute.VirtualMachineScaleSetVMProfile{ NetworkProfile: expandAzureRmVirtualMachineScaleSetNetworkProfile(d), @@ -737,6 +794,12 @@ func resourceArmVirtualMachineScaleSetCreate(d *schema.ResourceData, meta interf scaleSetProps.VirtualMachineProfile.DiagnosticsProfile = &diagnosticProfile } + if v, ok := d.GetOk("health_probe_id"); ok { + scaleSetProps.VirtualMachineProfile.NetworkProfile.HealthProbe = &compute.APIEntityReference{ + ID: utils.String(v.(string)), + } + } + properties := compute.VirtualMachineScaleSet{ Name: &name, Location: &location, @@ -823,9 +886,15 @@ func resourceArmVirtualMachineScaleSetRead(d *schema.ResourceData, meta interfac } if properties := resp.VirtualMachineScaleSetProperties; properties != nil { - if upgradePolicy := properties.UpgradePolicy; upgradePolicy != nil { d.Set("upgrade_policy_mode", upgradePolicy.Mode) + d.Set("automatic_os_upgrade", upgradePolicy.AutomaticOSUpgrade) + + if rollingUpgradePolicy := upgradePolicy.RollingUpgradePolicy; rollingUpgradePolicy != nil { + if err := d.Set("rolling_upgrade_policy", flattenAzureRmVirtualMachineScaleSetRollingUpgradePolicy(rollingUpgradePolicy)); err != nil { + return fmt.Errorf("[DEBUG] Error setting Virtual Machine Scale Set Rolling Upgrade Policy error: %#v", err) + } + } } d.Set("overprovision", properties.Overprovision) d.Set("single_placement_group", 
properties.SinglePlacementGroup) @@ -852,7 +921,6 @@ func resourceArmVirtualMachineScaleSetRead(d *schema.ResourceData, meta interfac if err := d.Set("os_profile_secrets", flattenedSecrets); err != nil { return fmt.Errorf("[DEBUG] Error setting `os_profile_secrets`: %#v", err) } - } if windowsConfiguration := osProfile.WindowsConfiguration; windowsConfiguration != nil { @@ -874,6 +942,12 @@ func resourceArmVirtualMachineScaleSetRead(d *schema.ResourceData, meta interfac } if networkProfile := profile.NetworkProfile; networkProfile != nil { + if hp := networkProfile.HealthProbe; hp != nil { + if id := hp.ID; id != nil { + d.Set("health_probe_id", id) + } + } + flattenedNetworkProfile := flattenAzureRmVirtualMachineScaleSetNetworkProfile(networkProfile) if err := d.Set("network_profile", flattenedNetworkProfile); err != nil { return fmt.Errorf("[DEBUG] Error setting `network_profile`: %#v", err) @@ -913,6 +987,7 @@ func resourceArmVirtualMachineScaleSetRead(d *schema.ResourceData, meta interfac } } } + } if plan := resp.Plan; plan != nil { @@ -1088,6 +1163,25 @@ func flattenAzureRmVirtualMachineScaleSetBootDiagnostics(bootDiagnostic *compute return []interface{}{b} } +func flattenAzureRmVirtualMachineScaleSetRollingUpgradePolicy(rollingUpgradePolicy *compute.RollingUpgradePolicy) []interface{} { + b := make(map[string]interface{}, 0) + + if v := rollingUpgradePolicy.MaxBatchInstancePercent; v != nil { + b["max_batch_instance_percent"] = *v + } + if v := rollingUpgradePolicy.MaxUnhealthyInstancePercent; v != nil { + b["max_unhealthy_instance_percent"] = *v + } + if v := rollingUpgradePolicy.MaxUnhealthyUpgradedInstancePercent; v != nil { + b["max_unhealthy_upgraded_instance_percent"] = *v + } + if v := rollingUpgradePolicy.PauseTimeBetweenBatches; v != nil { + b["pause_time_between_batches"] = *v + } + + return []interface{}{b} +} + func flattenAzureRmVirtualMachineScaleSetNetworkProfile(profile *compute.VirtualMachineScaleSetNetworkProfile) []map[string]interface{} { 
networkConfigurations := profile.NetworkInterfaceConfigurations result := make([]map[string]interface{}, 0, len(*networkConfigurations)) @@ -1446,6 +1540,19 @@ func expandVirtualMachineScaleSetSku(d *schema.ResourceData) (*compute.Sku, erro return sku, nil } +func expandAzureRmRollingUpgradePolicy(d *schema.ResourceData) *compute.RollingUpgradePolicy { + if config, ok := d.GetOk("rolling_upgrade_policy.0"); ok { + policy := config.(map[string]interface{}) + return &compute.RollingUpgradePolicy{ + MaxBatchInstancePercent: utils.Int32(int32(policy["max_batch_instance_percent"].(int))), + MaxUnhealthyInstancePercent: utils.Int32(int32(policy["max_unhealthy_instance_percent"].(int))), + MaxUnhealthyUpgradedInstancePercent: utils.Int32(int32(policy["max_unhealthy_upgraded_instance_percent"].(int))), + PauseTimeBetweenBatches: utils.String(policy["pause_time_between_batches"].(string)), + } + } + return nil +} + func expandAzureRmVirtualMachineScaleSetNetworkProfile(d *schema.ResourceData) *compute.VirtualMachineScaleSetNetworkProfile { scaleSetNetworkProfileConfigs := d.Get("network_profile").(*schema.Set).List() networkProfileConfig := make([]compute.VirtualMachineScaleSetNetworkConfiguration, 0, len(scaleSetNetworkProfileConfigs)) @@ -2024,3 +2131,29 @@ func flattenAzureRmVirtualMachineScaleSetPlan(plan *compute.Plan) []interface{} return []interface{}{result} } + +// When upgrade_policy_mode is not Rolling, we will just ignore rolling_upgrade_policy (returns true). +func azureRmVirtualMachineScaleSetSuppressRollingUpgradePolicyDiff(k, old, new string, d *schema.ResourceData) bool { + if k == "rolling_upgrade_policy.#" && new == "0" { + return strings.ToLower(d.Get("upgrade_policy_mode").(string)) != "rolling" + } + return false +} + +// Make sure rolling_upgrade_policy is default value when upgrade_policy_mode is not Rolling. 
+func azureRmVirtualMachineScaleSetCustomizeDiff(d *schema.ResourceDiff, _ interface{}) error { + mode := d.Get("upgrade_policy_mode").(string) + if strings.ToLower(mode) != "rolling" { + if policyRaw, ok := d.GetOk("rolling_upgrade_policy.0"); ok { + policy := policyRaw.(map[string]interface{}) + isDefault := (policy["max_batch_instance_percent"].(int) == 20) && + (policy["max_unhealthy_instance_percent"].(int) == 20) && + (policy["max_unhealthy_upgraded_instance_percent"].(int) == 20) && + (policy["pause_time_between_batches"] == "PT0S") + if !isDefault { + return fmt.Errorf("If `upgrade_policy_mode` is `%s`, `rolling_upgrade_policy` must be removed or set to default values", mode) + } + } + } + return nil +} diff --git a/azurerm/resource_arm_virtual_machine_scale_set_test.go b/azurerm/resource_arm_virtual_machine_scale_set_test.go index 624279c753e9..185a954a2e29 100644 --- a/azurerm/resource_arm_virtual_machine_scale_set_test.go +++ b/azurerm/resource_arm_virtual_machine_scale_set_test.go @@ -5,6 +5,7 @@ import ( "net/http" "regexp" "testing" + "time" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-06-01/compute" "github.com/hashicorp/terraform/helper/acctest" @@ -778,6 +779,92 @@ func TestAccAzureRMVirtualMachineScaleSet_multipleNetworkProfiles(t *testing.T) }) } +func TestAccAzureRMVirtualMachineScaleSet_AutoUpdates(t *testing.T) { + resourceName := "azurerm_virtual_machine_scale_set.test" + ri := acctest.RandInt() + config := testAccAzureRMVirtualMachineScaleSet_rollingAutoUpdates(ri, testLocation()) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMVirtualMachineScaleSetDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMVirtualMachineScaleSetExists(resourceName), + ), + }, + }, + }) +} + +func TestAccAzureRMVirtualMachineScaleSet_upgradeModeUpdate(t *testing.T) { + resourceName := 
"azurerm_virtual_machine_scale_set.test" + ri := acctest.RandInt() + location := testLocation() + manualConfig := testAccAzureRMVirtualMachineScaleSet_upgradeModeUpdate(ri, location, "Manual") + automaticConfig := testAccAzureRMVirtualMachineScaleSet_upgradeModeUpdate(ri, location, "Automatic") + rollingConfig := testAccAzureRMVirtualMachineScaleSet_upgradeModeUpdate(ri, location, "Rolling") + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMVirtualMachineScaleSetDestroy, + Steps: []resource.TestStep{ + { + Config: manualConfig, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMVirtualMachineScaleSetExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "upgrade_policy_mode", "Manual"), + resource.TestCheckNoResourceAttr(resourceName, "rolling_upgrade_policy.#"), + ), + }, + { + Config: automaticConfig, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMVirtualMachineScaleSetExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "upgrade_policy_mode", "Automatic"), + resource.TestCheckNoResourceAttr(resourceName, "rolling_upgrade_policy.#"), + ), + }, + { + Config: rollingConfig, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMVirtualMachineScaleSetExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "upgrade_policy_mode", "Rolling"), + resource.TestCheckResourceAttr(resourceName, "rolling_upgrade_policy.#", "1"), + resource.TestCheckResourceAttr(resourceName, "rolling_upgrade_policy.0.max_batch_instance_percent", "21"), + resource.TestCheckResourceAttr(resourceName, "rolling_upgrade_policy.0.max_unhealthy_instance_percent", "22"), + resource.TestCheckResourceAttr(resourceName, "rolling_upgrade_policy.0.max_unhealthy_upgraded_instance_percent", "23"), + ), + }, + { + PreConfig: func() { time.Sleep(1 * time.Minute) }, // VM Scale Set updates are not allowed while there is a Rolling Upgrade in progress. 
+ Config: automaticConfig, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMVirtualMachineScaleSetExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "upgrade_policy_mode", "Automatic"), + resource.TestCheckResourceAttr(resourceName, "rolling_upgrade_policy.#", "1"), + resource.TestCheckResourceAttr(resourceName, "rolling_upgrade_policy.0.max_batch_instance_percent", "20"), + resource.TestCheckResourceAttr(resourceName, "rolling_upgrade_policy.0.max_unhealthy_instance_percent", "20"), + resource.TestCheckResourceAttr(resourceName, "rolling_upgrade_policy.0.max_unhealthy_upgraded_instance_percent", "20"), + ), + }, + { + Config: manualConfig, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMVirtualMachineScaleSetExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "upgrade_policy_mode", "Manual"), + resource.TestCheckResourceAttr(resourceName, "rolling_upgrade_policy.#", "1"), + resource.TestCheckResourceAttr(resourceName, "rolling_upgrade_policy.0.max_batch_instance_percent", "20"), + resource.TestCheckResourceAttr(resourceName, "rolling_upgrade_policy.0.max_unhealthy_instance_percent", "20"), + resource.TestCheckResourceAttr(resourceName, "rolling_upgrade_policy.0.max_unhealthy_upgraded_instance_percent", "20"), + ), + }, + }, + }) +} + func TestAccAzureRMVirtualMachineScaleSet_importBasic_managedDisk_withZones(t *testing.T) { resourceName := "azurerm_virtual_machine_scale_set.test" @@ -4178,3 +4265,271 @@ resource "azurerm_virtual_machine_scale_set" "test" { } `, rInt, location) } + +func testAccAzureRMVirtualMachineScaleSet_rollingAutoUpdates(rInt int, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestrg-%[1]d" + location = "%[2]s" +} + +resource "azurerm_virtual_network" "test" { + name = "acctvn-%[1]d" + address_space = ["10.0.0.0/8"] + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" 
+} + +resource "azurerm_subnet" "test" { + name = "acctsub-%[1]d" + resource_group_name = "${azurerm_resource_group.test.name}" + virtual_network_name = "${azurerm_virtual_network.test.name}" + address_prefix = "10.0.0.0/16" +} + +resource "azurerm_public_ip" "test" { + name = "acctestpip-%[1]d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + public_ip_address_allocation = "Dynamic" + idle_timeout_in_minutes = 4 +} + +resource "azurerm_lb" "test" { + name = "acctestlb-%[1]d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + + frontend_ip_configuration { + name = "PublicIPAddress" + public_ip_address_id = "${azurerm_public_ip.test.id}" + } +} + +resource "azurerm_lb_rule" "test" { + resource_group_name = "${azurerm_resource_group.test.name}" + loadbalancer_id = "${azurerm_lb.test.id}" + name = "AccTestLBRule" + protocol = "Tcp" + frontend_port = 22 + backend_port = 22 + frontend_ip_configuration_name = "PublicIPAddress" + probe_id = "${azurerm_lb_probe.test.id}" + backend_address_pool_id = "${azurerm_lb_backend_address_pool.test.id}" +} + +resource "azurerm_lb_probe" "test" { + resource_group_name = "${azurerm_resource_group.test.name}" + loadbalancer_id = "${azurerm_lb.test.id}" + name = "acctest-lb-probe" + port = 22 + protocol = "Tcp" +} + +resource "azurerm_lb_backend_address_pool" "test" { + name = "acctestbapool" + resource_group_name = "${azurerm_resource_group.test.name}" + loadbalancer_id = "${azurerm_lb.test.id}" +} + +resource "azurerm_virtual_machine_scale_set" "test" { + name = "acctvmss-%[1]d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + + upgrade_policy_mode = "Rolling" + automatic_os_upgrade = true + health_probe_id = "${azurerm_lb_probe.test.id}" + depends_on = ["azurerm_lb_rule.test"] + + rolling_upgrade_policy { + 
max_batch_instance_percent = 21 + max_unhealthy_instance_percent = 22 + max_unhealthy_upgraded_instance_percent = 23 + pause_time_between_batches = "PT30S" + } + + sku { + name = "Standard_F2" + tier = "Standard" + capacity = 1 + } + + os_profile { + computer_name_prefix = "testvm-%[1]d" + admin_username = "myadmin" + admin_password = "Passwword1234" + } + + network_profile { + name = "TestNetworkProfile" + primary = true + + ip_configuration { + name = "TestIPConfiguration" + subnet_id = "${azurerm_subnet.test.id}" + load_balancer_backend_address_pool_ids = ["${azurerm_lb_backend_address_pool.test.id}"] + primary = true + } + } + + storage_profile_os_disk { + name = "" + caching = "ReadWrite" + create_option = "FromImage" + managed_disk_type = "Standard_LRS" + } + + storage_profile_data_disk { + lun = 0 + caching = "ReadWrite" + create_option = "Empty" + disk_size_gb = 10 + managed_disk_type = "Standard_LRS" + } + + storage_profile_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "16.04-LTS" + version = "latest" + } +} +`, rInt, location) +} + +func testAccAzureRMVirtualMachineScaleSet_upgradeModeUpdate(rInt int, location string, mode string) string { + policy := "" + if mode == "Rolling" { + policy = ` + rolling_upgrade_policy { + max_batch_instance_percent = 21 + max_unhealthy_instance_percent = 22 + max_unhealthy_upgraded_instance_percent = 23 + }` + } + + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestrg-%[1]d" + location = "%[2]s" +} + +resource "azurerm_virtual_network" "test" { + name = "acctvn-%[1]d" + address_space = ["10.0.0.0/8"] + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" +} + +resource "azurerm_subnet" "test" { + name = "acctsub-%[1]d" + resource_group_name = "${azurerm_resource_group.test.name}" + virtual_network_name = "${azurerm_virtual_network.test.name}" + address_prefix = "10.0.0.0/16" +} + +resource 
"azurerm_public_ip" "test" { + name = "acctestpip-%[1]d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + public_ip_address_allocation = "Dynamic" + idle_timeout_in_minutes = 4 +} + +resource "azurerm_lb" "test" { + name = "acctestlb-%[1]d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + + frontend_ip_configuration { + name = "PublicIPAddress" + public_ip_address_id = "${azurerm_public_ip.test.id}" + } +} + +resource "azurerm_lb_rule" "test" { + resource_group_name = "${azurerm_resource_group.test.name}" + loadbalancer_id = "${azurerm_lb.test.id}" + name = "AccTestLBRule" + protocol = "Tcp" + frontend_port = 22 + backend_port = 22 + frontend_ip_configuration_name = "PublicIPAddress" + probe_id = "${azurerm_lb_probe.test.id}" + backend_address_pool_id = "${azurerm_lb_backend_address_pool.test.id}" +} + +resource "azurerm_lb_probe" "test" { + resource_group_name = "${azurerm_resource_group.test.name}" + loadbalancer_id = "${azurerm_lb.test.id}" + name = "acctest-lb-probe" + port = 22 + protocol = "Tcp" +} + +resource "azurerm_lb_backend_address_pool" "test" { + name = "acctestbapool" + resource_group_name = "${azurerm_resource_group.test.name}" + loadbalancer_id = "${azurerm_lb.test.id}" +} + +resource "azurerm_virtual_machine_scale_set" "test" { + name = "acctvmss-%[1]d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + + upgrade_policy_mode = "%[3]s" + health_probe_id = "${azurerm_lb_probe.test.id}" + depends_on = ["azurerm_lb_rule.test"] + + %[4]s + + sku { + name = "Standard_F2" + tier = "Standard" + capacity = 1 + } + + os_profile { + computer_name_prefix = "testvm-%[1]d" + admin_username = "myadmin" + admin_password = "Passwword1234" + } + + network_profile { + name = "TestNetworkProfile" + primary = true + + ip_configuration { + name = 
"TestIPConfiguration" + subnet_id = "${azurerm_subnet.test.id}" + load_balancer_backend_address_pool_ids = ["${azurerm_lb_backend_address_pool.test.id}"] + primary = true + } + } + + storage_profile_os_disk { + name = "" + caching = "ReadWrite" + create_option = "FromImage" + managed_disk_type = "Standard_LRS" + } + + storage_profile_data_disk { + lun = 0 + caching = "ReadWrite" + create_option = "Empty" + disk_size_gb = 10 + managed_disk_type = "Standard_LRS" + } + + storage_profile_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "16.04-LTS" + version = "latest" + } +} +`, rInt, location, mode, policy) +} diff --git a/examples/vmss-automatic-rolling-updates/README.md b/examples/vmss-automatic-rolling-updates/README.md new file mode 100644 index 000000000000..84d335bc5de3 --- /dev/null +++ b/examples/vmss-automatic-rolling-updates/README.md @@ -0,0 +1,22 @@ +# Linux VM Scale Set with Automatic OS upgrades and Rolling Upgrade Policy + +This template deploys a Linux (Ubuntu) VM Scale Set. Once the VMSS is deployed, the user can deploy an application inside each of the VMs (either by directly logging into the VMs or via a [`remote-exec` provisioner](https://www.terraform.io/docs/provisioners/remote-exec.html)). + +Please review the official documentation first : [Azure virtual machine scale set automatic OS upgrades](https://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-automatic-upgrade) + +## main.tf +The `main.tf` file contains the actual resources that will be deployed. It also contains the Azure Resource Group definition and any defined variables. + +## outputs.tf +This data is outputted when `terraform apply` is called, and can be queried using the `terraform output` command. + +You may leave the provider block in the `main.tf`, as it is in this template, or you can create a file called `provider.tf` and add it to your `.gitignore` file. 
+ +Azure requires that an application is added to Azure Active Directory to generate the `client_id`, `client_secret`, and `tenant_id` needed by Terraform (`subscription_id` can be recovered from your Azure account details). Please go [here](https://www.terraform.io/docs/providers/azurerm/) for full instructions on how to create this to populate your `provider.tf` file. + +## terraform.tfvars +If a `terraform.tfvars` or any `.auto.tfvars` files are present in the current directory, Terraform automatically loads them to populate variables. We don't recommend saving usernames and password to version control, but you can create a local secret variables file and use the `-var-file` flag or the `.auto.tfvars` extension to load it. + +## variables.tf +The `variables.tf` file contains all of the input parameters that the user can specify when deploying this Terraform template. + diff --git a/examples/vmss-automatic-rolling-updates/main.tf b/examples/vmss-automatic-rolling-updates/main.tf new file mode 100644 index 000000000000..8dfecbe4f6f0 --- /dev/null +++ b/examples/vmss-automatic-rolling-updates/main.tf @@ -0,0 +1,144 @@ +resource "azurerm_resource_group" "rg" { + name = "${var.resource_group_name}" + location = "${var.location}" +} + +resource "azurerm_virtual_network" "vnet" { + name = "${var.resource_group_name}vnet" + location = "${azurerm_resource_group.rg.location}" + address_space = ["10.0.0.0/16"] + resource_group_name = "${azurerm_resource_group.rg.name}" +} + +resource "azurerm_subnet" "subnet" { + name = "subnet" + address_prefix = "10.0.0.0/24" + resource_group_name = "${azurerm_resource_group.rg.name}" + virtual_network_name = "${azurerm_virtual_network.vnet.name}" +} + +resource "azurerm_public_ip" "pip" { + name = "${var.hostname}-pip" + location = "${azurerm_resource_group.rg.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" + public_ip_address_allocation = "Dynamic" + domain_name_label = "${var.hostname}" +} + +resource 
"azurerm_lb" "lb" { + name = "LoadBalancer" + location = "${azurerm_resource_group.rg.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" + depends_on = ["azurerm_public_ip.pip"] + + frontend_ip_configuration { + name = "LBFrontEnd" + public_ip_address_id = "${azurerm_public_ip.pip.id}" + } +} + +resource "azurerm_lb_backend_address_pool" "backlb" { + name = "BackEndAddressPool" + resource_group_name = "${azurerm_resource_group.rg.name}" + loadbalancer_id = "${azurerm_lb.lb.id}" +} + +resource "azurerm_lb_probe" "prob" { + resource_group_name = "${azurerm_resource_group.test.name}" + loadbalancer_id = "${azurerm_lb.lb.id}" + name = "ssh-running-probe" + port = 22 + protocol = "Tcp" +} + +resource "azurerm_lb_rule" "lbr" { + resource_group_name = "${azurerm_resource_group.rg.name}" + loadbalancer_id = "${azurerm_lb.lb.id}" + name = "LBRule" + protocol = "Tcp" + frontend_port = 22 + backend_port = 22 + frontend_ip_configuration_name = "LBFrontEnd" + probe_id = "${azurerm_lb_probe.prob.id}" + backend_address_pool_id = "${azurerm_lb_backend_address_pool.backlb.id}" +} + +resource "azurerm_storage_account" "stor" { + name = "${var.resource_group_name}stor" + location = "${azurerm_resource_group.rg.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" + account_tier = "${var.storage_account_tier}" + account_replication_type = "${var.storage_replication_type}" +} + +resource "azurerm_storage_container" "vhds" { + name = "vhds" + resource_group_name = "${azurerm_resource_group.rg.name}" + storage_account_name = "${azurerm_storage_account.stor.name}" + container_access_type = "blob" +} + +resource "azurerm_virtual_machine_scale_set" "scaleset" { + name = "autoscalewad" + location = "${azurerm_resource_group.rg.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" + upgrade_policy_mode = "Manual" + overprovision = true + depends_on = ["azurerm_lb.lb", "azurerm_virtual_network.vnet"] + + upgrade_policy_mode = "Rolling" + + 
automatic_os_upgrade = true + + rolling_upgrade_policy { + max_batch_instance_percent = 20 + max_unhealthy_instance_percent = 20 + max_unhealthy_upgraded_instance_percent = 20 + pause_time_between_batches = "PT0S" + } + + health_probe_id = "${azurerm_lb_probe.prob.id}" + depends_on = ["azurerm_lb_rule.lbr"] + + sku { + name = "${var.vm_sku}" + tier = "Standard" + capacity = "${var.instance_count}" + } + + os_profile { + computer_name_prefix = "${var.vmss_name_prefix}" + admin_username = "${var.admin_username}" + admin_password = "${var.admin_password}" + } + + os_profile_linux_config { + disable_password_authentication = false + } + + network_profile { + name = "${var.hostname}-nic" + primary = true + + ip_configuration { + primary = true + name = "${var.hostname}ipconfig" + subnet_id = "${azurerm_subnet.subnet.id}" + load_balancer_backend_address_pool_ids = ["${azurerm_lb_backend_address_pool.backlb.id}"] + } + } + + storage_profile_os_disk { + name = "${var.hostname}" + caching = "ReadWrite" + create_option = "FromImage" + vhd_containers = ["${azurerm_storage_account.stor.primary_blob_endpoint}${azurerm_storage_container.vhds.name}"] + } + + storage_profile_image_reference { + publisher = "${var.image_publisher}" + offer = "${var.image_offer}" + sku = "${var.ubuntu_os_version}" + version = "latest" + } +} diff --git a/examples/vmss-automatic-rolling-updates/outputs.tf b/examples/vmss-automatic-rolling-updates/outputs.tf new file mode 100644 index 000000000000..668e48e29c18 --- /dev/null +++ b/examples/vmss-automatic-rolling-updates/outputs.tf @@ -0,0 +1,3 @@ +output "hostname" { + value = "${var.vmss_name_prefix}" +} diff --git a/examples/vmss-automatic-rolling-updates/variables.tf b/examples/vmss-automatic-rolling-updates/variables.tf new file mode 100644 index 000000000000..c8fbdc9e6684 --- /dev/null +++ b/examples/vmss-automatic-rolling-updates/variables.tf @@ -0,0 +1,66 @@ +# variable "subscription_id" {} +# variable "client_id" {} +# variable "client_secret" 
{} +# variable "tenant_id" {} + +variable "resource_group_name" { + description = "The name of the resource group in which to create the virtual network." + default = "tfex-vmss-ubuntu" +} + +variable "location" { + description = "The location/region where the virtual network is created. Changing this forces a new resource to be created." + default = "southcentralus" +} + +variable "storage_account_tier" { + description = "Defines the Tier of storage account to be created. Valid options are Standard and Premium." + default = "Standard" +} + +variable "storage_replication_type" { + description = "Defines the Replication Type to use for this storage account. Valid options include LRS, GRS etc." + default = "LRS" +} + +variable "hostname" { + description = "A string that determines the hostname/IP address of the origin server. This string could be a domain name, IPv4 address or IPv6 address." + default = "tfex-vmss-ubuntu" +} + +variable "vm_sku" { + description = "Size of VMs in the VM Scale Set." + default = "Standard_A1" +} + +variable "ubuntu_os_version" { + description = "The Ubuntu version for the VM. This will pick a fully patched image of this given Ubuntu version. Allowed values are: 15.10, 14.04.4-LTS." + default = "16.04.0-LTS" +} + +variable "image_publisher" { + description = "The name of the publisher of the image (az vm image list)" + default = "Canonical" +} + +variable "image_offer" { + description = "The name of the offer (az vm image list)" + default = "UbuntuServer" +} + +variable "vmss_name_prefix" { + description = "String used as a base for naming resources. Must be 1-9 characters in length for windows and 1-58 for linux images and globally unique across Azure. A hash is prepended to this string for some resources, and resource-specific information is appended." +} + +variable "instance_count" { + description = "Number of VM instances (100 or less)." + default = "5" +} + +variable "admin_username" { + description = "Admin username on all VMs." 
+} + +variable "admin_password" { + description = "Admin password on all VMs." +} diff --git a/examples/vmss-ubuntu/variables.tf b/examples/vmss-ubuntu/variables.tf index dd82d270b45f..c8fbdc9e6684 100644 --- a/examples/vmss-ubuntu/variables.tf +++ b/examples/vmss-ubuntu/variables.tf @@ -5,6 +5,7 @@ variable "resource_group_name" { description = "The name of the resource group in which to create the virtual network." + default = "tfex-vmss-ubuntu" } variable "location" { @@ -24,6 +25,7 @@ variable "storage_replication_type" { variable "hostname" { description = "A string that determines the hostname/IP address of the origin server. This string could be a domain name, IPv4 address or IPv6 address." + default = "tfex-vmss-ubuntu" } variable "vm_sku" { diff --git a/website/docs/r/virtual_machine_scale_set.html.markdown b/website/docs/r/virtual_machine_scale_set.html.markdown index 02c189f56d2e..f2ee433e220b 100644 --- a/website/docs/r/virtual_machine_scale_set.html.markdown +++ b/website/docs/r/virtual_machine_scale_set.html.markdown @@ -76,11 +76,32 @@ resource "azurerm_lb_nat_pool" "lbnatpool" { frontend_ip_configuration_name = "PublicIPAddress" } +resource "azurerm_lb_probe" "test" { + resource_group_name = "${azurerm_resource_group.test.name}" + loadbalancer_id = "${azurerm_lb.test.id}" + name = "http-probe" + request_path = "/health" + port = 8080 +} + resource "azurerm_virtual_machine_scale_set" "test" { name = "mytestscaleset-1" location = "${azurerm_resource_group.test.location}" resource_group_name = "${azurerm_resource_group.test.name}" - upgrade_policy_mode = "Manual" + + # automatic rolling upgrade + automatic_os_upgrade = true + upgrade_policy_mode = "Rolling" + rolling_upgrade_policy { + max_batch_instance_percent = 20 + max_unhealthy_instance_percent = 20 + max_unhealthy_upgraded_instance_percent = 5 + pause_time_between_batches = "PT0S" + } + + # required when using rolling upgrade policy + health_probe_id = "${azurerm_lb_probe.test.id}" + sku { name = 
"Standard_F2" @@ -244,10 +265,13 @@ The following arguments are supported: * `resource_group_name` - (Required) The name of the resource group in which to create the virtual machine scale set. Changing this forces a new resource to be created. * `location` - (Required) Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. * `sku` - (Required) A sku block as documented below. -* `upgrade_policy_mode` - (Required) Specifies the mode of an upgrade to virtual machines in the scale set. Possible values, `Manual` or `Automatic`. -* `overprovision` - (Optional) Specifies whether the virtual machine scale set should be overprovisioned. Defaults to `true`. -* `single_placement_group` - (Optional) Specifies whether the scale set is limited to a single placement group with a maximum size of 100 virtual machines. If set to false, managed disks must be used. Defaults to `true`. Changing this forces a - new resource to be created. See [documentation](http://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-placement-groups) for more information. +* `upgrade_policy_mode` - (Required) Specifies the mode of an upgrade to virtual machines in the scale set. Possible values, `Rolling`, `Manual`, or `Automatic`. When choosing `Rolling`, you will need to set a health probe. +* `automatic_os_upgrade` - (Optional) Automatic OS patches can be applied by Azure to your scaleset. This is particularly useful when `upgrade_policy_mode` is set to `Rolling`. Defaults to `false`. +* `rolling_upgrade_policy` - (Optional) A `rolling_upgrade_policy` block as defined below. This is only applicable when the `upgrade_policy_mode` is `Rolling`. +* `health_probe_id` - (Optional) Specifies the identifier for the load balancer health probe. Required when using `Rolling` as your `upgrade_policy_mode`. +* `overprovision` - (Optional) Specifies whether the virtual machine scale set should be overprovisioned. 
+* `single_placement_group` - (Optional) Specifies whether the scale set is limited to a single placement group with a maximum size of 100 virtual machines. If set to false, managed disks must be used. Defaults to `true`. Changing this forces a + new resource to be created. See [documentation](http://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-placement-groups) for more information. * `license_type` - (Optional, when a Windows machine) Specifies the Windows OS license type. If supplied, the only allowed values are `Windows_Client` and `Windows_Server`. * `os_profile` - (Required) A Virtual Machine OS Profile block as documented below. * `os_profile_secrets` - (Optional) A collection of Secret blocks as documented below. @@ -272,6 +296,13 @@ The following arguments are supported: * `tier` - (Optional) Specifies the tier of virtual machines in a scale set. Possible values, `standard` or `basic`. * `capacity` - (Required) Specifies the number of virtual machines in the scale set. +`rolling_upgrade_policy` supports the following: + +* `max_batch_instance_percent` - (Optional) The maximum percent of total virtual machine instances that will be upgraded simultaneously by the rolling upgrade in one batch. As this is a maximum, unhealthy instances in previous or future batches can cause the percentage of instances in a batch to decrease to ensure higher reliability. Defaults to `20`. +* `max_unhealthy_instance_percent` - (Optional) The maximum percentage of the total virtual machine instances in the scale set that can be simultaneously unhealthy, either as a result of being upgraded, or by being found in an unhealthy state by the virtual machine health checks before the rolling upgrade aborts. This constraint will be checked prior to starting any batch. Defaults to `20`. +* `max_unhealthy_upgraded_instance_percent` - (Optional) The maximum percentage of upgraded virtual machine instances that can be found to be in an unhealthy state. 
This check will happen after each batch is upgraded. If this percentage is ever exceeded, the rolling upgrade aborts. Defaults to `20`. +* `pause_time_between_batches` - (Optional) The wait time between completing the update for all virtual machines in one batch and starting the next batch. The time duration should be specified in ISO 8601 format for duration (https://en.wikipedia.org/wiki/ISO_8601#Durations). Defaults to `0` seconds represented as `PT0S`. + `identity` supports the following: * `type` - (Required) Specifies the identity type to be assigned to the scale set. Allowable values are `SystemAssigned` and `UserAssigned`. To enable Managed Service Identity (MSI) on all machines in the scale set, an extension with the type "ManagedIdentityExtensionForWindows" or "ManagedIdentityExtensionForLinux" must also be added. For the `SystemAssigned` identity the scale set's Service Principal ID (SPN) can be retrieved after the scale set has been created. See [documentation](https://docs.microsoft.com/en-us/azure/active-directory/managed-service-identity/overview) for more information. @@ -393,7 +424,7 @@ output "principal_id" { * `caching` - (Optional) Specifies the caching requirements. Possible values include: `None` (default), `ReadOnly`, `ReadWrite`. * `image` - (Optional) Specifies the blob uri for user image. A virtual machine scale set creates an os disk in the same container as the user image. Updating the osDisk image causes the existing disk to be deleted and a new one created with the new image. If the VM scale set is in Manual upgrade mode then the virtual machines are not updated until they have manualUpgrade applied to them. - When setting this field `os_type` needs to be specified. Cannot be used when `vhd_containers`, `managed_disk_type` or `storage_profile_image_reference ` are specified. + When setting this field `os_type` needs to be specified. 
Cannot be used when `vhd_containers`, `managed_disk_type` or `storage_profile_image_reference` are specified. * `os_type` - (Optional) Specifies the operating system Type, valid values are windows, linux. `storage_profile_data_disk` supports the following: