From 3a043cadd6bebdd6cc5488b43dabce5404e0a2b2 Mon Sep 17 00:00:00 2001 From: Mayank Hirani Date: Fri, 8 Dec 2023 12:37:46 -0800 Subject: [PATCH 01/12] Push all kx volume changes for PR. --- internal/service/finspace/kx_volume.go | 504 ++++++++++++++++++ internal/service/finspace/kx_volume_test.go | 278 ++++++++++ .../service/finspace/service_package_gen.go | 8 + .../docs/r/finspace_kx_volume.html.markdown | 98 ++++ 4 files changed, 888 insertions(+) create mode 100644 internal/service/finspace/kx_volume.go create mode 100644 internal/service/finspace/kx_volume_test.go create mode 100644 website/docs/r/finspace_kx_volume.html.markdown diff --git a/internal/service/finspace/kx_volume.go b/internal/service/finspace/kx_volume.go new file mode 100644 index 000000000000..0edbc919ffc2 --- /dev/null +++ b/internal/service/finspace/kx_volume.go @@ -0,0 +1,504 @@ +// // Copyright (c) HashiCorp, Inc. +// // SPDX-License-Identifier: MPL-2.0 +package finspace + +import ( + "context" + "errors" + "log" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/finspace" + "github.com/aws/aws-sdk-go-v2/service/finspace/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/flex" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/internal/verify" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @SDKResource("aws_finspace_kx_volume", name="Kx Volume") +// @Tags(identifierAttribute="arn") +func ResourceKxVolume() *schema.Resource { + return &schema.Resource{ + CreateWithoutTimeout: resourceKxVolumeCreate, + ReadWithoutTimeout: resourceKxVolumeRead, + UpdateWithoutTimeout: resourceKxVolumeUpdate, + DeleteWithoutTimeout: resourceKxVolumeDelete, + + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Update: schema.DefaultTimeout(30 * time.Minute), + Delete: schema.DefaultTimeout(45 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + + "availability_zones": { + Type: schema.TypeList, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Required: true, + ForceNew: true, + }, + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "az_mode": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[types.KxAzMode](), + }, + "environment_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(1, 32), + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(3, 63), + }, + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[types.KxVolumeType](), + }, + "description": { + Type: schema.TypeString, + Optional: true, + 
ForceNew: true, + ValidateFunc: validation.StringLenBetween(1, 1000), + }, + "nas1_configuration": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "size": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(1200, 33600), + }, + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[types.KxNAS1Type](), + }, + }, + }, + }, + "created_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "last_modified_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "status_reason": { + Type: schema.TypeString, + Computed: true, + }, + "attached_clusters": { + Type: schema.TypeList, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cluster_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(3, 63), + }, + "cluster_status": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[types.KxClusterStatus](), + }, + "cluster_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[types.KxClusterType](), + }, + }, + }, + Computed: true, + }, + names.AttrTags: tftags.TagsSchema(), + names.AttrTagsAll: tftags.TagsSchemaComputed(), + }, + CustomizeDiff: verify.SetTagsDiff, + } +} + +const ( + ResNameKxVolume = "Kx Volume" + kxVolumeIDPartCount = 2 +) + +func resourceKxVolumeCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) + + environmentId := d.Get("environment_id").(string) + volumeName := d.Get("name").(string) + idParts := []string{ + environmentId, + volumeName, + } + rID, err := flex.FlattenResourceId(idParts, kxVolumeIDPartCount, false) + if err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionFlatteningResourceId, ResNameKxVolume, d.Get("name").(string), err)...) + } + d.SetId(rID) + + in := &finspace.CreateKxVolumeInput{ + ClientToken: aws.String(id.UniqueId()), + AvailabilityZoneIds: flex.ExpandStringValueList(d.Get("availability_zones").([]interface{})), + EnvironmentId: aws.String(environmentId), + VolumeType: types.KxVolumeType(d.Get("type").(string)), + VolumeName: aws.String(volumeName), + AzMode: types.KxAzMode(d.Get("az_mode").(string)), + Tags: getTagsIn(ctx), + } + + if v, ok := d.GetOk("description"); ok { + in.Description = aws.String(v.(string)) + } + + if v, ok := d.GetOk("nas1_configuration"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + in.Nas1Configuration = expandNas1Configuration(v.([]interface{})) + } + + // TODO: add flatten/expand functions for remaining parameters + + out, err := conn.CreateKxVolume(ctx, in) + if err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionCreating, ResNameKxVolume, d.Get("name").(string), err)...) + } + + if out == nil || out.VolumeName == nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionCreating, ResNameKxVolume, d.Get("name").(string), errors.New("empty output"))...) 
+ } + + if _, err := waitKxVolumeCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionWaitingForCreation, ResNameKxVolume, d.Id(), err)...) + } + + // The CreateKxVolume API currently fails to tag the Volume when the + // Tags field is set. Until the API is fixed, tag after creation instead. + if err := createTags(ctx, conn, aws.ToString(out.VolumeArn), getTagsIn(ctx)); err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionCreating, ResNameKxVolume, d.Id(), err)...) + } + + return append(diags, resourceKxVolumeRead(ctx, d, meta)...) +} + +func resourceKxVolumeRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) + + out, err := findKxVolumeByID(ctx, conn, d.Id()) + + if !d.IsNewResource() && tfresource.NotFound(err) { + log.Printf("[WARN] FinSpace KxVolume (%s) not found, removing from state", d.Id()) + d.SetId("") + return diags + } + + if err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionReading, ResNameKxVolume, d.Id(), err)...) + } + + d.Set("arn", out.VolumeArn) + d.Set("name", out.VolumeName) + d.Set("description", out.Description) + d.Set("type", out.VolumeType) + d.Set("status", out.Status) + d.Set("status_reason", out.StatusReason) + d.Set("az_mode", out.AzMode) + d.Set("description", out.Description) + d.Set("created_timestamp", out.CreatedTimestamp.String()) + d.Set("last_modified_timestamp", out.LastModifiedTimestamp.String()) + d.Set("availability_zones", aws.StringSlice(out.AvailabilityZoneIds)) + + if err := d.Set("nas1_configuration", flattenNas1Configuration(out.Nas1Configuration)); err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionSetting, ResNameKxVolume, d.Id(), err)...) + } + + if err := d.Set("attached_clusters", flattenAttachedClusters(out.AttachedClusters)); err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionSetting, ResNameKxVolume, d.Id(), err)...) + } + + parts, err := flex.ExpandResourceId(d.Id(), kxVolumeIDPartCount, false) + if err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionSetting, ResNameKxVolume, d.Id(), err)...) + } + d.Set("environment_id", parts[0]) + + return diags +} + +func resourceKxVolumeUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) + + updateVolume := false + + in := &finspace.UpdateKxVolumeInput{ + EnvironmentId: aws.String(d.Get("environment_id").(string)), + VolumeName: aws.String(d.Get("name").(string)), + } + + if v, ok := d.GetOk("description"); ok && d.HasChanges("description") { + in.Description = aws.String(v.(string)) + updateVolume = true + } + + if v, ok := d.GetOk("nas1_configuration"); ok && len(v.([]interface{})) > 0 && d.HasChanges("nas1_configuration") { + in.Nas1Configuration = expandNas1Configuration(v.([]interface{})) + updateVolume = true + } + + if !updateVolume { + return diags + } + + log.Printf("[DEBUG] Updating FinSpace KxVolume (%s): %#v", d.Id(), in) + + if _, err := conn.UpdateKxVolume(ctx, in); err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionUpdating, ResNameKxVolume, d.Id(), err)...) 
+ } + if _, err := waitKxVolumeUpdated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionUpdating, ResNameKxVolume, d.Id(), err)...) + } + + return append(diags, resourceKxVolumeRead(ctx, d, meta)...) +} + +func resourceKxVolumeDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) + + log.Printf("[INFO] Deleting FinSpace Kx Volume: %s", d.Id()) + _, err := conn.DeleteKxVolume(ctx, &finspace.DeleteKxVolumeInput{ + VolumeName: aws.String(d.Get("name").(string)), + EnvironmentId: aws.String(d.Get("environment_id").(string)), + }) + + if err != nil { + var nfe *types.ResourceNotFoundException + if errors.As(err, &nfe) { + return diags + } + + return append(diags, create.DiagError(names.FinSpace, create.ErrActionDeleting, ResNameKxVolume, d.Id(), err)...) + } + + _, err = waitKxVolumeDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)) + if err != nil && !tfresource.NotFound(err) { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionWaitingForDeletion, ResNameKxVolume, d.Id(), err)...) + } + + return diags +} + +func waitKxVolumeCreated(ctx context.Context, conn *finspace.Client, id string, timeout time.Duration) (*finspace.GetKxVolumeOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(types.KxVolumeStatusCreating), + Target: enum.Slice(types.KxVolumeStatusActive), + Refresh: statusKxVolume(ctx, conn, id), + Timeout: timeout, + NotFoundChecks: 20, + ContinuousTargetOccurence: 2, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*finspace.GetKxVolumeOutput); ok { + return out, err + } + + return nil, err +} + +func waitKxVolumeUpdated(ctx context.Context, conn *finspace.Client, id string, timeout time.Duration) (*finspace.GetKxVolumeOutput, error) { //nolint:unparam + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(types.KxVolumeStatusCreating, types.KxVolumeStatusUpdating), + Target: enum.Slice(types.KxVolumeStatusActive), + Refresh: statusKxVolume(ctx, conn, id), + Timeout: timeout, + NotFoundChecks: 20, + ContinuousTargetOccurence: 2, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*finspace.GetKxVolumeOutput); ok { + return out, err + } + + return nil, err +} + +func waitKxVolumeDeleted(ctx context.Context, conn *finspace.Client, id string, timeout time.Duration) (*finspace.GetKxVolumeOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(types.KxVolumeStatusDeleting), + Target: enum.Slice(types.KxVolumeStatusDeleted), + Refresh: statusKxVolume(ctx, conn, id), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*finspace.GetKxVolumeOutput); ok { + return out, err + } + + return nil, err +} + +func statusKxVolume(ctx context.Context, conn *finspace.Client, id string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + out, err := findKxVolumeByID(ctx, conn, id) + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return out, string(out.Status), nil + } +} + +func findKxVolumeByID(ctx context.Context, conn *finspace.Client, id string) (*finspace.GetKxVolumeOutput, error) { + parts, err := flex.ExpandResourceId(id, kxVolumeIDPartCount, false) + if err != nil { + return nil, err + } 
+ + in := &finspace.GetKxVolumeInput{ + EnvironmentId: aws.String(parts[0]), + VolumeName: aws.String(parts[1]), + } + + out, err := conn.GetKxVolume(ctx, in) + if err != nil { + var nfe *types.ResourceNotFoundException + if errors.As(err, &nfe) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: in, + } + } + + return nil, err + } + + if out == nil || out.VolumeArn == nil { + return nil, tfresource.NewEmptyResultError(in) + } + + return out, nil +} + +func expandNas1Configuration(tfList []interface{}) *types.KxNAS1Configuration { + if len(tfList) == 0 || tfList[0] == nil { + return nil + } + + tfMap := tfList[0].(map[string]interface{}) + + a := &types.KxNAS1Configuration{} + + if v, ok := tfMap["size"].(int); ok && v != 0 { + a.Size = aws.Int32(int32(v)) + } + + if v, ok := tfMap["type"].(string); ok && v != "" { + a.Type = types.KxNAS1Type(v) + } + return a +} + +func flattenNas1Configuration(apiObject *types.KxNAS1Configuration) []interface{} { + if apiObject == nil { + return nil + } + + m := map[string]interface{}{} + + if v := apiObject.Size; v != nil { + m["size"] = aws.ToInt32(v) + } + + if v := apiObject.Type; v != "" { + m["type"] = v + } + + return []interface{}{m} +} + +func flattenCluster(apiObject *types.KxAttachedCluster) map[string]interface{} { + if apiObject == nil { + return nil + } + + m := map[string]interface{}{} + + if v := apiObject.ClusterName; aws.ToString(v) != "" { + m["cluster_name"] = aws.ToString(v) + } + + if v := apiObject.ClusterStatus; v != "" { + m["cluster_status"] = string(v) + } + + if v := apiObject.ClusterType; v != "" { + m["cluster_type"] = string(v) + } + + return m +} + +func flattenAttachedClusters(apiObjects []types.KxAttachedCluster) []interface{} { + if len(apiObjects) == 0 { + return nil + } + + var l []interface{} + + for _, apiObject := range apiObjects { + l = append(l, flattenCluster(&apiObject)) + } + + return l +} diff --git a/internal/service/finspace/kx_volume_test.go b/internal/service/finspace/kx_volume_test.go new file mode 100644 index 000000000000..520c918c1d0e --- /dev/null +++ b/internal/service/finspace/kx_volume_test.go @@ -0,0 +1,278 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package finspace_test + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/finspace" + "github.com/aws/aws-sdk-go-v2/service/finspace/types" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + tffinspace "github.com/hashicorp/terraform-provider-aws/internal/service/finspace" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccFinSpaceKxVolume_basic(t *testing.T) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + ctx := acctest.Context(t) + var KxVolume finspace.GetKxVolumeOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_finspace_kx_volume.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, finspace.ServiceID) + }, + ErrorCheck: acctest.ErrorCheck(t, finspace.ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckKxVolumeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccKxVolumeConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckKxVolumeExists(ctx, resourceName, &KxVolume), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "status", string(types.KxVolumeStatusActive)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccFinSpaceKxVolume_dissappears(t *testing.T) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + ctx := acctest.Context(t) + var KxVolume finspace.GetKxVolumeOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_finspace_kx_volume.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, finspace.ServiceID) + }, + ErrorCheck: acctest.ErrorCheck(t, finspace.ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckKxVolumeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccKxVolumeConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckKxVolumeExists(ctx, resourceName, &KxVolume), + acctest.CheckResourceDisappears(ctx, acctest.Provider, tffinspace.ResourceKxVolume(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccCheckKxVolumeDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_finspace_kx_volume" { + continue + } + + input := &finspace.GetKxVolumeInput{ + VolumeName: aws.String(rs.Primary.Attributes["name"]), + EnvironmentId: aws.String(rs.Primary.Attributes["environment_id"]), + } + _, err := conn.GetKxVolume(ctx, input) + if err != nil { + var nfe *types.ResourceNotFoundException + if errors.As(err, &nfe) { + return nil + } + return err + } + + return 
create.Error(names.FinSpace, create.ErrActionCheckingDestroyed, tffinspace.ResNameKxVolume, rs.Primary.ID, errors.New("not destroyed")) + } + + return nil + } +} + +func testAccKxVolumeConfig_basic(rName string) string { + return acctest.ConfigCompose( + testAccKxVolumeConfigBase(rName), + fmt.Sprintf(` + resource "aws_finspace_kx_volume" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + availability_zones = [aws_finspace_kx_environment.test.availability_zones[0]] + az_mode = "SINGLE" + type = "NAS_1" + nas1_configuration { + type= "SSD_250" + size= 1200 + } + } + `, rName)) +} + +func testAccKxVolumeConfigBase(rName string) string { + return fmt.Sprintf(` + data "aws_caller_identity" "current" {} + data "aws_partition" "current" {} + + output "account_id" { + value = data.aws_caller_identity.current.account_id + } + + resource "aws_kms_key" "test" { + deletion_window_in_days = 7 + } + + resource "aws_finspace_kx_environment" "test" { + name = %[1]q + kms_key_id = aws_kms_key.test.arn + } + + data "aws_iam_policy_document" "key_policy" { + statement { + actions = [ + "kms:Decrypt", + "kms:GenerateDataKey" + ] + + resources = [ + aws_kms_key.test.arn, + ] + + principals { + type = "Service" + identifiers = ["finspace.amazonaws.com"] + } + + condition { + test = "ArnLike" + variable = "aws:SourceArn" + values = ["${aws_finspace_kx_environment.test.arn}/*"] + } + + condition { + test = "StringEquals" + variable = "aws:SourceAccount" + values = [data.aws_caller_identity.current.account_id] + } + } + + statement { + actions = [ + "kms:*", + ] + + resources = [ + "*", + ] + + principals { + type = "AWS" + identifiers = ["arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:root"] + } + } + } + + resource "aws_kms_key_policy" "test" { + key_id = aws_kms_key.test.id + policy = data.aws_iam_policy_document.key_policy.json + } + + resource "aws_vpc" "test" { + cidr_block = "172.31.0.0/16" + enable_dns_hostnames = true + } + + resource "aws_subnet" "test" { + vpc_id = aws_vpc.test.id + cidr_block = "172.31.32.0/20" + availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] + } + + resource "aws_security_group" "test" { + name = %[1]q + vpc_id = aws_vpc.test.id + + ingress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + } + + resource "aws_internet_gateway" "test" { + vpc_id = aws_vpc.test.id + } + + data "aws_route_tables" "rts" { + vpc_id = aws_vpc.test.id + } + + resource "aws_route" "r" { + route_table_id = tolist(data.aws_route_tables.rts.ids)[0] + destination_cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.test.id + } + `, rName) +} + +func testAccCheckKxVolumeExists(ctx context.Context, name string, KxVolume *finspace.GetKxVolumeOutput) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxVolume, name, errors.New("not found")) + } + + if rs.Primary.ID == "" { + return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxVolume, name, errors.New("not set")) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) + resp, err := conn.GetKxVolume(ctx, &finspace.GetKxVolumeInput{ + VolumeName: aws.String(rs.Primary.Attributes["name"]), + EnvironmentId: 
aws.String(rs.Primary.Attributes["environment_id"]), + }) + + if err != nil { + return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxVolume, rs.Primary.ID, err) + } + + *KxVolume = *resp + + return nil + } +} diff --git a/internal/service/finspace/service_package_gen.go b/internal/service/finspace/service_package_gen.go index 42b687b450ea..9fb0005d8443 100644 --- a/internal/service/finspace/service_package_gen.go +++ b/internal/service/finspace/service_package_gen.go @@ -60,6 +60,14 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka IdentifierAttribute: "arn", }, }, + { + Factory: ResourceKxVolume, + TypeName: "aws_finspace_kx_volume", + Name: "Kx Volume", + Tags: &types.ServicePackageResourceTags{ + IdentifierAttribute: "arn", + }, + }, } } diff --git a/website/docs/r/finspace_kx_volume.html.markdown b/website/docs/r/finspace_kx_volume.html.markdown new file mode 100644 index 000000000000..71e855f1bc46 --- /dev/null +++ b/website/docs/r/finspace_kx_volume.html.markdown @@ -0,0 +1,98 @@ +--- +subcategory: "FinSpace" +layout: "aws" +page_title: "AWS: aws_finspace_kx_volume" +description: |- + Terraform resource for managing an AWS FinSpace Kx Volume. +--- + +# Resource: aws_finspace_kx_volume + +Terraform resource for managing an AWS FinSpace Kx Volume. + +## Example Usage + +### Basic Usage + +```terraform +resource "aws_finspace_kx_volume" "example" { + name = "my-tf-kx-volume" + environment_id = aws_finspace_kx_environment.example.id + availability_zones = "use1-az2" + az_mode = "SINGLE" + type = "NAS_1" + nas1_configuration { + type= "SSD_250" + size= 1200 + } +} +``` + +## Argument Reference + +The following arguments are required: + +* `az_mode` - (Required) The number of availability zones you want to assign per volume. Currently, Finspace only support SINGLE for volumes. + * SINGLE - Assigns one availability zone per volume. +* `environment_id` - (Required) A unique identifier for the kdb environment, whose clusters can attach to the volume. +* `name` - (Required) Unique name for the volumr that you want to create. +* `type` - (Required) The type of file system volume. Currently, FinSpace only supports NAS_1 volume type. When you select NAS_1 volume type, you must also provide nas1Configuration. +* `availability_zones` - (Required) The identifier of the AWS Availability Zone IDs. + +The following arguments are optional: + +* `nas1_configuration` - (Optional) Specifies the configuration for the Network attached storage (NAS_1) file system volume. This parameter is required when you choose volumeType as NAS_1. +* `description` - (Optional) Description of the volume. +* `tags` - (Optional) A list of key-value pairs to label the volume. You can add up to 50 tags to a volume + + +### nas1_configuration + +The nas1_configuration block supports the following arguments: + +* `size` - (Required) The size of the network attached storage. +* `security_group_ids` - (Required) The type of the network attached storage. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `arn` - Amazon Resource Name (ARN) identifier of the KX volume. +* `created_timestamp` - The timestamp at which the volume was created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000. +* `status` - The status of volume creation. + * CREATING – The volume creation is in progress. 
+ * CREATE_FAILED – The volume creation has failed. + * ACTIVE – The volume is active. + * UPDATING – The volume is in the process of being updated. + * UPDATE_FAILED – The update action failed. + * UPDATED – The volume is successfully updated. + * DELETING – The volume is in the process of being deleted. + * DELETE_FAILED – The system failed to delete the volume. + * DELETED – The volume is successfully deleted. +* `status_reason` - The error message when a failed state occurs. +* `last_modified_timestamp` - Last timestamp at which the volume was updated in FinSpace. Value determined as epoch time in seconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000. + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `30m`) +* `update` - (Default `30m`) +* `delete` - (Default `45m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import an AWS FinSpace Kx Volume using the `id` (environment ID and volume name, comma-delimited). For example: + +```terraform +import { + to = aws_finspace_kx_volume.example + id = "n3ceo7wqxoxcti5tujqwzs,my-tf-kx-volume" +} +``` + +Using `terraform import`, import an AWS FinSpace Kx Volume using the `id` (environment ID and volume name, comma-delimited). For example: + +```console +% terraform import aws_finspace_kx_volume.example n3ceo7wqxoxcti5tujqwzs,my-tf-kx-volume +``` From 76afd366616159260a7326f126d95e6b3bc5e7b1 Mon Sep 17 00:00:00 2001 From: Mayank Hirani Date: Wed, 13 Dec 2023 18:37:14 -0500 Subject: [PATCH 02/12] Fix acceptance test linting and doc issue. --- internal/service/finspace/kx_volume_test.go | 208 +++++++++--------- .../docs/r/finspace_kx_volume.html.markdown | 1 - 2 files changed, 104 insertions(+), 105 deletions(-) diff --git a/internal/service/finspace/kx_volume_test.go b/internal/service/finspace/kx_volume_test.go index 520c918c1d0e..52008f1e25e4 100644 --- a/internal/service/finspace/kx_volume_test.go +++ b/internal/service/finspace/kx_volume_test.go @@ -122,132 +122,132 @@ func testAccKxVolumeConfig_basic(rName string) string { return acctest.ConfigCompose( testAccKxVolumeConfigBase(rName), fmt.Sprintf(` - resource "aws_finspace_kx_volume" "test" { - name = %[1]q - environment_id = aws_finspace_kx_environment.test.id - availability_zones = [aws_finspace_kx_environment.test.availability_zones[0]] - az_mode = "SINGLE" - type = "NAS_1" - nas1_configuration { - type= "SSD_250" - size= 1200 - } - } - `, rName)) +resource "aws_finspace_kx_volume" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + availability_zones = [aws_finspace_kx_environment.test.availability_zones[0]] + az_mode = "SINGLE" + type = "NAS_1" + nas1_configuration { + type= "SSD_250" + size= 1200 + } +} +`, rName)) } func testAccKxVolumeConfigBase(rName string) string { return fmt.Sprintf(` - data "aws_caller_identity" "current" {} - data "aws_partition" "current" {} +data "aws_caller_identity" "current" {} +data "aws_partition" "current" {} - output "account_id" { - value = data.aws_caller_identity.current.account_id - } +output "account_id" { + value = data.aws_caller_identity.current.account_id +} - resource "aws_kms_key" "test" { - deletion_window_in_days = 7 - } +resource "aws_kms_key" "test" { + deletion_window_in_days = 7 +} - resource "aws_finspace_kx_environment" "test" { - name = %[1]q - kms_key_id = 
aws_kms_key.test.arn - } +resource "aws_finspace_kx_environment" "test" { + name = %[1]q + kms_key_id = aws_kms_key.test.arn +} - data "aws_iam_policy_document" "key_policy" { - statement { - actions = [ - "kms:Decrypt", - "kms:GenerateDataKey" - ] +data "aws_iam_policy_document" "key_policy" { + statement { + actions = [ + "kms:Decrypt", + "kms:GenerateDataKey" + ] - resources = [ - aws_kms_key.test.arn, - ] + resources = [ + aws_kms_key.test.arn, + ] - principals { - type = "Service" - identifiers = ["finspace.amazonaws.com"] - } - - condition { - test = "ArnLike" - variable = "aws:SourceArn" - values = ["${aws_finspace_kx_environment.test.arn}/*"] - } - - condition { - test = "StringEquals" - variable = "aws:SourceAccount" - values = [data.aws_caller_identity.current.account_id] - } - } - - statement { - actions = [ - "kms:*", - ] - - resources = [ - "*", - ] - - principals { - type = "AWS" - identifiers = ["arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:root"] - } - } + principals { + type = "Service" + identifiers = ["finspace.amazonaws.com"] } - resource "aws_kms_key_policy" "test" { - key_id = aws_kms_key.test.id - policy = data.aws_iam_policy_document.key_policy.json + condition { + test = "ArnLike" + variable = "aws:SourceArn" + values = ["${aws_finspace_kx_environment.test.arn}/*"] } - - resource "aws_vpc" "test" { - cidr_block = "172.31.0.0/16" - enable_dns_hostnames = true + + condition { + test = "StringEquals" + variable = "aws:SourceAccount" + values = [data.aws_caller_identity.current.account_id] } + } - resource "aws_subnet" "test" { - vpc_id = aws_vpc.test.id - cidr_block = "172.31.32.0/20" - availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] + statement { + actions = [ + "kms:*", + ] + + resources = [ + "*", + ] + + principals { + type = "AWS" + identifiers = ["arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:root"] } + } +} + +resource "aws_kms_key_policy" "test" { + key_id = aws_kms_key.test.id + policy = data.aws_iam_policy_document.key_policy.json +} - resource "aws_security_group" "test" { - name = %[1]q - vpc_id = aws_vpc.test.id +resource "aws_vpc" "test" { + cidr_block = "172.31.0.0/16" + enable_dns_hostnames = true +} - ingress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } +resource "aws_subnet" "test" { + vpc_id = aws_vpc.test.id + cidr_block = "172.31.32.0/20" + availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] +} - egress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } - } +resource "aws_security_group" "test" { + name = %[1]q + vpc_id = aws_vpc.test.id + + ingress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } +} - resource "aws_internet_gateway" "test" { - vpc_id = aws_vpc.test.id - } +resource "aws_internet_gateway" "test" { + vpc_id = aws_vpc.test.id +} - data "aws_route_tables" "rts" { - vpc_id = aws_vpc.test.id - } +data "aws_route_tables" "rts" { + vpc_id = aws_vpc.test.id +} - resource "aws_route" "r" { - route_table_id = tolist(data.aws_route_tables.rts.ids)[0] - destination_cidr_block = "0.0.0.0/0" - gateway_id = aws_internet_gateway.test.id - } - `, rName) +resource "aws_route" "r" { + route_table_id = tolist(data.aws_route_tables.rts.ids)[0] + destination_cidr_block = "0.0.0.0/0" + gateway_id = 
aws_internet_gateway.test.id +} +`, rName) } func testAccCheckKxVolumeExists(ctx context.Context, name string, KxVolume *finspace.GetKxVolumeOutput) resource.TestCheckFunc { diff --git a/website/docs/r/finspace_kx_volume.html.markdown b/website/docs/r/finspace_kx_volume.html.markdown index 71e855f1bc46..b573a81efddf 100644 --- a/website/docs/r/finspace_kx_volume.html.markdown +++ b/website/docs/r/finspace_kx_volume.html.markdown @@ -45,7 +45,6 @@ The following arguments are optional: * `description` - (Optional) Description of the volume. * `tags` - (Optional) A list of key-value pairs to label the volume. You can add up to 50 tags to a volume - ### nas1_configuration The nas1_configuration block supports the following arguments: From 96399e660de24c132af31bf300cd36dd13b9e910 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 09:35:17 -0500 Subject: [PATCH 03/12] r/aws_finspace_kx_volume: prefer create.AppendDiagError --- internal/service/finspace/kx_volume.go | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/internal/service/finspace/kx_volume.go b/internal/service/finspace/kx_volume.go index 0edbc919ffc2..c76aa3b1f7b1 100644 --- a/internal/service/finspace/kx_volume.go +++ b/internal/service/finspace/kx_volume.go @@ -177,7 +177,7 @@ func resourceKxVolumeCreate(ctx context.Context, d *schema.ResourceData, meta in } rID, err := flex.FlattenResourceId(idParts, kxVolumeIDPartCount, false) if err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionFlatteningResourceId, ResNameKxVolume, d.Get("name").(string), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionFlatteningResourceId, ResNameKxVolume, d.Get("name").(string), err) } d.SetId(rID) @@ -203,21 +203,21 @@ func resourceKxVolumeCreate(ctx context.Context, d *schema.ResourceData, meta in out, err := conn.CreateKxVolume(ctx, in) if err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionCreating, ResNameKxVolume, d.Get("name").(string), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionCreating, ResNameKxVolume, d.Get("name").(string), err) } if out == nil || out.VolumeName == nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionCreating, ResNameKxVolume, d.Get("name").(string), errors.New("empty output"))...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionCreating, ResNameKxVolume, d.Get("name").(string), errors.New("empty output")) } if _, err := waitKxVolumeCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionWaitingForCreation, ResNameKxVolume, d.Id(), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionWaitingForCreation, ResNameKxVolume, d.Id(), err) } // The CreateKxVolume API currently fails to tag the Volume when the // Tags field is set. Until the API is fixed, tag after creation instead. if err := createTags(ctx, conn, aws.ToString(out.VolumeArn), getTagsIn(ctx)); err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionCreating, ResNameKxVolume, d.Id(), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionCreating, ResNameKxVolume, d.Id(), err) } return append(diags, resourceKxVolumeRead(ctx, d, meta)...) 
@@ -236,7 +236,7 @@ func resourceKxVolumeRead(ctx context.Context, d *schema.ResourceData, meta inte } if err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionReading, ResNameKxVolume, d.Id(), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionReading, ResNameKxVolume, d.Id(), err) } d.Set("arn", out.VolumeArn) @@ -252,16 +252,16 @@ func resourceKxVolumeRead(ctx context.Context, d *schema.ResourceData, meta inte d.Set("availability_zones", aws.StringSlice(out.AvailabilityZoneIds)) if err := d.Set("nas1_configuration", flattenNas1Configuration(out.Nas1Configuration)); err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionSetting, ResNameKxVolume, d.Id(), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionSetting, ResNameKxVolume, d.Id(), err) } if err := d.Set("attached_clusters", flattenAttachedClusters(out.AttachedClusters)); err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionSetting, ResNameKxVolume, d.Id(), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionSetting, ResNameKxVolume, d.Id(), err) } parts, err := flex.ExpandResourceId(d.Id(), kxVolumeIDPartCount, false) if err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionSetting, ResNameKxVolume, d.Id(), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionSetting, ResNameKxVolume, d.Id(), err) } d.Set("environment_id", parts[0]) @@ -296,10 +296,10 @@ func resourceKxVolumeUpdate(ctx context.Context, d *schema.ResourceData, meta in log.Printf("[DEBUG] Updating FinSpace KxVolume (%s): %#v", d.Id(), in) if _, err := conn.UpdateKxVolume(ctx, in); err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionUpdating, ResNameKxVolume, d.Id(), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionUpdating, ResNameKxVolume, d.Id(), err) } if _, err := waitKxVolumeUpdated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionUpdating, ResNameKxVolume, d.Id(), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionUpdating, ResNameKxVolume, d.Id(), err) } return append(diags, resourceKxVolumeRead(ctx, d, meta)...) @@ -321,12 +321,12 @@ func resourceKxVolumeDelete(ctx context.Context, d *schema.ResourceData, meta in return diags } - return append(diags, create.DiagError(names.FinSpace, create.ErrActionDeleting, ResNameKxVolume, d.Id(), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionDeleting, ResNameKxVolume, d.Id(), err) } _, err = waitKxVolumeDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)) if err != nil && !tfresource.NotFound(err) { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionWaitingForDeletion, ResNameKxVolume, d.Id(), err)...) 
+ return create.AppendDiagError(diags, names.FinSpace, create.ErrActionWaitingForDeletion, ResNameKxVolume, d.Id(), err) } return diags From 58ea0a7f9bcfe3225cd19b90a17e96608a7c8de9 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 09:39:11 -0500 Subject: [PATCH 04/12] r/aws_finspace_kx_volume: alphabetize attributes, fix conn init --- internal/service/finspace/kx_volume.go | 118 ++++++++++++------------- 1 file changed, 57 insertions(+), 61 deletions(-) diff --git a/internal/service/finspace/kx_volume.go b/internal/service/finspace/kx_volume.go index c76aa3b1f7b1..c550a4ead2f0 100644 --- a/internal/service/finspace/kx_volume.go +++ b/internal/service/finspace/kx_volume.go @@ -16,7 +16,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/enum" @@ -47,7 +46,36 @@ func ResourceKxVolume() *schema.Resource { }, Schema: map[string]*schema.Schema{ - + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "attached_clusters": { + Type: schema.TypeList, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cluster_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(3, 63), + }, + "cluster_status": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[types.KxClusterStatus](), + }, + "cluster_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[types.KxClusterType](), + }, + }, + }, + Computed: true, + }, "availability_zones": { Type: schema.TypeList, Elem: &schema.Schema{ @@ -56,39 +84,31 @@ func ResourceKxVolume() *schema.Resource { Required: true, ForceNew: true, }, - "arn": { - Type: schema.TypeString, - Computed: true, - }, "az_mode": { Type: schema.TypeString, Required: true, ForceNew: true, ValidateDiagFunc: enum.Validate[types.KxAzMode](), }, - "environment_id": { + "created_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "description": { Type: schema.TypeString, - Required: true, + Optional: true, ForceNew: true, - ValidateFunc: validation.StringLenBetween(1, 32), + ValidateFunc: validation.StringLenBetween(1, 1000), }, - "name": { + "environment_id": { Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validation.StringLenBetween(3, 63), - }, - "type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateDiagFunc: enum.Validate[types.KxVolumeType](), + ValidateFunc: validation.StringLenBetween(1, 32), }, - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validation.StringLenBetween(1, 1000), + "last_modified_timestamp": { + Type: schema.TypeString, + Computed: true, }, "nas1_configuration": { Type: schema.TypeList, @@ -111,13 +131,11 @@ func ResourceKxVolume() *schema.Resource { }, }, }, - "created_timestamp": { - Type: schema.TypeString, - Computed: true, - }, - "last_modified_timestamp": { - Type: schema.TypeString, - Computed: true, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(3, 63), }, "status": { Type: schema.TypeString, @@ 
-127,34 +145,14 @@ func ResourceKxVolume() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "attached_clusters": { - Type: schema.TypeList, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cluster_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.StringLenBetween(3, 63), - }, - "cluster_status": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateDiagFunc: enum.Validate[types.KxClusterStatus](), - }, - "cluster_type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateDiagFunc: enum.Validate[types.KxClusterType](), - }, - }, - }, - Computed: true, - }, names.AttrTags: tftags.TagsSchema(), names.AttrTagsAll: tftags.TagsSchemaComputed(), + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[types.KxVolumeType](), + }, }, CustomizeDiff: verify.SetTagsDiff, } @@ -167,7 +165,7 @@ const ( func resourceKxVolumeCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) + conn := meta.(*conns.AWSClient).FinSpaceClient(ctx) environmentId := d.Get("environment_id").(string) volumeName := d.Get("name").(string) @@ -199,8 +197,6 @@ func resourceKxVolumeCreate(ctx context.Context, d *schema.ResourceData, meta in in.Nas1Configuration = expandNas1Configuration(v.([]interface{})) } - // TODO: add flatten/expand functions for remaining parameters - out, err := conn.CreateKxVolume(ctx, in) if err != nil { return create.AppendDiagError(diags, names.FinSpace, create.ErrActionCreating, ResNameKxVolume, d.Get("name").(string), err) @@ -225,7 +221,7 @@ func resourceKxVolumeCreate(ctx context.Context, d *schema.ResourceData, meta in func resourceKxVolumeRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) + conn := meta.(*conns.AWSClient).FinSpaceClient(ctx) out, err := findKxVolumeByID(ctx, conn, d.Id()) @@ -270,7 +266,7 @@ func resourceKxVolumeRead(ctx context.Context, d *schema.ResourceData, meta inte func resourceKxVolumeUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) + conn := meta.(*conns.AWSClient).FinSpaceClient(ctx) updateVolume := false @@ -307,7 +303,7 @@ func resourceKxVolumeUpdate(ctx context.Context, d *schema.ResourceData, meta in func resourceKxVolumeDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) + conn := meta.(*conns.AWSClient).FinSpaceClient(ctx) log.Printf("[INFO] Deleting FinSpace Kx Volume: %s", d.Id()) _, err := conn.DeleteKxVolume(ctx, &finspace.DeleteKxVolumeInput{ From cecc3f2886187882451e0aaf9765354e6e6b20b1 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 09:41:28 -0500 Subject: [PATCH 05/12] r/aws_finspace_kx_volume(test): fix disappears test name --- internal/service/finspace/kx_volume_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/service/finspace/kx_volume_test.go b/internal/service/finspace/kx_volume_test.go index 52008f1e25e4..84dade247bd7 100644 --- a/internal/service/finspace/kx_volume_test.go +++ 
b/internal/service/finspace/kx_volume_test.go @@ -58,7 +58,7 @@ func TestAccFinSpaceKxVolume_basic(t *testing.T) { }) } -func TestAccFinSpaceKxVolume_dissappears(t *testing.T) { +func TestAccFinSpaceKxVolume_disappears(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } From fbb6fd67133021384ac24fba1b042514ca61ef5c Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 09:45:40 -0500 Subject: [PATCH 06/12] r/aws_finspace_kx_volume(test): use find in test check func --- internal/service/finspace/kx_volume.go | 6 +- internal/service/finspace/kx_volume_test.go | 66 +++++++++------------ 2 files changed, 32 insertions(+), 40 deletions(-) diff --git a/internal/service/finspace/kx_volume.go b/internal/service/finspace/kx_volume.go index c550a4ead2f0..72bad740dccf 100644 --- a/internal/service/finspace/kx_volume.go +++ b/internal/service/finspace/kx_volume.go @@ -223,7 +223,7 @@ func resourceKxVolumeRead(ctx context.Context, d *schema.ResourceData, meta inte var diags diag.Diagnostics conn := meta.(*conns.AWSClient).FinSpaceClient(ctx) - out, err := findKxVolumeByID(ctx, conn, d.Id()) + out, err := FindKxVolumeByID(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] FinSpace KxVolume (%s) not found, removing from state", d.Id()) @@ -382,7 +382,7 @@ func waitKxVolumeDeleted(ctx context.Context, conn *finspace.Client, id string, func statusKxVolume(ctx context.Context, conn *finspace.Client, id string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - out, err := findKxVolumeByID(ctx, conn, id) + out, err := FindKxVolumeByID(ctx, conn, id) if tfresource.NotFound(err) { return nil, "", nil } @@ -395,7 +395,7 @@ func statusKxVolume(ctx context.Context, conn *finspace.Client, id string) retry } } -func findKxVolumeByID(ctx context.Context, conn *finspace.Client, id string) (*finspace.GetKxVolumeOutput, error) { +func FindKxVolumeByID(ctx context.Context, conn *finspace.Client, id string) (*finspace.GetKxVolumeOutput, error) { parts, err := flex.ExpandResourceId(id, kxVolumeIDPartCount, false) if err != nil { return nil, err diff --git a/internal/service/finspace/kx_volume_test.go b/internal/service/finspace/kx_volume_test.go index 84dade247bd7..b1af01ddd14a 100644 --- a/internal/service/finspace/kx_volume_test.go +++ b/internal/service/finspace/kx_volume_test.go @@ -9,7 +9,6 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/finspace" "github.com/aws/aws-sdk-go-v2/service/finspace/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" @@ -28,7 +27,7 @@ func TestAccFinSpaceKxVolume_basic(t *testing.T) { } ctx := acctest.Context(t) - var KxVolume finspace.GetKxVolumeOutput + var volume finspace.GetKxVolumeOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_finspace_kx_volume.test" @@ -44,7 +43,7 @@ func TestAccFinSpaceKxVolume_basic(t *testing.T) { { Config: testAccKxVolumeConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckKxVolumeExists(ctx, resourceName, &KxVolume), + testAccCheckKxVolumeExists(ctx, resourceName, &volume), resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "status", string(types.KxVolumeStatusActive)), ), @@ -64,7 +63,7 @@ func TestAccFinSpaceKxVolume_disappears(t *testing.T) { } ctx := acctest.Context(t) - var KxVolume finspace.GetKxVolumeOutput + var volume finspace.GetKxVolumeOutput 
rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_finspace_kx_volume.test" @@ -80,7 +79,7 @@ func TestAccFinSpaceKxVolume_disappears(t *testing.T) { { Config: testAccKxVolumeConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckKxVolumeExists(ctx, resourceName, &KxVolume), + testAccCheckKxVolumeExists(ctx, resourceName, &volume), acctest.CheckResourceDisappears(ctx, acctest.Provider, tffinspace.ResourceKxVolume(), resourceName), ), ExpectNonEmptyPlan: true, @@ -98,11 +97,7 @@ func testAccCheckKxVolumeDestroy(ctx context.Context) resource.TestCheckFunc { continue } - input := &finspace.GetKxVolumeInput{ - VolumeName: aws.String(rs.Primary.Attributes["name"]), - EnvironmentId: aws.String(rs.Primary.Attributes["environment_id"]), - } - _, err := conn.GetKxVolume(ctx, input) + _, err := tffinspace.FindKxVolumeByID(ctx, conn, rs.Primary.ID) if err != nil { var nfe *types.ResourceNotFoundException if errors.As(err, &nfe) { @@ -118,6 +113,30 @@ func testAccCheckKxVolumeDestroy(ctx context.Context) resource.TestCheckFunc { } } +func testAccCheckKxVolumeExists(ctx context.Context, name string, volume *finspace.GetKxVolumeOutput) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxVolume, name, errors.New("not found")) + } + + if rs.Primary.ID == "" { + return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxVolume, name, errors.New("not set")) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) + + resp, err := tffinspace.FindKxVolumeByID(ctx, conn, rs.Primary.ID) + if err != nil { + return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxVolume, rs.Primary.ID, err) + } + + *volume = *resp + + return nil + } +} + func testAccKxVolumeConfig_basic(rName string) string { return acctest.ConfigCompose( testAccKxVolumeConfigBase(rName), @@ -249,30 +268,3 @@ resource "aws_route" "r" { } `, rName) } - -func testAccCheckKxVolumeExists(ctx context.Context, name string, KxVolume *finspace.GetKxVolumeOutput) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] - if !ok { - return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxVolume, name, errors.New("not found")) - } - - if rs.Primary.ID == "" { - return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxVolume, name, errors.New("not set")) - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) - resp, err := conn.GetKxVolume(ctx, &finspace.GetKxVolumeInput{ - VolumeName: aws.String(rs.Primary.Attributes["name"]), - EnvironmentId: aws.String(rs.Primary.Attributes["environment_id"]), - }) - - if err != nil { - return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxVolume, rs.Primary.ID, err) - } - - *KxVolume = *resp - - return nil - } -} From 08fcc0b0f79b45dc1471fd9082b1b83deb8cf16c Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 09:47:28 -0500 Subject: [PATCH 07/12] chore: changelog --- .changelog/34833.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/34833.txt diff --git a/.changelog/34833.txt b/.changelog/34833.txt new file mode 100644 index 000000000000..e1e350824ea5 --- /dev/null +++ b/.changelog/34833.txt @@ -0,0 +1,3 @@ 
+```release-note:new-resource +aws_finspace_kx_volume +``` From 5a7600eda4cce5c6c3f5584c3fe26dbbc534424e Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 09:59:52 -0500 Subject: [PATCH 08/12] r/aws_finspace_kx_volume(test): fmt config --- internal/service/finspace/kx_volume_test.go | 126 ++++++++++---------- 1 file changed, 61 insertions(+), 65 deletions(-) diff --git a/internal/service/finspace/kx_volume_test.go b/internal/service/finspace/kx_volume_test.go index b1af01ddd14a..616261e50971 100644 --- a/internal/service/finspace/kx_volume_test.go +++ b/internal/service/finspace/kx_volume_test.go @@ -137,32 +137,10 @@ func testAccCheckKxVolumeExists(ctx context.Context, name string, volume *finspa } } -func testAccKxVolumeConfig_basic(rName string) string { - return acctest.ConfigCompose( - testAccKxVolumeConfigBase(rName), - fmt.Sprintf(` -resource "aws_finspace_kx_volume" "test" { - name = %[1]q - environment_id = aws_finspace_kx_environment.test.id - availability_zones = [aws_finspace_kx_environment.test.availability_zones[0]] - az_mode = "SINGLE" - type = "NAS_1" - nas1_configuration { - type= "SSD_250" - size= 1200 - } -} -`, rName)) -} - func testAccKxVolumeConfigBase(rName string) string { return fmt.Sprintf(` data "aws_caller_identity" "current" {} data "aws_partition" "current" {} - -output "account_id" { - value = data.aws_caller_identity.current.account_id -} resource "aws_kms_key" "test" { deletion_window_in_days = 7 @@ -175,49 +153,49 @@ resource "aws_finspace_kx_environment" "test" { data "aws_iam_policy_document" "key_policy" { statement { - actions = [ - "kms:Decrypt", - "kms:GenerateDataKey" - ] - - resources = [ - aws_kms_key.test.arn, - ] - - principals { - type = "Service" - identifiers = ["finspace.amazonaws.com"] - } - - condition { - test = "ArnLike" - variable = "aws:SourceArn" - values = ["${aws_finspace_kx_environment.test.arn}/*"] - } - - condition { - test = "StringEquals" - variable = "aws:SourceAccount" - values = [data.aws_caller_identity.current.account_id] - } + actions = [ + "kms:Decrypt", + "kms:GenerateDataKey" + ] + + resources = [ + aws_kms_key.test.arn, + ] + + principals { + type = "Service" + identifiers = ["finspace.amazonaws.com"] + } + + condition { + test = "ArnLike" + variable = "aws:SourceArn" + values = ["${aws_finspace_kx_environment.test.arn}/*"] + } + + condition { + test = "StringEquals" + variable = "aws:SourceAccount" + values = [data.aws_caller_identity.current.account_id] + } } statement { - actions = [ - "kms:*", - ] - + actions = [ + "kms:*", + ] + resources = [ - "*", - ] - - principals { - type = "AWS" - identifiers = ["arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:root"] - } + "*", + ] + + principals { + type = "AWS" + identifiers = ["arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:root"] + } } } - + resource "aws_kms_key_policy" "test" { key_id = aws_kms_key.test.id policy = data.aws_iam_policy_document.key_policy.json @@ -240,16 +218,16 @@ resource "aws_security_group" "test" { ingress { from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] } egress { from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] } } @@ -268,3 +246,21 @@ resource "aws_route" "r" { } `, rName) } + +func testAccKxVolumeConfig_basic(rName string) string { + return 
acctest.ConfigCompose( + testAccKxVolumeConfigBase(rName), + fmt.Sprintf(` +resource "aws_finspace_kx_volume" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + availability_zones = [aws_finspace_kx_environment.test.availability_zones[0]] + az_mode = "SINGLE" + type = "NAS_1" + nas1_configuration { + type= "SSD_250" + size= 1200 + } +} +`, rName)) +} From 65bd94e709d92e7759709948a8d7043738510fb1 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 10:07:48 -0500 Subject: [PATCH 09/12] r/aws_finspace_kx_volume(doc): fmt config, tidy descriptions --- .../docs/r/finspace_kx_volume.html.markdown | 48 +++++++++---------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/website/docs/r/finspace_kx_volume.html.markdown b/website/docs/r/finspace_kx_volume.html.markdown index b573a81efddf..35e75bf3e32a 100644 --- a/website/docs/r/finspace_kx_volume.html.markdown +++ b/website/docs/r/finspace_kx_volume.html.markdown @@ -16,15 +16,15 @@ Terraform resource for managing an AWS FinSpace Kx Volume. ```terraform resource "aws_finspace_kx_volume" "example" { - name = "my-tf-kx-volume" - environment_id = aws_finspace_kx_environment.example.id - availability_zones = "use1-az2" - az_mode = "SINGLE" - type = "NAS_1" - nas1_configuration { - type= "SSD_250" - size= 1200 - } + name = "my-tf-kx-volume" + environment_id = aws_finspace_kx_environment.example.id + availability_zones = "use1-az2" + az_mode = "SINGLE" + type = "NAS_1" + nas1_configuration { + size= 1200 + type= "SSD_250" + } } ``` @@ -33,24 +33,24 @@ resource "aws_finspace_kx_volume" "example" { The following arguments are required: * `az_mode` - (Required) The number of availability zones you want to assign per volume. Currently, Finspace only support SINGLE for volumes. - * SINGLE - Assigns one availability zone per volume. + * `SINGLE` - Assigns one availability zone per volume. * `environment_id` - (Required) A unique identifier for the kdb environment, whose clusters can attach to the volume. * `name` - (Required) Unique name for the volumr that you want to create. -* `type` - (Required) The type of file system volume. Currently, FinSpace only supports NAS_1 volume type. When you select NAS_1 volume type, you must also provide nas1Configuration. +* `type` - (Required) The type of file system volume. Currently, FinSpace only supports the `NAS_1` volume type. When you select the `NAS_1` volume type, you must also provide `nas1_configuration`. * `availability_zones` - (Required) The identifier of the AWS Availability Zone IDs. The following arguments are optional: -* `nas1_configuration` - (Optional) Specifies the configuration for the Network attached storage (NAS_1) file system volume. This parameter is required when you choose volumeType as NAS_1. +* `nas1_configuration` - (Optional) Specifies the configuration for the Network attached storage (`NAS_1`) file system volume. This parameter is required when `volume_type` is `NAS_1`. See [`nas1_configuration` Argument Reference](#nas1_configuration-argument-reference) below. * `description` - (Optional) Description of the volume. * `tags` - (Optional) A list of key-value pairs to label the volume. You can add up to 50 tags to a volume -### nas1_configuration +### `nas1_configuration` Argument Reference -The nas1_configuration block supports the following arguments: +The `nas1_configuration` block supports the following arguments: * `size` - (Required) The size of the network attached storage. 
-* `security_group_ids` - (Required) The type of the network attached storage. +* `type` - (Required) The type of the network attached storage. ## Attribute Reference @@ -59,15 +59,15 @@ This resource exports the following attributes in addition to the arguments abov * `arn` - Amazon Resource Name (ARN) identifier of the KX volume. * `created_timestamp` - The timestamp at which the volume was created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000. * `status` - The status of volume creation. - * CREATING – The volume creation is in progress. - * CREATE_FAILED – The volume creation has failed. - * ACTIVE – The volume is active. - * UPDATING – The volume is in the process of being updated. - * UPDATE_FAILED – The update action failed. - * UPDATED – The volume is successfully updated. - * DELETING – The volume is in the process of being deleted. - * DELETE_FAILED – The system failed to delete the volume. - * DELETED – The volume is successfully deleted. + * `CREATING` – The volume creation is in progress. + * `CREATE_FAILED` – The volume creation has failed. + * `ACTIVE` – The volume is active. + * `UPDATING` – The volume is in the process of being updated. + * `UPDATE_FAILED` – The update action failed. + * `UPDATED` – The volume is successfully updated. + * `DELETING` – The volume is in the process of being deleted. + * `DELETE_FAILED` – The system failed to delete the volume. + * `DELETED` – The volume is successfully deleted. * `status_reason` - The error message when a failed state occurs. * `last_modified_timestamp` - Last timestamp at which the volume was updated in FinSpace. Value determined as epoch time in seconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000. 
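
Note on the test refactor earlier in this series: the acceptance-test hunks replace direct `conn.GetKxVolume` calls with a shared `tffinspace.FindKxVolumeByID` finder, but the finder's implementation is not reproduced in this excerpt. The sketch below is only an illustration of what such a finder conventionally looks like, not the provider's actual code: the composite ID layout (`environment_id,volume_name`) and the exact error-wrapping choices are assumptions, and the authoritative version lives in `internal/service/finspace/kx_volume.go`.

```go
// Illustrative sketch only — not the committed implementation.
// Assumptions (not taken from the patches above): the composite resource ID is
// "<environment_id>,<volume_name>", and not-found conditions are surfaced as a
// retry.NotFoundError in the provider's usual finder style.
package finspace

import (
	"context"
	"errors"
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/finspace"
	"github.com/aws/aws-sdk-go-v2/service/finspace/types"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
)

func FindKxVolumeByID(ctx context.Context, conn *finspace.Client, id string) (*finspace.GetKxVolumeOutput, error) {
	// Assumed ID layout: "<environment_id>,<volume_name>".
	parts := strings.Split(id, ",")
	if len(parts) != 2 {
		return nil, fmt.Errorf("unexpected format for ID (%s): expected environment_id,volume_name", id)
	}

	in := &finspace.GetKxVolumeInput{
		EnvironmentId: aws.String(parts[0]),
		VolumeName:    aws.String(parts[1]),
	}

	out, err := conn.GetKxVolume(ctx, in)
	if err != nil {
		var nfe *types.ResourceNotFoundException
		if errors.As(err, &nfe) {
			// Wrap the service's not-found error so callers (waiters, destroy
			// checks) can distinguish "gone" from a real API failure.
			return nil, &retry.NotFoundError{
				LastError:   err,
				LastRequest: in,
			}
		}
		return nil, err
	}

	if out == nil || out.VolumeName == nil {
		return nil, &retry.NotFoundError{LastRequest: in}
	}

	return out, nil
}
```

Because the wrapper keeps the original service error in `LastError`, an `errors.As` check for `types.ResourceNotFoundException` — like the one in the updated `testAccCheckKxVolumeDestroy` above — can still detect the underlying not-found condition if the wrapper unwraps to it; that detail is likewise an assumption about the finder's behavior rather than something shown in these patches.
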
From e68fd67a607911841e9838dcf92b2ff9a4802c62 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 10:09:59 -0500 Subject: [PATCH 10/12] r/aws_finspace_kx_volume(doc): fmt config again --- website/docs/r/finspace_kx_volume.html.markdown | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/docs/r/finspace_kx_volume.html.markdown b/website/docs/r/finspace_kx_volume.html.markdown index 35e75bf3e32a..0ddc66dc9e6f 100644 --- a/website/docs/r/finspace_kx_volume.html.markdown +++ b/website/docs/r/finspace_kx_volume.html.markdown @@ -22,8 +22,8 @@ resource "aws_finspace_kx_volume" "example" { az_mode = "SINGLE" type = "NAS_1" nas1_configuration { - size= 1200 - type= "SSD_250" + size = 1200 + type = "SSD_250" } } ``` From 1ae3cedb7a2a8ced747b2c529cc12d3fda607d44 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 10:12:38 -0500 Subject: [PATCH 11/12] r/aws_finspace_kx_volume(test): fmt config again --- internal/service/finspace/kx_volume_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/service/finspace/kx_volume_test.go b/internal/service/finspace/kx_volume_test.go index 616261e50971..fa80b8b039a5 100644 --- a/internal/service/finspace/kx_volume_test.go +++ b/internal/service/finspace/kx_volume_test.go @@ -258,8 +258,8 @@ resource "aws_finspace_kx_volume" "test" { az_mode = "SINGLE" type = "NAS_1" nas1_configuration { - type= "SSD_250" - size= 1200 + type = "SSD_250" + size = 1200 } } `, rName)) From cf484a231ec772f65d032f55413505782b6c8459 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 10:35:13 -0500 Subject: [PATCH 12/12] r/aws_finspace_kx_volume: nolintlint --- internal/service/finspace/kx_volume.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/service/finspace/kx_volume.go b/internal/service/finspace/kx_volume.go index 72bad740dccf..0446eb223cea 100644 --- a/internal/service/finspace/kx_volume.go +++ b/internal/service/finspace/kx_volume.go @@ -346,7 +346,7 @@ func waitKxVolumeCreated(ctx context.Context, conn *finspace.Client, id string, return nil, err } -func waitKxVolumeUpdated(ctx context.Context, conn *finspace.Client, id string, timeout time.Duration) (*finspace.GetKxVolumeOutput, error) { //nolint:unparam +func waitKxVolumeUpdated(ctx context.Context, conn *finspace.Client, id string, timeout time.Duration) (*finspace.GetKxVolumeOutput, error) { stateConf := &retry.StateChangeConf{ Pending: enum.Slice(types.KxVolumeStatusCreating, types.KxVolumeStatusUpdating), Target: enum.Slice(types.KxVolumeStatusActive),