From 185ed79e21a351b4029b5f0de2600a1dd1be9888 Mon Sep 17 00:00:00 2001 From: Albert Silva Date: Fri, 21 Oct 2022 10:13:49 -0700 Subject: [PATCH 01/32] add resource aws_fsx_filecache --- internal/service/fsx/file_cache.go | 778 +++++++++++++++++++++++++++++ 1 file changed, 778 insertions(+) create mode 100644 internal/service/fsx/file_cache.go diff --git a/internal/service/fsx/file_cache.go b/internal/service/fsx/file_cache.go new file mode 100644 index 000000000000..6e3991b626e8 --- /dev/null +++ b/internal/service/fsx/file_cache.go @@ -0,0 +1,778 @@ +package fsx + +import ( + "context" + "errors" + "log" + "regexp" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/fsx" + "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/flex" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/internal/verify" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func ResourceFileCache() *schema.Resource { + return &schema.Resource{ + CreateWithoutTimeout: resourceFileCacheCreate, + ReadWithoutTimeout: resourceFileCacheRead, + UpdateWithoutTimeout: resourceFileCacheUpdate, + DeleteWithoutTimeout: resourceFileCacheDelete, + + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Update: schema.DefaultTimeout(30 * time.Minute), + Delete: 
schema.DefaultTimeout(30 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "copy_tags_to_data_repository_associations": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "data_repository_associations": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 8, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "association_id": { + Type: schema.TypeString, + Computed: true, + }, + "data_repository_path": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.All( + validation.StringLenBetween(3, 4357), + ), + }, + "data_repository_subdirectories": { + Type: schema.TypeList, + Optional: true, + MaxItems: 500, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 4096), + ), + }, + }, + "file_cache_id": { + Type: schema.TypeString, + Computed: true, + }, + "file_cache_path": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 4096), + ), + }, + "file_system_id": { + Type: schema.TypeString, + Computed: true, + }, + "file_system_path": { + Type: schema.TypeString, + Computed: true, + }, + "imported_file_chunk_size": { + Type: schema.TypeInt, + Computed: true, + }, + "nfs": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dns_ips": { + Type: schema.TypeSet, + Optional: true, + MaxItems: 10, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.All( + validation.StringLenBetween(7, 15), + validation.StringMatch(regexp.MustCompile(`^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$`), "invalid pattern"), + ), + }, + }, + "version": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.All( + validation.StringInSlice(fsx.NfsVersion_Values(), false), + ), + }, + }, + }, + }, + "resource_arn": { + Type: 
schema.TypeString, + Computed: true, + }, + "tags": { + Type: schema.TypeMap, + Computed: true, + }, + }, + }, + }, + "data_repository_association_ids": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "dns_name": { + Type: schema.TypeString, + Computed: true, + }, + "file_cache_id": { + Type: schema.TypeString, + Computed: true, + }, + "file_cache_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringInSlice(fsx.FileCacheType_Values(), false), + ), + }, + "file_cache_type_version": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 20), + validation.StringMatch(regexp.MustCompile(`^[0-9](.[0-9]*)*$`), "invalid pattern"), + ), + }, + "kms_key_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: verify.ValidARN, + }, + "lustre_configuration": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "deployment_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringInSlice(fsx.FileCacheLustreDeploymentType_Values(), false), + ), + }, + "log_configuration": { + Type: schema.TypeList, + Computed: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "destination": { + Type: schema.TypeString, + Computed: true, + }, + "level": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "metadata_configuration": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + MaxItems: 8, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "storage_capacity": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.IntBetween(0, 2147483647), + ), + }, + }, + }, + }, + "mount_name": { + Type: 
schema.TypeString, + Computed: true, + }, + "per_unit_storage_throughput": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.IntBetween(12, 1000), + ), + }, + "weekly_maintenance_start_time": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.All( + validation.StringLenBetween(7, 7), + validation.StringMatch(regexp.MustCompile(`^[1-7]:([01]\d|2[0-3]):?([0-5]\d)$`), "invalid pattern"), + ), + }, + }, + }, + }, + "network_interface_ids": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "resource_arn": { + Type: schema.TypeString, + Computed: true, + }, + "security_group_ids": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 50, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "storage_capacity": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.IntBetween(0, 2147483647), + ), + }, + "subnet_ids": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + MaxItems: 50, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "tags": tftags.TagsSchema(), + "tags_all": tftags.TagsSchemaComputed(), + "vpc_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + CustomizeDiff: verify.SetTagsDiff, + } +} + +const ( + ResNameFileCache = "File Cache" +) + +func resourceFileCacheCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*conns.AWSClient).FSxConn + + input := &fsx.CreateFileCacheInput{ + ClientRequestToken: aws.String(resource.UniqueId()), + FileCacheType: aws.String(d.Get("file_cache_type").(string)), + FileCacheTypeVersion: aws.String(d.Get("file_cache_type_version").(string)), + StorageCapacity: aws.Int64(int64(d.Get("storage_capacity").(int))), + SubnetIds: 
flex.ExpandStringList(d.Get("subnet_ids").([]interface{})), + } + if v, ok := d.GetOk("copy_tags_to_data_repository_associations"); ok { + input.CopyTagsToDataRepositoryAssociations = aws.Bool(v.(bool)) + } + if v, ok := d.GetOk("data_repository_associations"); ok && len(v.([]interface{})) > 0 { + input.DataRepositoryAssociations = expandDataRepositoryAssociations(v.([]interface{})) + } + if v, ok := d.GetOk("kms_key_id"); ok { + input.KmsKeyId = aws.String(v.(string)) + } + if v, ok := d.GetOk("lustre_configuration"); ok && len(v.([]interface{})) > 0 { + input.LustreConfiguration = expandCreateFileCacheLustreConfiguration(v.([]interface{})) + } + if v, ok := d.GetOk("security_group_ids"); ok { + input.SecurityGroupIds = flex.ExpandStringList(v.([]interface{})) + } + + defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig + tags := defaultTagsConfig.MergeTags(tftags.New(d.Get("tags").(map[string]interface{}))) + + if len(tags) > 0 { + input.Tags = Tags(tags.IgnoreAWS()) + } + + result, err := conn.CreateFileCacheWithContext(ctx, input) + if err != nil { + return create.DiagError(names.FSx, create.ErrActionCreating, ResNameFileCache, "", err) + } + + d.SetId(aws.StringValue(result.FileCache.FileCacheId)) + + if _, err := waitFileCacheCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { + return create.DiagError(names.FSx, create.ErrActionWaitingForCreation, ResNameFileCache, d.Id(), err) + } + + return resourceFileCacheRead(ctx, d, meta) +} + +func resourceFileCacheRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*conns.AWSClient).FSxConn + defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig + ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig + + filecache, err := findFileCacheByID(ctx, conn, d.Id()) + + if !d.IsNewResource() && tfresource.NotFound(err) { + log.Printf("[WARN] FSx FileCache (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil 
+ } + + if err != nil { + return create.DiagError(names.FSx, create.ErrActionReading, ResNameFileCache, d.Id(), err) + } + + d.Set("dns_name", filecache.DNSName) + d.Set("file_cache_type", filecache.FileCacheType) + d.Set("file_cache_type_version", filecache.FileCacheTypeVersion) + d.Set("kms_key_id", filecache.KmsKeyId) + d.Set("owner_id", filecache.OwnerId) + d.Set("resource_arn", filecache.ResourceARN) + d.Set("storage_capacity", filecache.StorageCapacity) + d.Set("subnet_ids", aws.StringValueSlice(filecache.SubnetIds)) + d.Set("vpc_id", filecache.VpcId) + + if err := d.Set("data_repository_association_ids", aws.StringValueSlice(filecache.DataRepositoryAssociationIds)); err != nil { + return create.DiagError(names.FSx, create.ErrActionSetting, ResNameFileCache, d.Id(), err) + } + if err := d.Set("lustre_configuration", flattenFileCacheLustreConfiguration(filecache.LustreConfiguration)); err != nil { + return create.DiagError(names.FSx, create.ErrActionSetting, ResNameFileCache, d.Id(), err) + } + if err := d.Set("network_interface_ids", aws.StringValueSlice(filecache.NetworkInterfaceIds)); err != nil { + return create.DiagError(names.FSx, create.ErrActionSetting, ResNameFileCache, d.Id(), err) + } + if err := d.Set("subnet_ids", aws.StringValueSlice(filecache.SubnetIds)); err != nil { + return create.DiagError(names.FSx, create.ErrActionSetting, ResNameFileCache, d.Id(), err) + } + + // Lookup and set Data Repository Associations + data_repository_associations, err := flattenDataRepositoryAssociations(ctx, conn, meta, filecache.DataRepositoryAssociationIds) + + if err := d.Set("data_repository_associations", data_repository_associations); err != nil { + return create.DiagError(names.FSx, create.ErrActionSetting, ResNameFileCache, d.Id(), err) + } + + //Volume tags do not get returned with describe call so need to make a separate list tags call + tags, tagserr := ListTags(conn, *filecache.ResourceARN) + + if tagserr != nil { + return create.DiagError(names.FSx, 
create.ErrActionReading, ResNameFileCache, d.Id(), err) + } else { + tags = tags.IgnoreAWS().IgnoreConfig(ignoreTagsConfig) + } + if err := d.Set("tags", tags.RemoveDefaultConfig(defaultTagsConfig).Map()); err != nil { + return create.DiagError(names.FSx, create.ErrActionSetting, ResNameFileCache, d.Id(), err) + } + if err := d.Set("tags_all", tags.Map()); err != nil { + return create.DiagError(names.FSx, create.ErrActionSetting, ResNameFileCache, d.Id(), err) + } + return nil +} + +func resourceFileCacheUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + + conn := meta.(*conns.AWSClient).FSxConn + + if d.HasChange("tags_all") { + o, n := d.GetChange("tags_all") + + if err := UpdateTags(conn, d.Get("arn").(string), o, n); err != nil { + return create.DiagError(names.FSx, create.ErrActionUpdating, ResNameFileCache, d.Id(), err) + } + } + + update := false + + input := &fsx.UpdateFileCacheInput{ + ClientRequestToken: aws.String(resource.UniqueId()), + FileCacheId: aws.String(d.Id()), + LustreConfiguration: &fsx.UpdateFileCacheLustreConfiguration{}, + } + + if d.HasChanges("lustre_configuration") { + input.LustreConfiguration = expandUpdateFileCacheLustreConfiguration(d.Get("lustre_configuration").([]interface{})) + update = true + } + + if !update { + return nil + } + + log.Printf("[DEBUG] Updating FSx FileCache (%s): %#v", d.Id(), input) + result, err := conn.UpdateFileCacheWithContext(ctx, input) + if err != nil { + return create.DiagError(names.FSx, create.ErrActionUpdating, ResNameFileCache, d.Id(), err) + } + + if _, err := waitFileCacheUpdated(ctx, conn, aws.StringValue(result.FileCache.FileCacheId), d.Timeout(schema.TimeoutUpdate)); err != nil { + return create.DiagError(names.FSx, create.ErrActionWaitingForUpdate, ResNameFileCache, d.Id(), err) + } + + return resourceFileCacheRead(ctx, d, meta) +} + +func resourceFileCacheDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := 
meta.(*conns.AWSClient).FSxConn + log.Printf("[INFO] Deleting FSx FileCache %s", d.Id()) + + _, err := conn.DeleteFileCacheWithContext(ctx, &fsx.DeleteFileCacheInput{ + ClientRequestToken: aws.String(resource.UniqueId()), + FileCacheId: aws.String(d.Id()), + }) + + if tfawserr.ErrCodeEquals(err, fsx.ErrCodeFileCacheNotFound) { + return nil + } + if err != nil { + return create.DiagError(names.FSx, create.ErrActionDeleting, ResNameFileCache, d.Id(), err) + } + if _, err := waitFileCacheDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { + return create.DiagError(names.FSx, create.ErrActionWaitingForDeletion, ResNameFileCache, d.Id(), err) + } + + return nil +} + +func waitFileCacheCreated(ctx context.Context, conn *fsx.FSx, id string, timeout time.Duration) (*fsx.FileCache, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{fsx.FileCacheLifecycleCreating}, + Target: []string{fsx.FileCacheLifecycleAvailable}, + Refresh: statusFileCache(ctx, conn, id), + Timeout: timeout, + Delay: 30 * time.Second, + } + + outputRaw, err := stateConf.WaitForState() + + if output, ok := outputRaw.(*fsx.FileCache); ok { + if status, details := aws.StringValue(output.Lifecycle), output.FailureDetails; status == fsx.FileCacheLifecycleFailed && details != nil { + tfresource.SetLastError(err, errors.New(aws.StringValue(output.FailureDetails.Message))) + } + return output, err + } + return nil, err +} + +func waitFileCacheUpdated(ctx context.Context, conn *fsx.FSx, id string, timeout time.Duration) (*fsx.FileCache, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{fsx.FileCacheLifecycleUpdating}, + Target: []string{fsx.FileCacheLifecycleAvailable}, + Refresh: statusFileCache(ctx, conn, id), + Timeout: timeout, + Delay: 30 * time.Second, + } + + outputRaw, err := stateConf.WaitForState() + + if output, ok := outputRaw.(*fsx.FileCache); ok { + if status, details := aws.StringValue(output.Lifecycle), output.FailureDetails; status == 
fsx.FileCacheLifecycleFailed && details != nil { + tfresource.SetLastError(err, errors.New(aws.StringValue(output.FailureDetails.Message))) + } + return output, err + } + + return nil, err +} + +func waitFileCacheDeleted(ctx context.Context, conn *fsx.FSx, id string, timeout time.Duration) (*fsx.FileCache, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{fsx.FileCacheLifecycleAvailable, fsx.FileCacheLifecycleDeleting}, + Target: []string{}, + Refresh: statusFileCache(ctx, conn, id), + Timeout: timeout, + Delay: 30 * time.Second, + } + + outputRaw, err := stateConf.WaitForState() + + if output, ok := outputRaw.(*fsx.FileCache); ok { + if status, details := aws.StringValue(output.Lifecycle), output.FailureDetails; status == fsx.FileCacheLifecycleFailed && details != nil { + tfresource.SetLastError(err, errors.New(aws.StringValue(output.FailureDetails.Message))) + } + return output, err + } + + return nil, err +} + +func statusFileCache(ctx context.Context, conn *fsx.FSx, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + out, err := findFileCacheByID(ctx, conn, id) + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return out, aws.StringValue(out.Lifecycle), nil + } +} + +func findFileCacheByID(ctx context.Context, conn *fsx.FSx, id string) (*fsx.FileCache, error) { + + input := &fsx.DescribeFileCachesInput{ + FileCacheIds: []*string{aws.String(id)}, + } + var fileCaches []*fsx.FileCache + + err := conn.DescribeFileCachesPages(input, func(page *fsx.DescribeFileCachesOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + fileCaches = append(fileCaches, page.FileCaches...) 
+ + return !lastPage + }) + + if tfawserr.ErrCodeEquals(err, fsx.ErrCodeFileCacheNotFound) { + return nil, &resource.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + if err != nil { + return nil, err + } + if len(fileCaches) == 0 || fileCaches[0] == nil { + return nil, tfresource.NewEmptyResultError(input) + } + if count := len(fileCaches); count > 1 { + return nil, tfresource.NewTooManyResultsError(count, input) + } + return fileCaches[0], nil +} + +func flattenDataRepositoryAssociations(ctx context.Context, conn *fsx.FSx, meta interface{}, dataRepositoryAssociationIds []*string) ([]interface{}, error) { + in := &fsx.DescribeDataRepositoryAssociationsInput{ + AssociationIds: dataRepositoryAssociationIds, + } + result, err := conn.DescribeDataRepositoryAssociationsWithContext(ctx, in) + + if tfawserr.ErrCodeEquals(err, fsx.ErrCodeFileCacheNotFound) { + return nil, &resource.NotFoundError{ + LastError: err, + LastRequest: in, + } + } + if err != nil { + return nil, err + } + if result == nil || result.Associations == nil { + return nil, tfresource.NewEmptyResultError(in) + } + + ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig + dataRepositoryAssociationsList := []interface{}{} + + for _, dataRepositoryAssociation := range result.Associations { + tags := KeyValueTags(dataRepositoryAssociation.Tags).IgnoreAWS().IgnoreConfig(ignoreTagsConfig) + + values := map[string]interface{}{ + "association_id": dataRepositoryAssociation.AssociationId, + "data_repository_path": dataRepositoryAssociation.DataRepositoryPath, + "data_repository_subdirectories": aws.StringValueSlice(dataRepositoryAssociation.DataRepositorySubdirectories), + "file_cache_id": dataRepositoryAssociation.FileCacheId, + "file_cache_path": dataRepositoryAssociation.FileCachePath, + "imported_file_chunk_size": dataRepositoryAssociation.ImportedFileChunkSize, + "nfs": flattenNFSDataRepositoryConfiguration(dataRepositoryAssociation.NFS), + "resource_arn": 
dataRepositoryAssociation.ResourceARN, + "tags": tags, + } + + dataRepositoryAssociationsList = append(dataRepositoryAssociationsList, values) + } + return dataRepositoryAssociationsList, nil +} + +func flattenDataRepositoryAssociationTags(tags []*fsx.Tag) []map[string]interface{} { + + dataRepositoryAssociationTags := make([]map[string]interface{}, 0) + + for _, tag := range tags { + values := map[string]interface{}{ + "key": tag.Key, + "value": tag.Value, + } + dataRepositoryAssociationTags = append(dataRepositoryAssociationTags, values) + } + return dataRepositoryAssociationTags +} + +func flattenNFSDataRepositoryConfiguration(nfsDataRepositoryConfiguration *fsx.NFSDataRepositoryConfiguration) []map[string]interface{} { + if nfsDataRepositoryConfiguration == nil { + return []map[string]interface{}{} + } + + values := map[string]interface{}{ + "dns_ips": aws.StringValueSlice(nfsDataRepositoryConfiguration.DnsIps), + "version": aws.StringValue(nfsDataRepositoryConfiguration.Version), + } + return []map[string]interface{}{values} +} + +func flattenFileCacheLustreConfiguration(fileCacheLustreConfiguration *fsx.FileCacheLustreConfiguration) []interface{} { + if fileCacheLustreConfiguration == nil { + return []interface{}{} + } + values := make(map[string]interface{}) + + if fileCacheLustreConfiguration.DeploymentType != nil { + values["deployment_type"] = aws.StringValue(fileCacheLustreConfiguration.DeploymentType) + } + if fileCacheLustreConfiguration.LogConfiguration != nil { + values["log_configuration"] = flattenLustreLogConfiguration(fileCacheLustreConfiguration.LogConfiguration) + } + if fileCacheLustreConfiguration.MetadataConfiguration != nil { + values["metadata_configuration"] = flattenFileCacheLustreMetadataConfiguration(fileCacheLustreConfiguration.MetadataConfiguration) + } + if fileCacheLustreConfiguration.MountName != nil { + values["mount_name"] = aws.StringValue(fileCacheLustreConfiguration.MountName) + } + if 
fileCacheLustreConfiguration.PerUnitStorageThroughput != nil { + values["per_unit_storage_throughput"] = aws.Int64Value(fileCacheLustreConfiguration.PerUnitStorageThroughput) + } + if fileCacheLustreConfiguration.WeeklyMaintenanceStartTime != nil { + values["weekly_maintenance_start_time"] = aws.StringValue(fileCacheLustreConfiguration.WeeklyMaintenanceStartTime) + } + + return []interface{}{values} +} + +func flattenFileCacheLustreMetadataConfiguration(fileCacheLustreMetadataConfiguration *fsx.FileCacheLustreMetadataConfiguration) []interface{} { + values := make(map[string]interface{}) + if fileCacheLustreMetadataConfiguration.StorageCapacity != nil { + values["storage_capacity"] = aws.Int64Value(fileCacheLustreMetadataConfiguration.StorageCapacity) + } + + return []interface{}{values} +} + +func expandDataRepositoryAssociations(l []interface{}) []*fsx.FileCacheDataRepositoryAssociation { + dataRepositoryAssociations := []*fsx.FileCacheDataRepositoryAssociation{} + + for _, dataRepositoryAssociation := range l { + tfMap := dataRepositoryAssociation.(map[string]interface{}) + req := &fsx.FileCacheDataRepositoryAssociation{} + + if v, ok := tfMap["data_repository_path"].(string); ok { + req.DataRepositoryPath = aws.String(v) + } + if v, ok := tfMap["data_repository_subdirectories"]; ok { + req.DataRepositorySubdirectories = flex.ExpandStringList(v.([]interface{})) + } + if v, ok := tfMap["file_cache_path"].(string); ok { + req.FileCachePath = aws.String(v) + } + if v, ok := tfMap["nfs"]; ok && len(v.([]interface{})) > 0 { + req.NFS = expandFileCacheNFSConfiguration(v.(map[string]interface{})) + } + dataRepositoryAssociations = append(dataRepositoryAssociations, req) + } + + return dataRepositoryAssociations +} + +func expandFileCacheNFSConfiguration(l map[string]interface{}) *fsx.FileCacheNFSConfiguration { + req := &fsx.FileCacheNFSConfiguration{} + if v, ok := l["dns_ips"]; ok { + req.DnsIps = flex.ExpandStringList(v.([]interface{})) + } + if v, ok := 
l["version"].(string); ok { + req.Version = aws.String(v) + } + + return req +} + +func expandUpdateFileCacheLustreConfiguration(l []interface{}) *fsx.UpdateFileCacheLustreConfiguration { + if len(l) == 0 || l[0] == nil { + return nil + } + + data := l[0].(map[string]interface{}) + req := &fsx.UpdateFileCacheLustreConfiguration{} + + if v, ok := data["weekly_maintenance_start_time"].(string); ok { + req.WeeklyMaintenanceStartTime = aws.String(v) + } + + return req +} + +func expandCreateFileCacheLustreConfiguration(l []interface{}) *fsx.CreateFileCacheLustreConfiguration { + if len(l) == 0 || l[0] == nil { + return nil + } + data := l[0].(map[string]interface{}) + req := &fsx.CreateFileCacheLustreConfiguration{} + + if v, ok := data["deployment_type"].(string); ok { + req.DeploymentType = aws.String(v) + } + if v, ok := data["metadata_configuration"]; ok && len(v.([]interface{})) > 0 { + req.MetadataConfiguration = expandFileCacheLustreMetadataConfiguration(v.([]interface{})) + } + if v, ok := data["per_unit_storage_throughput"].(int); ok { + req.PerUnitStorageThroughput = aws.Int64(int64(v)) + } + if v, ok := data["weekly_maintenance_start_time"].(string); ok { + req.WeeklyMaintenanceStartTime = aws.String(v) + } + + return req +} + +func expandFileCacheLustreMetadataConfiguration(l []interface{}) *fsx.FileCacheLustreMetadataConfiguration { + if len(l) == 0 || l[0] == nil { + return nil + } + data := l[0].(map[string]interface{}) + req := &fsx.FileCacheLustreMetadataConfiguration{} + + if v, ok := data["storage_capacity"].(int); ok { + req.StorageCapacity = aws.Int64(int64(v)) + } + return req +} From 7020b70f79d4bd251dacfc865eaae23ee3559a83 Mon Sep 17 00:00:00 2001 From: Albert Silva Date: Fri, 21 Oct 2022 10:24:00 -0700 Subject: [PATCH 02/32] add changelog --- .changelog/27384.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/27384.txt diff --git a/.changelog/27384.txt b/.changelog/27384.txt new file mode 100644 index 
000000000000..fb0cfabc23b4 --- /dev/null +++ b/.changelog/27384.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_fsx_filecache +``` \ No newline at end of file From 5b3b8903ceb640f2d1c9dc85d8d9661b5e1b48b4 Mon Sep 17 00:00:00 2001 From: Albert Silva Date: Fri, 21 Oct 2022 11:18:44 -0700 Subject: [PATCH 03/32] fixed arn parameter name for tag updates --- internal/service/fsx/file_cache.go | 48 +++++++++++++----------------- 1 file changed, 21 insertions(+), 27 deletions(-) diff --git a/internal/service/fsx/file_cache.go b/internal/service/fsx/file_cache.go index 6e3991b626e8..916c6e20d4db 100644 --- a/internal/service/fsx/file_cache.go +++ b/internal/service/fsx/file_cache.go @@ -300,6 +300,8 @@ const ( func resourceFileCacheCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { conn := meta.(*conns.AWSClient).FSxConn + defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig + tags := defaultTagsConfig.MergeTags(tftags.New(d.Get("tags").(map[string]interface{}))) input := &fsx.CreateFileCacheInput{ ClientRequestToken: aws.String(resource.UniqueId()), @@ -323,15 +325,12 @@ func resourceFileCacheCreate(ctx context.Context, d *schema.ResourceData, meta i if v, ok := d.GetOk("security_group_ids"); ok { input.SecurityGroupIds = flex.ExpandStringList(v.([]interface{})) } - - defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig - tags := defaultTagsConfig.MergeTags(tftags.New(d.Get("tags").(map[string]interface{}))) - if len(tags) > 0 { input.Tags = Tags(tags.IgnoreAWS()) } result, err := conn.CreateFileCacheWithContext(ctx, input) + if err != nil { return create.DiagError(names.FSx, create.ErrActionCreating, ResNameFileCache, "", err) } @@ -416,38 +415,33 @@ func resourceFileCacheUpdate(ctx context.Context, d *schema.ResourceData, meta i if d.HasChange("tags_all") { o, n := d.GetChange("tags_all") - if err := UpdateTags(conn, d.Get("arn").(string), o, n); err != nil { + if err := UpdateTags(conn, 
d.Get("resource_arn").(string), o, n); err != nil { return create.DiagError(names.FSx, create.ErrActionUpdating, ResNameFileCache, d.Id(), err) } } - update := false - - input := &fsx.UpdateFileCacheInput{ - ClientRequestToken: aws.String(resource.UniqueId()), - FileCacheId: aws.String(d.Id()), - LustreConfiguration: &fsx.UpdateFileCacheLustreConfiguration{}, - } + if d.HasChangesExcept("tags_all") { + input := &fsx.UpdateFileCacheInput{ + ClientRequestToken: aws.String(resource.UniqueId()), + FileCacheId: aws.String(d.Id()), + LustreConfiguration: &fsx.UpdateFileCacheLustreConfiguration{}, + } - if d.HasChanges("lustre_configuration") { - input.LustreConfiguration = expandUpdateFileCacheLustreConfiguration(d.Get("lustre_configuration").([]interface{})) - update = true - } + if d.HasChanges("lustre_configuration") { + input.LustreConfiguration = expandUpdateFileCacheLustreConfiguration(d.Get("lustre_configuration").([]interface{})) + } - if !update { - return nil - } + log.Printf("[DEBUG] Updating FSx FileCache (%s): %#v", d.Id(), input) - log.Printf("[DEBUG] Updating FSx FileCache (%s): %#v", d.Id(), input) - result, err := conn.UpdateFileCacheWithContext(ctx, input) - if err != nil { - return create.DiagError(names.FSx, create.ErrActionUpdating, ResNameFileCache, d.Id(), err) - } + result, err := conn.UpdateFileCacheWithContext(ctx, input) + if err != nil { + return create.DiagError(names.FSx, create.ErrActionUpdating, ResNameFileCache, d.Id(), err) + } + if _, err := waitFileCacheUpdated(ctx, conn, aws.StringValue(result.FileCache.FileCacheId), d.Timeout(schema.TimeoutUpdate)); err != nil { + return create.DiagError(names.FSx, create.ErrActionWaitingForUpdate, ResNameFileCache, d.Id(), err) + } - if _, err := waitFileCacheUpdated(ctx, conn, aws.StringValue(result.FileCache.FileCacheId), d.Timeout(schema.TimeoutUpdate)); err != nil { - return create.DiagError(names.FSx, create.ErrActionWaitingForUpdate, ResNameFileCache, d.Id(), err) } - return 
resourceFileCacheRead(ctx, d, meta) } From fc80c64621aa724777e61a862a6264ebb4a0ef78 Mon Sep 17 00:00:00 2001 From: Albert Silva Date: Fri, 21 Oct 2022 11:25:35 -0700 Subject: [PATCH 04/32] spilt up finders/status/waiters --- internal/service/fsx/file_cache.go | 114 +---------------------------- internal/service/fsx/find.go | 34 +++++++++ internal/service/fsx/status.go | 15 ++++ internal/service/fsx/wait.go | 63 ++++++++++++++++ 4 files changed, 113 insertions(+), 113 deletions(-) diff --git a/internal/service/fsx/file_cache.go b/internal/service/fsx/file_cache.go index 916c6e20d4db..b9245b52b515 100644 --- a/internal/service/fsx/file_cache.go +++ b/internal/service/fsx/file_cache.go @@ -2,7 +2,6 @@ package fsx import ( "context" - "errors" "log" "regexp" "time" @@ -349,7 +348,7 @@ func resourceFileCacheRead(ctx context.Context, d *schema.ResourceData, meta int defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig - filecache, err := findFileCacheByID(ctx, conn, d.Id()) + filecache, err := findFileCacheByID(conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] FSx FileCache (%s) not found, removing from state", d.Id()) @@ -467,117 +466,6 @@ func resourceFileCacheDelete(ctx context.Context, d *schema.ResourceData, meta i return nil } -func waitFileCacheCreated(ctx context.Context, conn *fsx.FSx, id string, timeout time.Duration) (*fsx.FileCache, error) { - stateConf := &resource.StateChangeConf{ - Pending: []string{fsx.FileCacheLifecycleCreating}, - Target: []string{fsx.FileCacheLifecycleAvailable}, - Refresh: statusFileCache(ctx, conn, id), - Timeout: timeout, - Delay: 30 * time.Second, - } - - outputRaw, err := stateConf.WaitForState() - - if output, ok := outputRaw.(*fsx.FileCache); ok { - if status, details := aws.StringValue(output.Lifecycle), output.FailureDetails; status == fsx.FileCacheLifecycleFailed && details != nil { - tfresource.SetLastError(err, 
errors.New(aws.StringValue(output.FailureDetails.Message))) - } - return output, err - } - return nil, err -} - -func waitFileCacheUpdated(ctx context.Context, conn *fsx.FSx, id string, timeout time.Duration) (*fsx.FileCache, error) { - stateConf := &resource.StateChangeConf{ - Pending: []string{fsx.FileCacheLifecycleUpdating}, - Target: []string{fsx.FileCacheLifecycleAvailable}, - Refresh: statusFileCache(ctx, conn, id), - Timeout: timeout, - Delay: 30 * time.Second, - } - - outputRaw, err := stateConf.WaitForState() - - if output, ok := outputRaw.(*fsx.FileCache); ok { - if status, details := aws.StringValue(output.Lifecycle), output.FailureDetails; status == fsx.FileCacheLifecycleFailed && details != nil { - tfresource.SetLastError(err, errors.New(aws.StringValue(output.FailureDetails.Message))) - } - return output, err - } - - return nil, err -} - -func waitFileCacheDeleted(ctx context.Context, conn *fsx.FSx, id string, timeout time.Duration) (*fsx.FileCache, error) { - stateConf := &resource.StateChangeConf{ - Pending: []string{fsx.FileCacheLifecycleAvailable, fsx.FileCacheLifecycleDeleting}, - Target: []string{}, - Refresh: statusFileCache(ctx, conn, id), - Timeout: timeout, - Delay: 30 * time.Second, - } - - outputRaw, err := stateConf.WaitForState() - - if output, ok := outputRaw.(*fsx.FileCache); ok { - if status, details := aws.StringValue(output.Lifecycle), output.FailureDetails; status == fsx.FileCacheLifecycleFailed && details != nil { - tfresource.SetLastError(err, errors.New(aws.StringValue(output.FailureDetails.Message))) - } - return output, err - } - - return nil, err -} - -func statusFileCache(ctx context.Context, conn *fsx.FSx, id string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - out, err := findFileCacheByID(ctx, conn, id) - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return out, aws.StringValue(out.Lifecycle), nil - } -} - -func 
findFileCacheByID(ctx context.Context, conn *fsx.FSx, id string) (*fsx.FileCache, error) { - - input := &fsx.DescribeFileCachesInput{ - FileCacheIds: []*string{aws.String(id)}, - } - var fileCaches []*fsx.FileCache - - err := conn.DescribeFileCachesPages(input, func(page *fsx.DescribeFileCachesOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } - fileCaches = append(fileCaches, page.FileCaches...) - - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, fsx.ErrCodeFileCacheNotFound) { - return nil, &resource.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - if err != nil { - return nil, err - } - if len(fileCaches) == 0 || fileCaches[0] == nil { - return nil, tfresource.NewEmptyResultError(input) - } - if count := len(fileCaches); count > 1 { - return nil, tfresource.NewTooManyResultsError(count, input) - } - return fileCaches[0], nil -} - func flattenDataRepositoryAssociations(ctx context.Context, conn *fsx.FSx, meta interface{}, dataRepositoryAssociationIds []*string) ([]interface{}, error) { in := &fsx.DescribeDataRepositoryAssociationsInput{ AssociationIds: dataRepositoryAssociationIds, diff --git a/internal/service/fsx/find.go b/internal/service/fsx/find.go index b5b3456b9444..202d1907a6af 100644 --- a/internal/service/fsx/find.go +++ b/internal/service/fsx/find.go @@ -54,6 +54,40 @@ func FindBackupByID(conn *fsx.FSx, id string) (*fsx.Backup, error) { return output.Backups[0], nil } +func findFileCacheByID(conn *fsx.FSx, id string) (*fsx.FileCache, error) { + + input := &fsx.DescribeFileCachesInput{ + FileCacheIds: []*string{aws.String(id)}, + } + var fileCaches []*fsx.FileCache + + err := conn.DescribeFileCachesPages(input, func(page *fsx.DescribeFileCachesOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + fileCaches = append(fileCaches, page.FileCaches...) 
+ + return !lastPage + }) + + if tfawserr.ErrCodeEquals(err, fsx.ErrCodeFileCacheNotFound) { + return nil, &resource.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + if err != nil { + return nil, err + } + if len(fileCaches) == 0 || fileCaches[0] == nil { + return nil, tfresource.NewEmptyResultError(input) + } + if count := len(fileCaches); count > 1 { + return nil, tfresource.NewTooManyResultsError(count, input) + } + return fileCaches[0], nil +} + func FindFileSystemByID(conn *fsx.FSx, id string) (*fsx.FileSystem, error) { input := &fsx.DescribeFileSystemsInput{ FileSystemIds: []*string{aws.String(id)}, diff --git a/internal/service/fsx/status.go b/internal/service/fsx/status.go index 3d0612c06088..593de851bc10 100644 --- a/internal/service/fsx/status.go +++ b/internal/service/fsx/status.go @@ -39,6 +39,21 @@ func statusBackup(conn *fsx.FSx, id string) resource.StateRefreshFunc { } } +func statusFileCache(conn *fsx.FSx, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + out, err := findFileCacheByID(conn, id) + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return out, aws.StringValue(out.Lifecycle), nil + } +} + func statusFileSystem(conn *fsx.FSx, id string) resource.StateRefreshFunc { return func() (interface{}, string, error) { output, err := FindFileSystemByID(conn, id) diff --git a/internal/service/fsx/wait.go b/internal/service/fsx/wait.go index 5777f5ca3916..98120ef8a7d2 100644 --- a/internal/service/fsx/wait.go +++ b/internal/service/fsx/wait.go @@ -1,6 +1,7 @@ package fsx import ( + "context" "errors" "time" @@ -71,6 +72,68 @@ func waitBackupDeleted(conn *fsx.FSx, id string) (*fsx.Backup, error) { return nil, err } +func waitFileCacheCreated(ctx context.Context, conn *fsx.FSx, id string, timeout time.Duration) (*fsx.FileCache, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{fsx.FileCacheLifecycleCreating}, + Target: 
[]string{fsx.FileCacheLifecycleAvailable}, + Refresh: statusFileCache(conn, id), + Timeout: timeout, + Delay: 30 * time.Second, + } + + outputRaw, err := stateConf.WaitForState() + + if output, ok := outputRaw.(*fsx.FileCache); ok { + if status, details := aws.StringValue(output.Lifecycle), output.FailureDetails; status == fsx.FileCacheLifecycleFailed && details != nil { + tfresource.SetLastError(err, errors.New(aws.StringValue(output.FailureDetails.Message))) + } + return output, err + } + return nil, err +} + +func waitFileCacheUpdated(ctx context.Context, conn *fsx.FSx, id string, timeout time.Duration) (*fsx.FileCache, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{fsx.FileCacheLifecycleUpdating}, + Target: []string{fsx.FileCacheLifecycleAvailable}, + Refresh: statusFileCache(conn, id), + Timeout: timeout, + Delay: 30 * time.Second, + } + + outputRaw, err := stateConf.WaitForState() + + if output, ok := outputRaw.(*fsx.FileCache); ok { + if status, details := aws.StringValue(output.Lifecycle), output.FailureDetails; status == fsx.FileCacheLifecycleFailed && details != nil { + tfresource.SetLastError(err, errors.New(aws.StringValue(output.FailureDetails.Message))) + } + return output, err + } + + return nil, err +} + +func waitFileCacheDeleted(ctx context.Context, conn *fsx.FSx, id string, timeout time.Duration) (*fsx.FileCache, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{fsx.FileCacheLifecycleAvailable, fsx.FileCacheLifecycleDeleting}, + Target: []string{}, + Refresh: statusFileCache(conn, id), + Timeout: timeout, + Delay: 30 * time.Second, + } + + outputRaw, err := stateConf.WaitForState() + + if output, ok := outputRaw.(*fsx.FileCache); ok { + if status, details := aws.StringValue(output.Lifecycle), output.FailureDetails; status == fsx.FileCacheLifecycleFailed && details != nil { + tfresource.SetLastError(err, errors.New(aws.StringValue(output.FailureDetails.Message))) + } + return output, err + } + + return 
nil, err +} + func waitFileSystemCreated(conn *fsx.FSx, id string, timeout time.Duration) (*fsx.FileSystem, error) { //nolint:unparam stateConf := &resource.StateChangeConf{ Pending: []string{fsx.FileSystemLifecycleCreating}, From bfafa3b152b7b183d7b3ee73388ec68911224a48 Mon Sep 17 00:00:00 2001 From: Albert Silva Date: Fri, 21 Oct 2022 15:57:07 -0700 Subject: [PATCH 05/32] initial tests --- internal/service/fsx/file_cache_test.go | 320 ++++++++++++++++++++++++ 1 file changed, 320 insertions(+) create mode 100644 internal/service/fsx/file_cache_test.go diff --git a/internal/service/fsx/file_cache_test.go b/internal/service/fsx/file_cache_test.go new file mode 100644 index 000000000000..ab36540213e1 --- /dev/null +++ b/internal/service/fsx/file_cache_test.go @@ -0,0 +1,320 @@ +package fsx_test + +import ( + "context" + "errors" + "fmt" + "regexp" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/fsx" + "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + sdkacctest "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/names" + + tffsx "github.com/hashicorp/terraform-provider-aws/internal/service/fsx" +) + +func TestAccFSxFileCache_basic(t *testing.T) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var filecache fsx.DescribeFileCachesOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_fsx_filecache.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(t) + acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) + }, + ErrorCheck: 
acctest.ErrorCheck(t, fsx.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckFileCacheDestroy, + Steps: []resource.TestStep{ + { + Config: testAccFileCacheConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckFileCacheExists(resourceName, &filecache), + resource.TestCheckResourceAttrSet(resourceName, "copy_tags_to_data_repository_associations"), + resource.TestCheckResourceAttrSet(resourceName, "data_repository_association_ids"), + resource.TestCheckResourceAttrSet(resourceName, "dns_name"), + resource.TestCheckResourceAttrSet(resourceName, "file_cache_type"), + resource.TestCheckResourceAttrSet(resourceName, "file_cache_type_version"), + resource.TestCheckResourceAttrSet(resourceName, "id"), + resource.TestCheckResourceAttrSet(resourceName, "kms_key_id"), + resource.TestCheckResourceAttrSet(resourceName, "network_interface_ids"), + resource.TestCheckResourceAttrSet(resourceName, "owner_id"), + acctest.MatchResourceAttrRegionalARN(resourceName, "resource_arn", "fsx", regexp.MustCompile(`filecache:+.`)), + resource.TestCheckResourceAttrSet(resourceName, "storage_capacity"), + resource.TestCheckResourceAttrSet(resourceName, "subnet_ids"), + resource.TestCheckResourceAttrSet(resourceName, "vpc_id"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"apply_immediately", "user"}, + }, + }, + }) +} + +func TestAccFSxFileCache_disappears(t *testing.T) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var filecache fsx.DescribeFileCachesOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_fsx_filecache.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(t) + acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) + }, + ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckFileCacheDestroy, + Steps: []resource.TestStep{ + { + Config: testAccFileCacheConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckFileCacheExists(resourceName, &filecache), + acctest.CheckResourceDisappears(acctest.Provider, tffsx.ResourceFileCache(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccFSxFileCache_tags(t *testing.T) { + var filecache1, filecache2 fsx.DescribeFileCachesOutput + resourceName := "aws_fsx_filecache.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t); acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) }, + ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckFileCacheDestroy, + Steps: []resource.TestStep{ + { + Config: testAccFileCache_tags1("key1", "value1"), + Check: resource.ComposeTestCheckFunc( + testAccCheckFileCacheExists(resourceName, &filecache1), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + }, + { + Config: testAccFileCache_tags2("key1", "value1updated", "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckFileCacheExists(resourceName, &filecache2), + testAccCheckFileCacheNotRecreated(&filecache1, &filecache2), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + }, + }) +} + +func testAccCheckFileCacheDestroy(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).FSxConn + ctx := context.Background() + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_fsx_filecache" { + continue + } + + _, err 
:= conn.DescribeFileCachesWithContext(ctx, &fsx.DescribeFileCachesInput{ + FileCacheIds: []*string{aws.String(rs.Primary.ID)}, + }) + if err != nil { + if tfawserr.ErrCodeEquals(err, fsx.ErrCodeFileCacheNotFound) { + return nil + } + return err + } + + return create.Error(names.FSx, create.ErrActionCheckingDestroyed, tffsx.ResNameFileCache, rs.Primary.ID, errors.New("not destroyed")) + } + + return nil +} + +func testAccCheckFileCacheExists(name string, filecache *fsx.DescribeFileCachesOutput) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return create.Error(names.FSx, create.ErrActionCheckingExistence, tffsx.ResNameFileCache, name, errors.New("not found")) + } + + if rs.Primary.ID == "" { + return create.Error(names.FSx, create.ErrActionCheckingExistence, tffsx.ResNameFileCache, name, errors.New("not set")) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).FSxConn + ctx := context.Background() + + resp, err := conn.DescribeFileCachesWithContext(ctx, &fsx.DescribeFileCachesInput{ + FileCacheIds: []*string{aws.String(rs.Primary.ID)}, + }) + + if err != nil { + return create.Error(names.FSx, create.ErrActionCheckingExistence, tffsx.ResNameFileCache, rs.Primary.ID, err) + } + + *filecache = *resp + + return nil + } +} + +func testAccCheckFileCacheNotRecreated(i, j *fsx.DescribeFileCachesOutput) resource.TestCheckFunc { + return func(s *terraform.State) error { + if aws.StringValue(i.FileCaches[0].FileCacheId) != aws.StringValue(j.FileCaches[0].FileCacheId) { + return fmt.Errorf("FSx File System (%s) recreated", aws.StringValue(i.FileCaches[0].FileCacheId)) + } + + return nil + } +} + +func testAccCheckFileCacheRecreated(i, j *fsx.DescribeFileCachesOutput) resource.TestCheckFunc { + return func(s *terraform.State) error { + if aws.StringValue(i.FileCaches[0].FileCacheId) == aws.StringValue(j.FileCaches[0].FileCacheId) { + return fmt.Errorf("FSx File System (%s) not recreated", 
aws.StringValue(i.FileCaches[0].FileCacheId)) + } + + return nil + } +} + +func testAccFileCacheConfig_basic(rName string) string { + return testAccFileCacheBaseConfig() + fmt.Sprint(` +resource "aws_fsx_filecache" "test" { + data_repository_associations { + data_repository_path = "s3://${aws_s3_bucket.test.id}" + file_cache_path = "/ns1" + } + + file_cache_type = "LUSTRE" + file_cache_type_version = "2.12" + + lustre_configuration { + deployment_type = "CACHE_1" + metadata_configuration { + storage_capacity = 2400 + } + per_unit_storage_throughput = 1000 + weekly_maintenance_start_time = "2:05:00" + } + + subnet_ids = [aws_subnet.test1.id] + storage_capacity = 1200 +} +`) +} + +func testAccFileCacheBaseConfig() string { + return ` +data "aws_availability_zones" "available" { + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "test1" { + vpc_id = aws_vpc.test.id + cidr_block = "10.0.1.0/24" + availability_zone = data.aws_availability_zones.available.names[0] +} + +resource "aws_s3_bucket" "test" {} +` +} + +func testAccFileCache_tags1(tagKey1, tagValue1 string) string { + return testAccFileCacheBaseConfig() + fmt.Sprintf(` +resource "aws_fsx_filecache" "test" { + data_repository_associations { + data_repository_path = "s3://${aws_s3_bucket.test.id}" + file_cache_path = "/ns1" + } + + file_cache_type = "LUSTRE" + file_cache_type_version = "2.12" + + lustre_configuration { + deployment_type = "CACHE_1" + metadata_configuration { + storage_capacity = 2400 + } + per_unit_storage_throughput = 1000 + weekly_maintenance_start_time = "2:05:00" + } + + subnet_ids = [aws_subnet.test1.id] + storage_capacity = 1200 + + tags = { + %[1]q = %[2]q + } +} +`, tagKey1, tagValue1) +} + +func testAccFileCache_tags2(tagKey1, tagValue1, tagKey2, tagValue2 string) string { + return testAccFileCacheBaseConfig() + fmt.Sprintf(` +resource 
"aws_fsx_filecache" "test" { + data_repository_associations { + data_repository_path = "s3://${aws_s3_bucket.test.id}" + file_cache_path = "/ns1" + } + + file_cache_type = "LUSTRE" + file_cache_type_version = "2.12" + + lustre_configuration { + deployment_type = "CACHE_1" + metadata_configuration { + storage_capacity = 2400 + } + per_unit_storage_throughput = 1000 + weekly_maintenance_start_time = "2:05:00" + } + + subnet_ids = [aws_subnet.test1.id] + storage_capacity = 1200 + + tags = { + %[1]q = %[2]q + %[3]q = %[4]q + } +} +`, tagKey1, tagValue1, tagKey2, tagValue2) +} From e84a25120f6a5c6fee41c9928fff1004d9cadf55 Mon Sep 17 00:00:00 2001 From: Albert Silva Date: Fri, 21 Oct 2022 15:57:51 -0700 Subject: [PATCH 06/32] update provider.go with aws_fsx_filecache resource --- internal/provider/provider.go | 1 + 1 file changed, 1 insertion(+) diff --git a/internal/provider/provider.go b/internal/provider/provider.go index 6227bdf505a2..45b837019353 100644 --- a/internal/provider/provider.go +++ b/internal/provider/provider.go @@ -1524,6 +1524,7 @@ func New(_ context.Context) (*schema.Provider, error) { "aws_fsx_backup": fsx.ResourceBackup(), "aws_fsx_lustre_file_system": fsx.ResourceLustreFileSystem(), "aws_fsx_data_repository_association": fsx.ResourceDataRepositoryAssociation(), + "aws_fsx_filecache": fsx.ResourceFileCache(), "aws_fsx_ontap_file_system": fsx.ResourceOntapFileSystem(), "aws_fsx_ontap_storage_virtual_machine": fsx.ResourceOntapStorageVirtualMachine(), "aws_fsx_ontap_volume": fsx.ResourceOntapVolume(), From 3887e01d802a3236b4583f482daeee3d31f4dd81 Mon Sep 17 00:00:00 2001 From: Albert Silva Date: Sun, 23 Oct 2022 14:26:50 -0400 Subject: [PATCH 07/32] fix issues with copy_tags_to_data_repository_associations --- internal/service/fsx/file_cache.go | 25 ++++--------------------- 1 file changed, 4 insertions(+), 21 deletions(-) diff --git a/internal/service/fsx/file_cache.go b/internal/service/fsx/file_cache.go index b9245b52b515..98083bbe4add 100644 --- 
a/internal/service/fsx/file_cache.go +++ b/internal/service/fsx/file_cache.go @@ -128,10 +128,7 @@ func ResourceFileCache() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "tags": { - Type: schema.TypeMap, - Computed: true, - }, + "tags": tftags.TagsSchemaComputed(), }, }, }, @@ -390,7 +387,7 @@ func resourceFileCacheRead(ctx context.Context, d *schema.ResourceData, meta int return create.DiagError(names.FSx, create.ErrActionSetting, ResNameFileCache, d.Id(), err) } - //Volume tags do not get returned with describe call so need to make a separate list tags call + //Cache tags do not get returned with describe call so need to make a separate list tags call tags, tagserr := ListTags(conn, *filecache.ResourceARN) if tagserr != nil { @@ -485,6 +482,7 @@ func flattenDataRepositoryAssociations(ctx context.Context, conn *fsx.FSx, meta return nil, tfresource.NewEmptyResultError(in) } + defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig dataRepositoryAssociationsList := []interface{}{} @@ -500,28 +498,13 @@ func flattenDataRepositoryAssociations(ctx context.Context, conn *fsx.FSx, meta "imported_file_chunk_size": dataRepositoryAssociation.ImportedFileChunkSize, "nfs": flattenNFSDataRepositoryConfiguration(dataRepositoryAssociation.NFS), "resource_arn": dataRepositoryAssociation.ResourceARN, - "tags": tags, + "tags": tags.RemoveDefaultConfig(defaultTagsConfig).Map(), } - dataRepositoryAssociationsList = append(dataRepositoryAssociationsList, values) } return dataRepositoryAssociationsList, nil } -func flattenDataRepositoryAssociationTags(tags []*fsx.Tag) []map[string]interface{} { - - dataRepositoryAssociationTags := make([]map[string]interface{}, 0) - - for _, tag := range tags { - values := map[string]interface{}{ - "key": tag.Key, - "value": tag.Value, - } - dataRepositoryAssociationTags = append(dataRepositoryAssociationTags, values) - } - return dataRepositoryAssociationTags -} - 
func flattenNFSDataRepositoryConfiguration(nfsDataRepositoryConfiguration *fsx.NFSDataRepositoryConfiguration) []map[string]interface{} { if nfsDataRepositoryConfiguration == nil { return []map[string]interface{}{} From 2c01c3798b9e93658923321e16403bb2aaaff66b Mon Sep 17 00:00:00 2001 From: Albert Silva Date: Sun, 23 Oct 2022 16:27:41 -0400 Subject: [PATCH 08/32] refactor data repository lookup+tags --- internal/service/fsx/file_cache.go | 35 +++++++----------------------- internal/service/fsx/find.go | 32 +++++++++++++++++++++++++++ 2 files changed, 40 insertions(+), 27 deletions(-) diff --git a/internal/service/fsx/file_cache.go b/internal/service/fsx/file_cache.go index 98083bbe4add..67db0f721545 100644 --- a/internal/service/fsx/file_cache.go +++ b/internal/service/fsx/file_cache.go @@ -381,9 +381,10 @@ func resourceFileCacheRead(ctx context.Context, d *schema.ResourceData, meta int } // Lookup and set Data Repository Associations - data_repository_associations, err := flattenDataRepositoryAssociations(ctx, conn, meta, filecache.DataRepositoryAssociationIds) - if err := d.Set("data_repository_associations", data_repository_associations); err != nil { + dataRepositoryAssociations, err := findDataRepositoryAssociationsByIDs(conn, filecache.DataRepositoryAssociationIds) + + if err := d.Set("data_repository_associations", flattenDataRepositoryAssociations(dataRepositoryAssociations, defaultTagsConfig, ignoreTagsConfig)); err != nil { return create.DiagError(names.FSx, create.ErrActionSetting, ResNameFileCache, d.Id(), err) } @@ -463,30 +464,10 @@ func resourceFileCacheDelete(ctx context.Context, d *schema.ResourceData, meta i return nil } -func flattenDataRepositoryAssociations(ctx context.Context, conn *fsx.FSx, meta interface{}, dataRepositoryAssociationIds []*string) ([]interface{}, error) { - in := &fsx.DescribeDataRepositoryAssociationsInput{ - AssociationIds: dataRepositoryAssociationIds, - } - result, err := 
conn.DescribeDataRepositoryAssociationsWithContext(ctx, in) - - if tfawserr.ErrCodeEquals(err, fsx.ErrCodeFileCacheNotFound) { - return nil, &resource.NotFoundError{ - LastError: err, - LastRequest: in, - } - } - if err != nil { - return nil, err - } - if result == nil || result.Associations == nil { - return nil, tfresource.NewEmptyResultError(in) - } - - defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig - ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig - dataRepositoryAssociationsList := []interface{}{} +func flattenDataRepositoryAssociations(dataRepositoryAssociations []*fsx.DataRepositoryAssociation, defaultTagsConfig *tftags.DefaultConfig, ignoreTagsConfig *tftags.IgnoreConfig) []map[string]interface{} { + flattenedDataRepositoryAssociations := make([]map[string]interface{}, 0) - for _, dataRepositoryAssociation := range result.Associations { + for _, dataRepositoryAssociation := range dataRepositoryAssociations { tags := KeyValueTags(dataRepositoryAssociation.Tags).IgnoreAWS().IgnoreConfig(ignoreTagsConfig) values := map[string]interface{}{ @@ -500,9 +481,9 @@ func flattenDataRepositoryAssociations(ctx context.Context, conn *fsx.FSx, meta "resource_arn": dataRepositoryAssociation.ResourceARN, "tags": tags.RemoveDefaultConfig(defaultTagsConfig).Map(), } - dataRepositoryAssociationsList = append(dataRepositoryAssociationsList, values) + flattenedDataRepositoryAssociations = append(flattenedDataRepositoryAssociations, values) } - return dataRepositoryAssociationsList, nil + return flattenedDataRepositoryAssociations } func flattenNFSDataRepositoryConfiguration(nfsDataRepositoryConfiguration *fsx.NFSDataRepositoryConfiguration) []map[string]interface{} { diff --git a/internal/service/fsx/find.go b/internal/service/fsx/find.go index 202d1907a6af..29c392a8c88c 100644 --- a/internal/service/fsx/find.go +++ b/internal/service/fsx/find.go @@ -88,6 +88,38 @@ func findFileCacheByID(conn *fsx.FSx, id string) (*fsx.FileCache, error) { return 
fileCaches[0], nil } +func findDataRepositoryAssociationsByIDs(conn *fsx.FSx, ids []*string) ([]*fsx.DataRepositoryAssociation, error) { + input := &fsx.DescribeDataRepositoryAssociationsInput{ + AssociationIds: ids, + } + var dataRepositoryAssociations []*fsx.DataRepositoryAssociation + + err := conn.DescribeDataRepositoryAssociationsPages(input, func(page *fsx.DescribeDataRepositoryAssociationsOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + for _, dataRepositoryAssociation := range page.Associations { + dataRepositoryAssociations = append(dataRepositoryAssociations, dataRepositoryAssociation) + } + return !lastPage + }) + + if tfawserr.ErrCodeEquals(err, fsx.ErrCodeDataRepositoryAssociationNotFound) { + return nil, &resource.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + if err != nil { + return nil, err + } + if len(dataRepositoryAssociations) == 0 || dataRepositoryAssociations[0] == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return dataRepositoryAssociations, nil +} + func FindFileSystemByID(conn *fsx.FSx, id string) (*fsx.FileSystem, error) { input := &fsx.DescribeFileSystemsInput{ FileSystemIds: []*string{aws.String(id)}, From 4900855d5b7e2fd4c23b49460ab30beb2fe60770 Mon Sep 17 00:00:00 2001 From: Albert Silva Date: Sun, 23 Oct 2022 16:55:01 -0400 Subject: [PATCH 09/32] set file_cache_id --- internal/service/fsx/file_cache.go | 1 + 1 file changed, 1 insertion(+) diff --git a/internal/service/fsx/file_cache.go b/internal/service/fsx/file_cache.go index 67db0f721545..ac28d57434e8 100644 --- a/internal/service/fsx/file_cache.go +++ b/internal/service/fsx/file_cache.go @@ -358,6 +358,7 @@ func resourceFileCacheRead(ctx context.Context, d *schema.ResourceData, meta int } d.Set("dns_name", filecache.DNSName) + d.Set("file_cache_id", filecache.FileCacheId) d.Set("file_cache_type", filecache.FileCacheType) d.Set("file_cache_type_version", filecache.FileCacheTypeVersion) d.Set("kms_key_id", 
filecache.KmsKeyId) From 580afc9bd01667fb3a2c5b713e7cc3b7752512c4 Mon Sep 17 00:00:00 2001 From: Albert Silva Date: Sun, 23 Oct 2022 20:52:44 -0400 Subject: [PATCH 10/32] fix nfs data repositories --- internal/service/fsx/file_cache.go | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/internal/service/fsx/file_cache.go b/internal/service/fsx/file_cache.go index ac28d57434e8..c89f0b37fed8 100644 --- a/internal/service/fsx/file_cache.go +++ b/internal/service/fsx/file_cache.go @@ -45,7 +45,7 @@ func ResourceFileCache() *schema.Resource { Optional: true, Default: false, }, - "data_repository_associations": { + "data_repository_association": { Type: schema.TypeList, Optional: true, ForceNew: true, @@ -103,7 +103,7 @@ func ResourceFileCache() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "dns_ips": { - Type: schema.TypeSet, + Type: schema.TypeList, Optional: true, MaxItems: 10, Elem: &schema.Schema{ @@ -309,7 +309,7 @@ func resourceFileCacheCreate(ctx context.Context, d *schema.ResourceData, meta i if v, ok := d.GetOk("copy_tags_to_data_repository_associations"); ok { input.CopyTagsToDataRepositoryAssociations = aws.Bool(v.(bool)) } - if v, ok := d.GetOk("data_repository_associations"); ok && len(v.([]interface{})) > 0 { + if v, ok := d.GetOk("data_repository_association"); ok && len(v.([]interface{})) > 0 { input.DataRepositoryAssociations = expandDataRepositoryAssociations(v.([]interface{})) } if v, ok := d.GetOk("kms_key_id"); ok { @@ -385,7 +385,7 @@ func resourceFileCacheRead(ctx context.Context, d *schema.ResourceData, meta int dataRepositoryAssociations, err := findDataRepositoryAssociationsByIDs(conn, filecache.DataRepositoryAssociationIds) - if err := d.Set("data_repository_associations", flattenDataRepositoryAssociations(dataRepositoryAssociations, defaultTagsConfig, ignoreTagsConfig)); err != nil { + if err := d.Set("data_repository_association", 
flattenDataRepositoryAssociations(dataRepositoryAssociations, defaultTagsConfig, ignoreTagsConfig)); err != nil { return create.DiagError(names.FSx, create.ErrActionSetting, ResNameFileCache, d.Id(), err) } @@ -553,7 +553,7 @@ func expandDataRepositoryAssociations(l []interface{}) []*fsx.FileCacheDataRepos req.FileCachePath = aws.String(v) } if v, ok := tfMap["nfs"]; ok && len(v.([]interface{})) > 0 { - req.NFS = expandFileCacheNFSConfiguration(v.(map[string]interface{})) + req.NFS = expandFileCacheNFSConfiguration(v.([]interface{})) } dataRepositoryAssociations = append(dataRepositoryAssociations, req) } @@ -561,12 +561,17 @@ func expandDataRepositoryAssociations(l []interface{}) []*fsx.FileCacheDataRepos return dataRepositoryAssociations } -func expandFileCacheNFSConfiguration(l map[string]interface{}) *fsx.FileCacheNFSConfiguration { +func expandFileCacheNFSConfiguration(l []interface{}) *fsx.FileCacheNFSConfiguration { + if len(l) == 0 || l[0] == nil { + return nil + } + data := l[0].(map[string]interface{}) + req := &fsx.FileCacheNFSConfiguration{} - if v, ok := l["dns_ips"]; ok { + if v, ok := data["dns_ips"]; ok { req.DnsIps = flex.ExpandStringList(v.([]interface{})) } - if v, ok := l["version"].(string); ok { + if v, ok := data["version"].(string); ok { req.Version = aws.String(v) } From 34a5966f31efc8e4dd26a8477908c4920b85b657 Mon Sep 17 00:00:00 2001 From: Albert Silva Date: Sun, 23 Oct 2022 22:16:22 -0400 Subject: [PATCH 11/32] rename resource_arn to arn --- internal/service/fsx/file_cache.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/internal/service/fsx/file_cache.go b/internal/service/fsx/file_cache.go index c89f0b37fed8..46e86c97fe32 100644 --- a/internal/service/fsx/file_cache.go +++ b/internal/service/fsx/file_cache.go @@ -40,6 +40,10 @@ func ResourceFileCache() *schema.Resource { }, Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, 
"copy_tags_to_data_repository_associations": { Type: schema.TypeBool, Optional: true, @@ -253,10 +257,6 @@ func ResourceFileCache() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "resource_arn": { - Type: schema.TypeString, - Computed: true, - }, "security_group_ids": { Type: schema.TypeList, Optional: true, @@ -357,13 +357,13 @@ func resourceFileCacheRead(ctx context.Context, d *schema.ResourceData, meta int return create.DiagError(names.FSx, create.ErrActionReading, ResNameFileCache, d.Id(), err) } + d.Set("arn", filecache.ResourceARN) d.Set("dns_name", filecache.DNSName) d.Set("file_cache_id", filecache.FileCacheId) d.Set("file_cache_type", filecache.FileCacheType) d.Set("file_cache_type_version", filecache.FileCacheTypeVersion) d.Set("kms_key_id", filecache.KmsKeyId) d.Set("owner_id", filecache.OwnerId) - d.Set("resource_arn", filecache.ResourceARN) d.Set("storage_capacity", filecache.StorageCapacity) d.Set("subnet_ids", aws.StringValueSlice(filecache.SubnetIds)) d.Set("vpc_id", filecache.VpcId) @@ -413,7 +413,7 @@ func resourceFileCacheUpdate(ctx context.Context, d *schema.ResourceData, meta i if d.HasChange("tags_all") { o, n := d.GetChange("tags_all") - if err := UpdateTags(conn, d.Get("resource_arn").(string), o, n); err != nil { + if err := UpdateTags(conn, d.Get("arn").(string), o, n); err != nil { return create.DiagError(names.FSx, create.ErrActionUpdating, ResNameFileCache, d.Id(), err) } } From cfb2ae6e289321a1692f05f9fd3e86f26e8e7feb Mon Sep 17 00:00:00 2001 From: Albert Silva Date: Sun, 23 Oct 2022 22:20:39 -0400 Subject: [PATCH 12/32] add documentation --- website/docs/r/fsx_file_cache.html.markdown | 125 ++++++++++++++++++++ 1 file changed, 125 insertions(+) create mode 100644 website/docs/r/fsx_file_cache.html.markdown diff --git a/website/docs/r/fsx_file_cache.html.markdown b/website/docs/r/fsx_file_cache.html.markdown new file mode 100644 index 000000000000..dbe095c62786 --- /dev/null +++ 
b/website/docs/r/fsx_file_cache.html.markdown @@ -0,0 +1,125 @@ +--- +subcategory: "FSx" +layout: "aws" +page_title: "AWS: aws_fsx_file_cache" +description: |- + Terraform resource for managing an AWS FSx File Cache. +--- + +# Resource: aws_fsx_file_cache + +Terraform resource for managing an AWS FSx File Cache. +See the [Create File Cache](https://docs.aws.amazon.com/fsx/latest/APIReference/API_CreateFileCache.html) for more information. + +## Example Usage + +### Basic Usage + +```terraform +resource "aws_fsx_filecache" "example" { + + data_repository_association { + data_repository_path = "nfs://filer.domain.com" + data_repository_subdirectories = ["test", "test2"] + file_cache_path = "/ns1" + + nfs { + dns_ips = ["192.168.0.1", "192.168.0.2"] + version = "NFS3" + } + } + + file_cache_type = "LUSTRE" + file_cache_type_version = "2.12" + + lustre_configuration { + deployment_type = "CACHE_1" + metadata_configuration { + storage_capacity = 2400 + } + per_unit_storage_throughput = 1000 + weekly_maintenance_start_time = "2:05:00" + } + + subnet_ids = [aws_subnet.test1.id] + storage_capacity = 1200 +} +``` + +## Argument Reference + +The following arguments are required: + +* `file_cache_type` - The type of cache that you're creating. The only supported value is `LUSTRE`. +* `file_cache_type_version` - The version for the type of cache that you're creating. The only supported value is `2.12`. +* `storage_capacity` - The storage capacity of the cache in gibibytes (GiB). Valid values are `1200` GiB, `2400` GiB, and increments of `2400` GiB. +* `subnet_ids` - A list of subnet IDs that the cache will be accessible from. You can specify only one subnet ID. + +The following arguments are optional: + +* `copy_tags_to_data_repository_associations` - A boolean flag indicating whether tags for the cache should be copied to data repository associations. This value defaults to false. 
+* `data_repository_association` - See the [`data_repository_association` configuration](#data-repository-association-arguments) block. Max of 8. +A list of up to 8 configurations for data repository associations (DRAs) to be created during the cache creation. The DRAs link the cache to either an Amazon S3 data repository or a Network File System (NFS) data repository that supports the NFSv3 protocol. The DRA configurations must meet the following requirements: 1) All configurations on the list must be of the same data repository type, either all S3 or all NFS. A cache can't link to different data repository types at the same time. 2) An NFS DRA must link to an NFS file system that supports the NFSv3 protocol. DRA automatic import and automatic export is not supported. +* `kms_key_id` - Specifies the ID of the AWS Key Management Service (AWS KMS) key to use for encrypting data on an Amazon File Cache. If a KmsKeyId isn't specified, the Amazon FSx-managed AWS KMS key for your account is used. +* `lustre_configuration` - See the [`lustre_configuration`](#lustre-configuration-arguments) block. +* `security_group_ids` - A list of IDs specifying the security groups to apply to all network interfaces created for Amazon File Cache access. +* `tags` - (Optional) A map of tags to assign to the file cache. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +#### Data Repository Association arguments + +The following arguments are supported for `data_repository_association` configuration block: + +* `file_cache_path` - (Required) A path on the cache that points to a high-level directory (such as /ns1/) or subdirectory (such as /ns1/subdir/) that will be mapped 1-1 with DataRepositoryPath. The leading forward slash in the name is required. 
Two data repository associations cannot have overlapping cache paths. For example, if a data repository is associated with cache path /ns1/, then you cannot link another data repository with cache path /ns1/ns2. This path specifies where in your cache files will be exported from. This cache directory can be linked to only one data repository, and no other data repository can be linked to the directory. Note: The cache path can only be set to root (/) on an NFS DRA when DataRepositorySubdirectories is specified. If you specify root (/) as the cache path, you can create only one DRA on the cache. The cache path cannot be set to root (/) for an S3 DRA. +* `data_repository_path` - (Optional) The path to the S3 or NFS data repository that links to the cache. +* `data_repository_subdirectories` - (Optional) A list of NFS Exports that will be linked with this data repository association. The Export paths are in the format /exportpath1. To use this parameter, you must configure DataRepositoryPath as the domain name of the NFS file system. The NFS file system domain name in effect is the root of the subdirectories. Note that DataRepositorySubdirectories is not supported for S3 data repositories. Max of 500. +* `nfs` - (Optional) See the [`nfs` configuration](#nfs-arguments) block. + +#### NFS arguments + +The following arguments are supported for the `nfs` configuration block: + +* `version` - (Required) The version of the NFS (Network File System) protocol of the NFS data repository. The only supported value is `NFS3`, which indicates that the data repository must support the NFSv3 protocol. +* `dns_ips` - (Optional) A list of up to 2 IP addresses of DNS servers used to resolve the NFS file system domain name. The provided IP addresses can either be the IP addresses of a DNS forwarder or resolver that the customer manages and runs inside the customer VPC, or the IP addresses of the on-premises DNS servers.
+ +#### Lustre Configuration arguments + +The following arguments are supported for `lustre_configuration` configuration block: + +* `deployment_type` - (Required) Specifies the cache deployment type. The only supported value is `CACHE_1`. +* `metadata_configuration` - (Required) The configuration for a Lustre MDT (Metadata Target) storage volume. See the [`metadata_configuration`](#metadata-configuration-arguments) block. +* `per_unit_storage_throughput` - (Required) Provisions the amount of read and write throughput for each 1 tebibyte (TiB) of cache storage capacity, in MB/s/TiB. The only supported value is `1000`. +* `weekly_maintenance_start_time` - (Optional) A recurring weekly time, in the format `D:HH:MM`. `D` is the day of the week, for which `1` represents Monday and `7` represents Sunday. `HH` is the zero-padded hour of the day (0-23), and `MM` is the zero-padded minute of the hour. For example, 1:05:00 specifies maintenance at 5 AM Monday. See the [ISO week date](https://en.wikipedia.org/wiki/ISO_week_date) for more information. + +#### Metadata Configuration arguments + +The following arguments are supported for `metadata_configuration` configuration block: + +* `storage_capacity` - (Required) The storage capacity of the Lustre MDT (Metadata Target) storage volume in gibibytes (GiB). The only supported value is `2400` GiB. + +## Attributes Reference + +In addition to all arguments above, the following attributes are exported: + +* `arn` - The Amazon Resource Name (ARN) for the resource. +* `data_repository_association_ids` - A list of IDs of data repository associations that are associated with this cache. +* `dns_name` - The Domain Name System (DNS) name for the cache. +* `file_cache_id` - The system-generated, unique ID of the cache. +* `id` - The system-generated, unique ID of the cache. +* `network_interface_ids` - A list of network interface IDs. +* `vpc_id` - The ID of your virtual private cloud (VPC). 
+ +## Timeouts + +[Configuration options](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts): + +* `create` - (Default `30m`) +* `update` - (Default `30m`) +* `delete` - (Default `30m`) + +## Import + +FSx File Cache can be imported using the `id`, e.g., + +``` +$ terraform import aws_fsx_file_cache.example fc-8012925589 +``` From 58458e69c028d2fe094e097bf2c35f224f069cd1 Mon Sep 17 00:00:00 2001 From: Albert Silva Date: Sun, 23 Oct 2022 22:21:00 -0400 Subject: [PATCH 13/32] add/enhance tests --- internal/service/fsx/file_cache_test.go | 490 ++++++++++++++++++++++-- 1 file changed, 452 insertions(+), 38 deletions(-) diff --git a/internal/service/fsx/file_cache_test.go b/internal/service/fsx/file_cache_test.go index ab36540213e1..513f4d57a57f 100644 --- a/internal/service/fsx/file_cache_test.go +++ b/internal/service/fsx/file_cache_test.go @@ -21,16 +21,41 @@ import ( tffsx "github.com/hashicorp/terraform-provider-aws/internal/service/fsx" ) +func TestAccFSxFileCache_serial(t *testing.T) { + testCases := map[string]map[string]func(t *testing.T){ + "FSxFileCache": { + "basic": TestAccFSxFileCache_basic, + "disappears": TestAccFSxFileCache_disappears, + "kms_key_id": testAccFSxFileCache_kmsKeyID, + "copy_tags_to_data_repository_associations": testAccFSxFileCache_copyTagsToDataRepositoryAssociations, + "data_repository_association": testAccFSxFileCache_dataRepositoryAssociation, + "security_group_id": testAccFSxFileCache_securityGroupId, + "tags": testAccFSxFileCache_tags, + }, + } + + for group, m := range testCases { + m := m + t.Run(group, func(t *testing.T) { + for name, tc := range m { + tc := tc + t.Run(name, func(t *testing.T) { + tc(t) + }) + } + }) + } +} + func TestAccFSxFileCache_basic(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } var filecache fsx.DescribeFileCachesOutput - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_fsx_filecache.test" 
- resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) @@ -40,22 +65,21 @@ func TestAccFSxFileCache_basic(t *testing.T) { CheckDestroy: testAccCheckFileCacheDestroy, Steps: []resource.TestStep{ { - Config: testAccFileCacheConfig_basic(rName), + Config: testAccFileCacheConfig_basic(), Check: resource.ComposeTestCheckFunc( testAccCheckFileCacheExists(resourceName, &filecache), - resource.TestCheckResourceAttrSet(resourceName, "copy_tags_to_data_repository_associations"), - resource.TestCheckResourceAttrSet(resourceName, "data_repository_association_ids"), - resource.TestCheckResourceAttrSet(resourceName, "dns_name"), - resource.TestCheckResourceAttrSet(resourceName, "file_cache_type"), - resource.TestCheckResourceAttrSet(resourceName, "file_cache_type_version"), - resource.TestCheckResourceAttrSet(resourceName, "id"), - resource.TestCheckResourceAttrSet(resourceName, "kms_key_id"), - resource.TestCheckResourceAttrSet(resourceName, "network_interface_ids"), - resource.TestCheckResourceAttrSet(resourceName, "owner_id"), - acctest.MatchResourceAttrRegionalARN(resourceName, "resource_arn", "fsx", regexp.MustCompile(`filecache:+.`)), - resource.TestCheckResourceAttrSet(resourceName, "storage_capacity"), - resource.TestCheckResourceAttrSet(resourceName, "subnet_ids"), - resource.TestCheckResourceAttrSet(resourceName, "vpc_id"), + resource.TestCheckResourceAttr(resourceName, "copy_tags_to_data_repository_associations", "false"), + resource.TestCheckResourceAttr(resourceName, "file_cache_type", "LUSTRE"), + resource.TestCheckResourceAttr(resourceName, "file_cache_type_version", "2.12"), + resource.TestMatchResourceAttr(resourceName, "id", regexp.MustCompile(`fc-.+`)), + acctest.MatchResourceAttrRegionalARN(resourceName, "kms_key_id", "kms", regexp.MustCompile(`key\/.+`)), + resource.TestCheckResourceAttr(resourceName, 
"lustre_configuration.0.deployment_type", "CACHE_1"), + resource.TestCheckResourceAttr(resourceName, "lustre_configuration.0.metadata_configuration.0.storage_capacity", "2400"), + resource.TestCheckResourceAttr(resourceName, "lustre_configuration.0.metadata_configuration.0.per_unit_storage_capacity", "1000"), + resource.TestCheckResourceAttr(resourceName, "lustre_configuration.0.metadata_configuration.0.weekly_maintenance_start_time", "2:05:00"), + acctest.MatchResourceAttrRegionalARN(resourceName, "resource_arn", "fsx", regexp.MustCompile(`file-cache/fc-.+`)), + resource.TestCheckResourceAttr(resourceName, "storage_capacity", "1200"), + resource.TestCheckResourceAttr(resourceName, "subnet_ids.#", "0"), ), }, { @@ -74,10 +98,9 @@ func TestAccFSxFileCache_disappears(t *testing.T) { } var filecache fsx.DescribeFileCachesOutput - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_fsx_filecache.test" - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) @@ -87,7 +110,7 @@ func TestAccFSxFileCache_disappears(t *testing.T) { CheckDestroy: testAccCheckFileCacheDestroy, Steps: []resource.TestStep{ { - Config: testAccFileCacheConfig_basic(rName), + Config: testAccFileCacheConfig_basic(), Check: resource.ComposeTestCheckFunc( testAccCheckFileCacheExists(resourceName, &filecache), acctest.CheckResourceDisappears(acctest.Provider, tffsx.ResourceFileCache(), resourceName), @@ -98,11 +121,152 @@ func TestAccFSxFileCache_disappears(t *testing.T) { }) } -func TestAccFSxFileCache_tags(t *testing.T) { +// Per Attribute Acceptance Tests +func testAccFSxFileCache_kmsKeyID(t *testing.T) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var filecache1, filecache2 fsx.DescribeFileCachesOutput + kmsKeyResourceName1 := "aws_kms_key.test1" + kmsKeyResourceName2 := "aws_kms_key.test2" + resourceName := 
"aws_fsx_filecache.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t); acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) }, + ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckWindowsFileSystemDestroy, + Steps: []resource.TestStep{ + { + Config: testAccFileCacheConfig_kmsKeyID1(), + Check: resource.ComposeTestCheckFunc( + testAccCheckFileCacheExists(resourceName, &filecache2), + resource.TestCheckResourceAttrPair(resourceName, "kms_key_id", kmsKeyResourceName1, "arn"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + }, + { + Config: testAccFileCacheConfig_kmsKeyID2(), + Check: resource.ComposeTestCheckFunc( + testAccCheckFileCacheExists(resourceName, &filecache2), + testAccCheckFileCacheRecreated(&filecache1, &filecache2), + resource.TestCheckResourceAttrPair(resourceName, "kms_key_id", kmsKeyResourceName2, "arn"), + ), + }, + }, + }) +} + +func testAccFSxFileCache_copyTagsToDataRepositoryAssociations(t *testing.T) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var filecache1 fsx.DescribeFileCachesOutput + resourceName := "aws_fsx_filecache.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t); acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) }, + ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckWindowsFileSystemDestroy, + Steps: []resource.TestStep{ + { + Config: testAccFileCacheConfig_copyTagsToDataRepositoryAssociations("key1", "value1", "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckFileCacheExists(resourceName, &filecache1), + resource.TestCheckResourceAttr(resourceName, "copy_tags_to_data_repository_associations", "true"), + resource.TestCheckResourceAttr(resourceName, "data_repository_association.0.tags.%", "2"), 
+ ), + }, + }, + }) +} + +func testAccFSxFileCache_dataRepositoryAssociation(t *testing.T) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var filecache1 fsx.DescribeFileCachesOutput + resourceName := "aws_fsx_filecache.test" + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t); acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) }, + ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckWindowsFileSystemDestroy, + Steps: []resource.TestStep{ + { + Config: testAccFileCacheConfig_NFSAssociation(), + Check: resource.ComposeTestCheckFunc( + testAccCheckFileCacheExists(resourceName, &filecache1), + resource.TestCheckResourceAttr(resourceName, "data_repository_association.data_repository_path", "nfs://filer.domain.com"), + resource.TestCheckResourceAttr(resourceName, "data_repository_association.file_cache_path", "/ns1"), + resource.TestCheckResourceAttr(resourceName, "data_repository_association.nfs.dns_ips.0", "192.168.0.1"), + resource.TestCheckResourceAttr(resourceName, "data_repository_association.nfs.version", "NFS3"), + resource.TestCheckResourceAttr(resourceName, "data_repository_association_ids.#", "0"), + ), + }, + { + Config: testAccFileCacheConfig_S3Association(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckFileCacheExists(resourceName, &filecache1), + resource.TestCheckResourceAttr(resourceName, "data_repository_association.data_repository_path", bucketName), + resource.TestCheckResourceAttr(resourceName, "data_repository_association.file_cache_path", "/ns1"), + resource.TestCheckResourceAttr(resourceName, "data_repository_association_ids.#", "0"), + resource.TestCheckResourceAttr(resourceName, "data_repository_association_ids.#", "0"), + ), + }, + { + Config: testAccFileCacheConfig_MultipleAssociations(), + Check: 
resource.ComposeTestCheckFunc( + testAccCheckFileCacheExists(resourceName, &filecache1), + resource.TestCheckResourceAttr(resourceName, "data_repository_association_ids.#", "1"), + ), + }, + }, + }) +} + +func testAccFSxFileCache_securityGroupId(t *testing.T) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var filecache1 fsx.DescribeFileCachesOutput + resourceName := "aws_fsx_filecache.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t); acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) }, + ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckWindowsFileSystemDestroy, + Steps: []resource.TestStep{ + { + Config: testAccFileCacheConfig_securityGroupID(), + Check: resource.ComposeTestCheckFunc( + testAccCheckFileCacheExists(resourceName, &filecache1), + resource.TestCheckResourceAttr(resourceName, "security_group_ids.#", "0"), + ), + }, + }, + }) +} + +func testAccFSxFileCache_tags(t *testing.T) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + var filecache1, filecache2 fsx.DescribeFileCachesOutput resourceName := "aws_fsx_filecache.test" - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t); acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -135,6 +299,7 @@ func TestAccFSxFileCache_tags(t *testing.T) { } func testAccCheckFileCacheDestroy(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).FSxConn ctx := context.Background() @@ -207,14 +372,12 @@ func testAccCheckFileCacheRecreated(i, j *fsx.DescribeFileCachesOutput) resource } } -func testAccFileCacheConfig_basic(rName string) string { - return testAccFileCacheBaseConfig() + fmt.Sprint(` -resource 
"aws_fsx_filecache" "test" { - data_repository_associations { - data_repository_path = "s3://${aws_s3_bucket.test.id}" - file_cache_path = "/ns1" - } +// Test Configurations +func testAccFileCacheConfig_basic() string { + return testAccFileCacheBaseConfig() + + ` +resource "aws_fsx_filecache" "test" { file_cache_type = "LUSTRE" file_cache_type_version = "2.12" @@ -230,7 +393,7 @@ resource "aws_fsx_filecache" "test" { subnet_ids = [aws_subnet.test1.id] storage_capacity = 1200 } -`) +` } func testAccFileCacheBaseConfig() string { @@ -253,18 +416,50 @@ resource "aws_subnet" "test1" { cidr_block = "10.0.1.0/24" availability_zone = data.aws_availability_zones.available.names[0] } +` +} -resource "aws_s3_bucket" "test" {} +func testAccFileCacheConfig_NFSAssociation() string { + return testAccFileCacheBaseConfig() + ` +resource "aws_fsx_filecache" "test" { + data_repository_association { + data_repository_path = "nfs://filer.domain.com" + data_repository_subdirectories = ["test", "test2"] + file_cache_path = "/ns1" + + nfs { + dns_ips = ["192.168.0.1", "192.168.0.2"] + version = "NFS3" + } + } + + file_cache_type = "LUSTRE" + file_cache_type_version = "2.12" + + lustre_configuration { + deployment_type = "CACHE_1" + metadata_configuration { + storage_capacity = 2400 + } + per_unit_storage_throughput = 1000 + weekly_maintenance_start_time = "2:05:00" + } + + subnet_ids = [aws_subnet.test1.id] + storage_capacity = 1200 +} ` + } -func testAccFileCache_tags1(tagKey1, tagValue1 string) string { - return testAccFileCacheBaseConfig() + fmt.Sprintf(` +func testAccFileCacheConfig_S3Association(bucketName string) string { + return testAccFileCacheBaseConfig() + + fmt.Sprintf(` resource "aws_fsx_filecache" "test" { - data_repository_associations { + data_repository_association { data_repository_path = "s3://${aws_s3_bucket.test.id}" file_cache_path = "/ns1" - } + } file_cache_type = "LUSTRE" file_cache_type_version = "2.12" @@ -280,18 +475,206 @@ resource "aws_fsx_filecache" "test" 
{ subnet_ids = [aws_subnet.test1.id] storage_capacity = 1200 +} +resource "aws_s3_bucket" "test" { + bucket = %[1]q +} +`, bucketName) +} + +func testAccFileCacheConfig_MultipleAssociations() string { + return testAccFileCacheBaseConfig() + ` +resource "aws_fsx_filecache" "test" { + data_repository_association { + data_repository_path = "nfs://filer2.domain.com" + data_repository_subdirectories = ["test", "test2"] + file_cache_path = "/ns2" + + nfs { + dns_ips = ["192.168.0.1", "192.168.0.2"] + version = "NFS3" + } + } + + data_repository_association { + data_repository_path = "nfs://filer.domain.com" + data_repository_subdirectories = ["test", "test2"] + file_cache_path = "/ns1" + + nfs { + dns_ips = ["192.168.0.1", "192.168.0.2"] + version = "NFS3" + } + } + + file_cache_type = "LUSTRE" + file_cache_type_version = "2.12" + + lustre_configuration { + deployment_type = "CACHE_1" + metadata_configuration { + storage_capacity = 2400 + } + per_unit_storage_throughput = 1000 + weekly_maintenance_start_time = "2:05:00" + } + + subnet_ids = [aws_subnet.test1.id] + storage_capacity = 1200 +} + + +` +} + +func testAccFileCacheConfig_copyTagsToDataRepositoryAssociations(tagKey1, tagValue1, tagKey2, tagValue2 string) string { + return testAccFileCacheBaseConfig() + + fmt.Sprintf(` +resource "aws_fsx_filecache" "test" { + copy_tags_to_data_repository_associations = true + data_repository_association { + data_repository_path = "s3://${aws_s3_bucket.test.id}" + file_cache_path = "/ns1" + } + + file_cache_type = "LUSTRE" + file_cache_type_version = "2.12" + + lustre_configuration { + deployment_type = "CACHE_1" + metadata_configuration { + storage_capacity = 2400 + } + per_unit_storage_throughput = 1000 + weekly_maintenance_start_time = "2:05:00" + } + + subnet_ids = [aws_subnet.test1.id] + storage_capacity = 1200 tags = { %[1]q = %[2]q + %[3]q = %[4]q } } -`, tagKey1, tagValue1) +`, tagKey1, tagValue1, tagKey2, tagValue2) +} + +func testAccFileCacheConfig_kmsKeyID1() string { + 
return testAccFileCacheBaseConfig() + ` +resource "aws_kms_key" "test1" { + description = "FSx KMS Testing key" + deletion_window_in_days = 7 } -func testAccFileCache_tags2(tagKey1, tagValue1, tagKey2, tagValue2 string) string { - return testAccFileCacheBaseConfig() + fmt.Sprintf(` resource "aws_fsx_filecache" "test" { - data_repository_associations { + data_repository_association { + data_repository_path = "s3://${aws_s3_bucket.test.id}" + file_cache_path = "/ns1" + } + + file_cache_type = "LUSTRE" + file_cache_type_version = "2.12" + + lustre_configuration { + deployment_type = "CACHE_1" + metadata_configuration { + storage_capacity = 2400 + } + per_unit_storage_throughput = 1000 + weekly_maintenance_start_time = "2:05:00" + } + + kms_key_id = aws_kms_key.test1.arn + subnet_ids = [aws_subnet.test1.id] + storage_capacity = 1200 +} +` +} + +func testAccFileCacheConfig_kmsKeyID2() string { + return testAccFileCacheBaseConfig() + ` +resource "aws_kms_key" "test2" { + description = "FSx KMS Testing key" + deletion_window_in_days = 7 +} + +resource "aws_fsx_filecache" "test" { + data_repository_association { + data_repository_path = "s3://${aws_s3_bucket.test.id}" + file_cache_path = "/ns1" + } + + file_cache_type = "LUSTRE" + file_cache_type_version = "2.12" + + lustre_configuration { + deployment_type = "CACHE_1" + metadata_configuration { + storage_capacity = 2400 + } + per_unit_storage_throughput = 1000 + weekly_maintenance_start_time = "2:05:00" + } + + kms_key_id = aws_kms_key.test2.arn + subnet_ids = [aws_subnet.test1.id] + storage_capacity = 1200 +} +` +} + +func testAccFileCacheConfig_securityGroupID() string { + return testAccFileCacheBaseConfig() + ` +resource "aws_security_group" "test1" { + description = "security group for FSx testing" + vpc_id = aws_vpc.test.id + + ingress { + cidr_blocks = [aws_vpc.test.cidr_block] + from_port = 0 + protocol = -1 + to_port = 0 + } + + egress { + cidr_blocks = ["0.0.0.0/0"] + from_port = 0 + protocol = "-1" + to_port = 0 
+ } +} + +resource "aws_fsx_filecache" "test" { + data_repository_association { + data_repository_path = "s3://${aws_s3_bucket.test.id}" + file_cache_path = "/ns1" + } + + file_cache_type = "LUSTRE" + file_cache_type_version = "2.12" + + lustre_configuration { + deployment_type = "CACHE_1" + metadata_configuration { + storage_capacity = 2400 + } + per_unit_storage_throughput = 1000 + weekly_maintenance_start_time = "2:05:00" + } + + security_group_ids = [aws_security_group.test1.id] + subnet_ids = [aws_subnet.test1.id] + storage_capacity = 1200 +} +` +} + +func testAccFileCache_tags1(tagKey1, tagValue1 string) string { + return testAccFileCacheBaseConfig() + + fmt.Sprintf(` +resource "aws_fsx_filecache" "test" { + data_repository_association { data_repository_path = "s3://${aws_s3_bucket.test.id}" file_cache_path = "/ns1" } @@ -308,6 +691,37 @@ resource "aws_fsx_filecache" "test" { weekly_maintenance_start_time = "2:05:00" } + subnet_ids = [aws_subnet.test1.id] + storage_capacity = 1200 + + tags = { + %[1]q = %[2]q + } +} +`, tagKey1, tagValue1) +} + +func testAccFileCache_tags2(tagKey1, tagValue1, tagKey2, tagValue2 string) string { + return testAccFileCacheBaseConfig() + + fmt.Sprintf(` +resource "aws_fsx_filecache" "test" { + data_repository_association { + data_repository_path = "s3://${aws_s3_bucket.test.id}" + file_cache_path = "/ns1" + } + + file_cache_type = "LUSTRE" + file_cache_type_version = "2.12" + + lustre_configuration { + deployment_type = "CACHE_1" + metadata_configuration { + storage_capacity = 2400 + } + per_unit_storage_throughput = 1000 + weekly_maintenance_start_time = "2:05:00" + } + subnet_ids = [aws_subnet.test1.id] storage_capacity = 1200 From 950ba3b8695d4a72a9e0a4c9cde85ffe4d40adca Mon Sep 17 00:00:00 2001 From: Albert Silva Date: Mon, 24 Oct 2022 08:37:15 -0400 Subject: [PATCH 14/32] fix golangci-lint findings --- internal/service/fsx/file_cache.go | 7 +++--- internal/service/fsx/file_cache_test.go | 32 +------------------------ 
internal/service/fsx/find.go | 4 +--- internal/service/fsx/wait.go | 7 +++--- 4 files changed, 8 insertions(+), 42 deletions(-) diff --git a/internal/service/fsx/file_cache.go b/internal/service/fsx/file_cache.go index 46e86c97fe32..e052eebc8b8a 100644 --- a/internal/service/fsx/file_cache.go +++ b/internal/service/fsx/file_cache.go @@ -191,7 +191,6 @@ func ResourceFileCache() *schema.Resource { "log_configuration": { Type: schema.TypeList, Computed: true, - ForceNew: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "destination": { @@ -333,7 +332,7 @@ func resourceFileCacheCreate(ctx context.Context, d *schema.ResourceData, meta i d.SetId(aws.StringValue(result.FileCache.FileCacheId)) - if _, err := waitFileCacheCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { + if _, err := waitFileCacheCreated(conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { return create.DiagError(names.FSx, create.ErrActionWaitingForCreation, ResNameFileCache, d.Id(), err) } @@ -435,7 +434,7 @@ func resourceFileCacheUpdate(ctx context.Context, d *schema.ResourceData, meta i if err != nil { return create.DiagError(names.FSx, create.ErrActionUpdating, ResNameFileCache, d.Id(), err) } - if _, err := waitFileCacheUpdated(ctx, conn, aws.StringValue(result.FileCache.FileCacheId), d.Timeout(schema.TimeoutUpdate)); err != nil { + if _, err := waitFileCacheUpdated(conn, aws.StringValue(result.FileCache.FileCacheId), d.Timeout(schema.TimeoutUpdate)); err != nil { return create.DiagError(names.FSx, create.ErrActionWaitingForUpdate, ResNameFileCache, d.Id(), err) } @@ -458,7 +457,7 @@ func resourceFileCacheDelete(ctx context.Context, d *schema.ResourceData, meta i if err != nil { return create.DiagError(names.FSx, create.ErrActionDeleting, ResNameFileCache, d.Id(), err) } - if _, err := waitFileCacheDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { + if _, err := waitFileCacheDeleted(conn, d.Id(), 
d.Timeout(schema.TimeoutDelete)); err != nil { return create.DiagError(names.FSx, create.ErrActionWaitingForDeletion, ResNameFileCache, d.Id(), err) } diff --git a/internal/service/fsx/file_cache_test.go b/internal/service/fsx/file_cache_test.go index 513f4d57a57f..05e8e112284b 100644 --- a/internal/service/fsx/file_cache_test.go +++ b/internal/service/fsx/file_cache_test.go @@ -251,7 +251,7 @@ func testAccFSxFileCache_securityGroupId(t *testing.T) { Config: testAccFileCacheConfig_securityGroupID(), Check: resource.ComposeTestCheckFunc( testAccCheckFileCacheExists(resourceName, &filecache1), - resource.TestCheckResourceAttr(resourceName, "security_group_ids.#", "0"), + resource.TestCheckResourceAttr(resourceName, "security_group_ids.#", "1"), ), }, }, @@ -532,11 +532,6 @@ func testAccFileCacheConfig_copyTagsToDataRepositoryAssociations(tagKey1, tagVal fmt.Sprintf(` resource "aws_fsx_filecache" "test" { copy_tags_to_data_repository_associations = true - data_repository_association { - data_repository_path = "s3://${aws_s3_bucket.test.id}" - file_cache_path = "/ns1" - } - file_cache_type = "LUSTRE" file_cache_type_version = "2.12" @@ -568,11 +563,6 @@ resource "aws_kms_key" "test1" { } resource "aws_fsx_filecache" "test" { - data_repository_association { - data_repository_path = "s3://${aws_s3_bucket.test.id}" - file_cache_path = "/ns1" - } - file_cache_type = "LUSTRE" file_cache_type_version = "2.12" @@ -600,11 +590,6 @@ resource "aws_kms_key" "test2" { } resource "aws_fsx_filecache" "test" { - data_repository_association { - data_repository_path = "s3://${aws_s3_bucket.test.id}" - file_cache_path = "/ns1" - } - file_cache_type = "LUSTRE" file_cache_type_version = "2.12" @@ -646,11 +631,6 @@ resource "aws_security_group" "test1" { } resource "aws_fsx_filecache" "test" { - data_repository_association { - data_repository_path = "s3://${aws_s3_bucket.test.id}" - file_cache_path = "/ns1" - } - file_cache_type = "LUSTRE" file_cache_type_version = "2.12" @@ -674,11 +654,6 
@@ func testAccFileCache_tags1(tagKey1, tagValue1 string) string { return testAccFileCacheBaseConfig() + fmt.Sprintf(` resource "aws_fsx_filecache" "test" { - data_repository_association { - data_repository_path = "s3://${aws_s3_bucket.test.id}" - file_cache_path = "/ns1" - } - file_cache_type = "LUSTRE" file_cache_type_version = "2.12" @@ -705,11 +680,6 @@ func testAccFileCache_tags2(tagKey1, tagValue1, tagKey2, tagValue2 string) strin return testAccFileCacheBaseConfig() + fmt.Sprintf(` resource "aws_fsx_filecache" "test" { - data_repository_association { - data_repository_path = "s3://${aws_s3_bucket.test.id}" - file_cache_path = "/ns1" - } - file_cache_type = "LUSTRE" file_cache_type_version = "2.12" diff --git a/internal/service/fsx/find.go b/internal/service/fsx/find.go index 29c392a8c88c..558c615b082d 100644 --- a/internal/service/fsx/find.go +++ b/internal/service/fsx/find.go @@ -98,9 +98,7 @@ func findDataRepositoryAssociationsByIDs(conn *fsx.FSx, ids []*string) ([]*fsx.D if page == nil { return !lastPage } - for _, dataRepositoryAssociation := range page.Associations { - dataRepositoryAssociations = append(dataRepositoryAssociations, dataRepositoryAssociation) - } + dataRepositoryAssociations = append(dataRepositoryAssociations, page.Associations...) 
return !lastPage }) diff --git a/internal/service/fsx/wait.go b/internal/service/fsx/wait.go index 98120ef8a7d2..ad705c2ef03c 100644 --- a/internal/service/fsx/wait.go +++ b/internal/service/fsx/wait.go @@ -1,7 +1,6 @@ package fsx import ( - "context" "errors" "time" @@ -72,7 +71,7 @@ func waitBackupDeleted(conn *fsx.FSx, id string) (*fsx.Backup, error) { return nil, err } -func waitFileCacheCreated(ctx context.Context, conn *fsx.FSx, id string, timeout time.Duration) (*fsx.FileCache, error) { +func waitFileCacheCreated(conn *fsx.FSx, id string, timeout time.Duration) (*fsx.FileCache, error) { stateConf := &resource.StateChangeConf{ Pending: []string{fsx.FileCacheLifecycleCreating}, Target: []string{fsx.FileCacheLifecycleAvailable}, @@ -92,7 +91,7 @@ func waitFileCacheCreated(ctx context.Context, conn *fsx.FSx, id string, timeout return nil, err } -func waitFileCacheUpdated(ctx context.Context, conn *fsx.FSx, id string, timeout time.Duration) (*fsx.FileCache, error) { +func waitFileCacheUpdated(conn *fsx.FSx, id string, timeout time.Duration) (*fsx.FileCache, error) { stateConf := &resource.StateChangeConf{ Pending: []string{fsx.FileCacheLifecycleUpdating}, Target: []string{fsx.FileCacheLifecycleAvailable}, @@ -113,7 +112,7 @@ func waitFileCacheUpdated(ctx context.Context, conn *fsx.FSx, id string, timeout return nil, err } -func waitFileCacheDeleted(ctx context.Context, conn *fsx.FSx, id string, timeout time.Duration) (*fsx.FileCache, error) { +func waitFileCacheDeleted(conn *fsx.FSx, id string, timeout time.Duration) (*fsx.FileCache, error) { stateConf := &resource.StateChangeConf{ Pending: []string{fsx.FileCacheLifecycleAvailable, fsx.FileCacheLifecycleDeleting}, Target: []string{}, From f6aa09a96d153f08c1470243b9ddb82959665dc5 Mon Sep 17 00:00:00 2001 From: Albert Silva Date: Mon, 24 Oct 2022 08:39:18 -0400 Subject: [PATCH 15/32] update description for lustre_configuration --- website/docs/r/fsx_file_cache.html.markdown | 4 ++-- 1 file changed, 2 insertions(+), 
2 deletions(-) diff --git a/website/docs/r/fsx_file_cache.html.markdown b/website/docs/r/fsx_file_cache.html.markdown index dbe095c62786..8a08a5b3c5fe 100644 --- a/website/docs/r/fsx_file_cache.html.markdown +++ b/website/docs/r/fsx_file_cache.html.markdown @@ -61,7 +61,7 @@ The following arguments are optional: * `data_repository_association` - See the [`data_repository_association` configuration](#data-repository-association-arguments) block. Max of 8. A list of up to 8 configurations for data repository associations (DRAs) to be created during the cache creation. The DRAs link the cache to either an Amazon S3 data repository or a Network File System (NFS) data repository that supports the NFSv3 protocol. The DRA configurations must meet the following requirements: 1) All configurations on the list must be of the same data repository type, either all S3 or all NFS. A cache can't link to different data repository types at the same time. 2) An NFS DRA must link to an NFS file system that supports the NFSv3 protocol. DRA automatic import and automatic export is not supported. * `kms_key_id` - Specifies the ID of the AWS Key Management Service (AWS KMS) key to use for encrypting data on an Amazon File Cache. If a KmsKeyId isn't specified, the Amazon FSx-managed AWS KMS key for your account is used. -* `lustre_configuration` - See the [`lustre_configuration`](#lustre-configuration-arguments) block. +* `lustre_configuration` - See the [`lustre_configuration`](#lustre-configuration-arguments) block. Required when `file_cache_type` is `LUSTRE`. * `security_group_ids` - A list of IDs specifying the security groups to apply to all network interfaces created for Amazon File Cache access. * `tags` - (Optional) A map of tags to assign to the file cache. 
If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -118,7 +118,7 @@ In addition to all arguments above, the following attributes are exported: ## Import -FSx File Cache can be imported using the `id`, e.g., +FSx File Cache can be imported using the `id`. ``` $ terraform import aws_fsx_file_cache.example fc-8012925589 From 34a5d1706667552e1f1e9afad573d71a5a085ae2 Mon Sep 17 00:00:00 2001 From: Albert Silva Date: Mon, 24 Oct 2022 08:55:50 -0400 Subject: [PATCH 16/32] terrafmt --- internal/service/fsx/file_cache_test.go | 104 ++++++++++++------------ 1 file changed, 52 insertions(+), 52 deletions(-) diff --git a/internal/service/fsx/file_cache_test.go b/internal/service/fsx/file_cache_test.go index 05e8e112284b..6a1693d7182d 100644 --- a/internal/service/fsx/file_cache_test.go +++ b/internal/service/fsx/file_cache_test.go @@ -456,28 +456,28 @@ func testAccFileCacheConfig_S3Association(bucketName string) string { return testAccFileCacheBaseConfig() + fmt.Sprintf(` resource "aws_fsx_filecache" "test" { - data_repository_association { - data_repository_path = "s3://${aws_s3_bucket.test.id}" - file_cache_path = "/ns1" - } - - file_cache_type = "LUSTRE" - file_cache_type_version = "2.12" - - lustre_configuration { - deployment_type = "CACHE_1" - metadata_configuration { - storage_capacity = 2400 - } - per_unit_storage_throughput = 1000 - weekly_maintenance_start_time = "2:05:00" - } - - subnet_ids = [aws_subnet.test1.id] - storage_capacity = 1200 + data_repository_association { + data_repository_path = "s3://${aws_s3_bucket.test.id}" + file_cache_path = "/ns1" + } + + file_cache_type = "LUSTRE" + file_cache_type_version = "2.12" + + lustre_configuration { + deployment_type = "CACHE_1" + metadata_configuration { + storage_capacity = 2400 + } + per_unit_storage_throughput = 
1000 + weekly_maintenance_start_time = "2:05:00" + } + + subnet_ids = [aws_subnet.test1.id] + storage_capacity = 1200 } resource "aws_s3_bucket" "test" { - bucket = %[1]q + bucket = %[1]q } `, bucketName) } @@ -532,8 +532,8 @@ func testAccFileCacheConfig_copyTagsToDataRepositoryAssociations(tagKey1, tagVal fmt.Sprintf(` resource "aws_fsx_filecache" "test" { copy_tags_to_data_repository_associations = true - file_cache_type = "LUSTRE" - file_cache_type_version = "2.12" + file_cache_type = "LUSTRE" + file_cache_type_version = "2.12" lustre_configuration { deployment_type = "CACHE_1" @@ -654,20 +654,20 @@ func testAccFileCache_tags1(tagKey1, tagValue1 string) string { return testAccFileCacheBaseConfig() + fmt.Sprintf(` resource "aws_fsx_filecache" "test" { - file_cache_type = "LUSTRE" - file_cache_type_version = "2.12" - - lustre_configuration { - deployment_type = "CACHE_1" - metadata_configuration { - storage_capacity = 2400 - } - per_unit_storage_throughput = 1000 - weekly_maintenance_start_time = "2:05:00" - } - - subnet_ids = [aws_subnet.test1.id] - storage_capacity = 1200 + file_cache_type = "LUSTRE" + file_cache_type_version = "2.12" + + lustre_configuration { + deployment_type = "CACHE_1" + metadata_configuration { + storage_capacity = 2400 + } + per_unit_storage_throughput = 1000 + weekly_maintenance_start_time = "2:05:00" + } + + subnet_ids = [aws_subnet.test1.id] + storage_capacity = 1200 tags = { %[1]q = %[2]q @@ -680,25 +680,25 @@ func testAccFileCache_tags2(tagKey1, tagValue1, tagKey2, tagValue2 string) strin return testAccFileCacheBaseConfig() + fmt.Sprintf(` resource "aws_fsx_filecache" "test" { - file_cache_type = "LUSTRE" - file_cache_type_version = "2.12" + file_cache_type = "LUSTRE" + file_cache_type_version = "2.12" - lustre_configuration { - deployment_type = "CACHE_1" - metadata_configuration { - storage_capacity = 2400 - } - per_unit_storage_throughput = 1000 - weekly_maintenance_start_time = "2:05:00" - } + lustre_configuration { + 
deployment_type = "CACHE_1" + metadata_configuration { + storage_capacity = 2400 + } + per_unit_storage_throughput = 1000 + weekly_maintenance_start_time = "2:05:00" + } - subnet_ids = [aws_subnet.test1.id] - storage_capacity = 1200 + subnet_ids = [aws_subnet.test1.id] + storage_capacity = 1200 - tags = { - %[1]q = %[2]q - %[3]q = %[4]q - } + tags = { + %[1]q = %[2]q + %[3]q = %[4]q + } } `, tagKey1, tagValue1, tagKey2, tagValue2) } From ba281f482e055dfb58a59674e41fb58121660e88 Mon Sep 17 00:00:00 2001 From: Albert Silva Date: Mon, 24 Oct 2022 09:35:21 -0400 Subject: [PATCH 17/32] fix lint errors --- website/docs/r/fsx_file_cache.html.markdown | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/website/docs/r/fsx_file_cache.html.markdown b/website/docs/r/fsx_file_cache.html.markdown index 8a08a5b3c5fe..d2ec3234f052 100644 --- a/website/docs/r/fsx_file_cache.html.markdown +++ b/website/docs/r/fsx_file_cache.html.markdown @@ -10,9 +10,6 @@ description: |- Terraform resource for managing an AWS FSx File Cache. See the [Create File Cache](https://docs.aws.amazon.com/fsx/latest/APIReference/API_CreateFileCache.html) for more information. - -## Example Usage - ### Basic Usage ```terraform @@ -100,7 +97,7 @@ The following arguments are supported for `metadata_configuration` configuration In addition to all arguments above, the following attributes are exported: -* `arn` - The Amazon Resource Name (ARN) for the resource. +* `arn` - The Amazon Resource Name (ARN) for the resource. * `data_repository_association_ids` - A list of IDs of data repository associations that are associated with this cache. * `dns_name` - The Domain Name System (DNS) name for the cache. * `file_cache_id` - The system-generated, unique ID of the cache. @@ -118,7 +115,7 @@ In addition to all arguments above, the following attributes are exported: ## Import -FSx File Cache can be imported using the `id`. +FSx File Cache can be imported using the resource `id`. 
``` $ terraform import aws_fsx_file_cache.example fc-8012925589 From ff24f74a421a5bbdbcb98fa91fab3b2b22cd21d6 Mon Sep 17 00:00:00 2001 From: Albert Silva Date: Mon, 24 Oct 2022 09:47:10 -0400 Subject: [PATCH 18/32] fix name from aws_fsx_filecache to aws_fsx_file_cache --- internal/provider/provider.go | 2 +- internal/service/fsx/file_cache_test.go | 69 ++++++++++----------- website/docs/r/fsx_file_cache.html.markdown | 2 +- 3 files changed, 36 insertions(+), 37 deletions(-) diff --git a/internal/provider/provider.go b/internal/provider/provider.go index 45d206ec16c6..a37be67a71b9 100644 --- a/internal/provider/provider.go +++ b/internal/provider/provider.go @@ -1524,7 +1524,7 @@ func New(_ context.Context) (*schema.Provider, error) { "aws_fsx_backup": fsx.ResourceBackup(), "aws_fsx_lustre_file_system": fsx.ResourceLustreFileSystem(), "aws_fsx_data_repository_association": fsx.ResourceDataRepositoryAssociation(), - "aws_fsx_filecache": fsx.ResourceFileCache(), + "aws_fsx_file_cache": fsx.ResourceFileCache(), "aws_fsx_ontap_file_system": fsx.ResourceOntapFileSystem(), "aws_fsx_ontap_storage_virtual_machine": fsx.ResourceOntapStorageVirtualMachine(), "aws_fsx_ontap_volume": fsx.ResourceOntapVolume(), diff --git a/internal/service/fsx/file_cache_test.go b/internal/service/fsx/file_cache_test.go index 6a1693d7182d..27c4930a7d26 100644 --- a/internal/service/fsx/file_cache_test.go +++ b/internal/service/fsx/file_cache_test.go @@ -16,21 +16,20 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" - "github.com/hashicorp/terraform-provider-aws/names" - tffsx "github.com/hashicorp/terraform-provider-aws/internal/service/fsx" + "github.com/hashicorp/terraform-provider-aws/names" ) -func TestAccFSxFileCache_serial(t *testing.T) { +func TestAccFileCache_serial(t *testing.T) { testCases := map[string]map[string]func(t *testing.T){ 
"FSxFileCache": { - "basic": TestAccFSxFileCache_basic, - "disappears": TestAccFSxFileCache_disappears, - "kms_key_id": testAccFSxFileCache_kmsKeyID, - "copy_tags_to_data_repository_associations": testAccFSxFileCache_copyTagsToDataRepositoryAssociations, - "data_repository_association": testAccFSxFileCache_dataRepositoryAssociation, - "security_group_id": testAccFSxFileCache_securityGroupId, - "tags": testAccFSxFileCache_tags, + "basic": TestAccFileCache_basic, + "disappears": TestAccFileCache_disappears, + "kms_key_id": testAccFileCache_kmsKeyID, + "copy_tags_to_data_repository_associations": testAccFileCache_copyTagsToDataRepositoryAssociations, + "data_repository_association": testAccFileCache_dataRepositoryAssociation, + "security_group_id": testAccFileCache_securityGroupId, + "tags": testAccFileCache_tags, }, } @@ -47,13 +46,13 @@ func TestAccFSxFileCache_serial(t *testing.T) { } } -func TestAccFSxFileCache_basic(t *testing.T) { +func TestAccFileCache_basic(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } var filecache fsx.DescribeFileCachesOutput - resourceName := "aws_fsx_filecache.test" + resourceName := "aws_fsx_file_cache.test" resource.Test(t, resource.TestCase{ PreCheck: func() { @@ -92,13 +91,13 @@ func TestAccFSxFileCache_basic(t *testing.T) { }) } -func TestAccFSxFileCache_disappears(t *testing.T) { +func TestAccFileCache_disappears(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } var filecache fsx.DescribeFileCachesOutput - resourceName := "aws_fsx_filecache.test" + resourceName := "aws_fsx_file_cache.test" resource.Test(t, resource.TestCase{ PreCheck: func() { @@ -122,7 +121,7 @@ func TestAccFSxFileCache_disappears(t *testing.T) { } // Per Attribute Acceptance Tests -func testAccFSxFileCache_kmsKeyID(t *testing.T) { +func testAccFileCache_kmsKeyID(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -130,7 +129,7 @@ func 
testAccFSxFileCache_kmsKeyID(t *testing.T) { var filecache1, filecache2 fsx.DescribeFileCachesOutput kmsKeyResourceName1 := "aws_kms_key.test1" kmsKeyResourceName2 := "aws_kms_key.test2" - resourceName := "aws_fsx_filecache.test" + resourceName := "aws_fsx_file_cache.test" resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t); acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) }, @@ -161,13 +160,13 @@ func testAccFSxFileCache_kmsKeyID(t *testing.T) { }) } -func testAccFSxFileCache_copyTagsToDataRepositoryAssociations(t *testing.T) { +func testAccFileCache_copyTagsToDataRepositoryAssociations(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } var filecache1 fsx.DescribeFileCachesOutput - resourceName := "aws_fsx_filecache.test" + resourceName := "aws_fsx_file_cache.test" resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t); acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) }, @@ -187,13 +186,13 @@ func testAccFSxFileCache_copyTagsToDataRepositoryAssociations(t *testing.T) { }) } -func testAccFSxFileCache_dataRepositoryAssociation(t *testing.T) { +func testAccFileCache_dataRepositoryAssociation(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } var filecache1 fsx.DescribeFileCachesOutput - resourceName := "aws_fsx_filecache.test" + resourceName := "aws_fsx_file_cache.test" bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t); acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) }, @@ -233,13 +232,13 @@ func testAccFSxFileCache_dataRepositoryAssociation(t *testing.T) { }) } -func testAccFSxFileCache_securityGroupId(t *testing.T) { +func testAccFileCache_securityGroupId(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } var filecache1 fsx.DescribeFileCachesOutput - resourceName := "aws_fsx_filecache.test" + 
resourceName := "aws_fsx_file_cache.test" resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t); acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) }, @@ -258,13 +257,13 @@ func testAccFSxFileCache_securityGroupId(t *testing.T) { }) } -func testAccFSxFileCache_tags(t *testing.T) { +func testAccFileCache_tags(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } var filecache1, filecache2 fsx.DescribeFileCachesOutput - resourceName := "aws_fsx_filecache.test" + resourceName := "aws_fsx_file_cache.test" resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t); acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) }, @@ -304,7 +303,7 @@ func testAccCheckFileCacheDestroy(s *terraform.State) error { ctx := context.Background() for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_fsx_filecache" { + if rs.Type != "aws_fsx_file_cache" { continue } @@ -377,7 +376,7 @@ func testAccCheckFileCacheRecreated(i, j *fsx.DescribeFileCachesOutput) resource func testAccFileCacheConfig_basic() string { return testAccFileCacheBaseConfig() + ` -resource "aws_fsx_filecache" "test" { +resource "aws_fsx_file_cache" "test" { file_cache_type = "LUSTRE" file_cache_type_version = "2.12" @@ -421,7 +420,7 @@ resource "aws_subnet" "test1" { func testAccFileCacheConfig_NFSAssociation() string { return testAccFileCacheBaseConfig() + ` -resource "aws_fsx_filecache" "test" { +resource "aws_fsx_file_cache" "test" { data_repository_association { data_repository_path = "nfs://filer.domain.com" data_repository_subdirectories = ["test", "test2"] @@ -455,7 +454,7 @@ resource "aws_fsx_filecache" "test" { func testAccFileCacheConfig_S3Association(bucketName string) string { return testAccFileCacheBaseConfig() + fmt.Sprintf(` -resource "aws_fsx_filecache" "test" { +resource "aws_fsx_file_cache" "test" { data_repository_association { data_repository_path = "s3://${aws_s3_bucket.test.id}" file_cache_path = "/ns1" @@ 
-484,7 +483,7 @@ resource "aws_s3_bucket" "test" { func testAccFileCacheConfig_MultipleAssociations() string { return testAccFileCacheBaseConfig() + ` -resource "aws_fsx_filecache" "test" { +resource "aws_fsx_file_cache" "test" { data_repository_association { data_repository_path = "nfs://filer2.domain.com" data_repository_subdirectories = ["test", "test2"] @@ -530,7 +529,7 @@ resource "aws_fsx_filecache" "test" { func testAccFileCacheConfig_copyTagsToDataRepositoryAssociations(tagKey1, tagValue1, tagKey2, tagValue2 string) string { return testAccFileCacheBaseConfig() + fmt.Sprintf(` -resource "aws_fsx_filecache" "test" { +resource "aws_fsx_file_cache" "test" { copy_tags_to_data_repository_associations = true file_cache_type = "LUSTRE" file_cache_type_version = "2.12" @@ -562,7 +561,7 @@ resource "aws_kms_key" "test1" { deletion_window_in_days = 7 } -resource "aws_fsx_filecache" "test" { +resource "aws_fsx_file_cache" "test" { file_cache_type = "LUSTRE" file_cache_type_version = "2.12" @@ -589,7 +588,7 @@ resource "aws_kms_key" "test2" { deletion_window_in_days = 7 } -resource "aws_fsx_filecache" "test" { +resource "aws_fsx_file_cache" "test" { file_cache_type = "LUSTRE" file_cache_type_version = "2.12" @@ -630,7 +629,7 @@ resource "aws_security_group" "test1" { } } -resource "aws_fsx_filecache" "test" { +resource "aws_fsx_file_cache" "test" { file_cache_type = "LUSTRE" file_cache_type_version = "2.12" @@ -653,7 +652,7 @@ resource "aws_fsx_filecache" "test" { func testAccFileCache_tags1(tagKey1, tagValue1 string) string { return testAccFileCacheBaseConfig() + fmt.Sprintf(` -resource "aws_fsx_filecache" "test" { +resource "aws_fsx_file_cache" "test" { file_cache_type = "LUSTRE" file_cache_type_version = "2.12" @@ -679,7 +678,7 @@ resource "aws_fsx_filecache" "test" { func testAccFileCache_tags2(tagKey1, tagValue1, tagKey2, tagValue2 string) string { return testAccFileCacheBaseConfig() + fmt.Sprintf(` -resource "aws_fsx_filecache" "test" { +resource 
"aws_fsx_file_cache" "test" { file_cache_type = "LUSTRE" file_cache_type_version = "2.12" diff --git a/website/docs/r/fsx_file_cache.html.markdown b/website/docs/r/fsx_file_cache.html.markdown index d2ec3234f052..e2ea5df33ba8 100644 --- a/website/docs/r/fsx_file_cache.html.markdown +++ b/website/docs/r/fsx_file_cache.html.markdown @@ -13,7 +13,7 @@ See the [Create File Cache](https://docs.aws.amazon.com/fsx/latest/APIReference/ ### Basic Usage ```terraform -resource "aws_fsx_filecache" "example" { +resource "aws_fsx_file_cache" "example" { data_repository_association { data_repository_path = "nfs://filer.domain.com" From dbe3a53696bd1d9add5fef70644123772557afc1 Mon Sep 17 00:00:00 2001 From: Albert Silva Date: Mon, 24 Oct 2022 10:15:39 -0400 Subject: [PATCH 19/32] address golangci-lint findings --- internal/service/fsx/file_cache.go | 2 -- internal/service/fsx/file_cache_test.go | 2 -- internal/service/fsx/find.go | 1 - 3 files changed, 5 deletions(-) diff --git a/internal/service/fsx/file_cache.go b/internal/service/fsx/file_cache.go index e052eebc8b8a..530a730bb315 100644 --- a/internal/service/fsx/file_cache.go +++ b/internal/service/fsx/file_cache.go @@ -406,7 +406,6 @@ func resourceFileCacheRead(ctx context.Context, d *schema.ResourceData, meta int } func resourceFileCacheUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).FSxConn if d.HasChange("tags_all") { @@ -437,7 +436,6 @@ func resourceFileCacheUpdate(ctx context.Context, d *schema.ResourceData, meta i if _, err := waitFileCacheUpdated(conn, aws.StringValue(result.FileCache.FileCacheId), d.Timeout(schema.TimeoutUpdate)); err != nil { return create.DiagError(names.FSx, create.ErrActionWaitingForUpdate, ResNameFileCache, d.Id(), err) } - } return resourceFileCacheRead(ctx, d, meta) } diff --git a/internal/service/fsx/file_cache_test.go b/internal/service/fsx/file_cache_test.go index 27c4930a7d26..ca6356417028 100644 --- 
a/internal/service/fsx/file_cache_test.go +++ b/internal/service/fsx/file_cache_test.go @@ -298,7 +298,6 @@ func testAccFileCache_tags(t *testing.T) { } func testAccCheckFileCacheDestroy(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).FSxConn ctx := context.Background() @@ -448,7 +447,6 @@ resource "aws_fsx_file_cache" "test" { storage_capacity = 1200 } ` - } func testAccFileCacheConfig_S3Association(bucketName string) string { diff --git a/internal/service/fsx/find.go b/internal/service/fsx/find.go index 558c615b082d..29777c3cb168 100644 --- a/internal/service/fsx/find.go +++ b/internal/service/fsx/find.go @@ -55,7 +55,6 @@ func FindBackupByID(conn *fsx.FSx, id string) (*fsx.Backup, error) { } func findFileCacheByID(conn *fsx.FSx, id string) (*fsx.FileCache, error) { - input := &fsx.DescribeFileCachesInput{ FileCacheIds: []*string{aws.String(id)}, } From 9d4a0dfb3bb8831dcf2f3b2ec24f959bb2cdf7d1 Mon Sep 17 00:00:00 2001 From: Albert Silva Date: Mon, 24 Oct 2022 10:18:56 -0400 Subject: [PATCH 20/32] address semgrep findings --- internal/service/fsx/file_cache_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/internal/service/fsx/file_cache_test.go b/internal/service/fsx/file_cache_test.go index ca6356417028..32613163d49c 100644 --- a/internal/service/fsx/file_cache_test.go +++ b/internal/service/fsx/file_cache_test.go @@ -20,7 +20,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -func TestAccFileCache_serial(t *testing.T) { +func TestAccFSxFileCache_serial(t *testing.T) { testCases := map[string]map[string]func(t *testing.T){ "FSxFileCache": { "basic": TestAccFileCache_basic, @@ -46,7 +46,7 @@ func TestAccFileCache_serial(t *testing.T) { } } -func TestAccFileCache_basic(t *testing.T) { +func TestAccFSxFileCache_basic(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -91,7 +91,7 @@ func TestAccFileCache_basic(t *testing.T) { }) } -func 
TestAccFileCache_disappears(t *testing.T) { +func TestAccFSxFileCache_disappears(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } From ac4c9854be4be1a997b43161c6624976e9728e63 Mon Sep 17 00:00:00 2001 From: Albert Silva Date: Mon, 24 Oct 2022 10:26:40 -0400 Subject: [PATCH 21/32] fix test names --- internal/service/fsx/file_cache_test.go | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/internal/service/fsx/file_cache_test.go b/internal/service/fsx/file_cache_test.go index 32613163d49c..1b25a10681c2 100644 --- a/internal/service/fsx/file_cache_test.go +++ b/internal/service/fsx/file_cache_test.go @@ -201,7 +201,7 @@ func testAccFileCache_dataRepositoryAssociation(t *testing.T) { CheckDestroy: testAccCheckWindowsFileSystemDestroy, Steps: []resource.TestStep{ { - Config: testAccFileCacheConfig_NFSAssociation(), + Config: testAccFileCacheConfig_nfs_association(), Check: resource.ComposeTestCheckFunc( testAccCheckFileCacheExists(resourceName, &filecache1), resource.TestCheckResourceAttr(resourceName, "data_repository_association.data_repository_path", "nfs://filer.domain.com"), @@ -212,7 +212,7 @@ func testAccFileCache_dataRepositoryAssociation(t *testing.T) { ), }, { - Config: testAccFileCacheConfig_S3Association(bucketName), + Config: testAccFileCacheConfig_s3_association(bucketName), Check: resource.ComposeTestCheckFunc( testAccCheckFileCacheExists(resourceName, &filecache1), resource.TestCheckResourceAttr(resourceName, "data_repository_association.data_repository_path", bucketName), @@ -222,7 +222,7 @@ func testAccFileCache_dataRepositoryAssociation(t *testing.T) { ), }, { - Config: testAccFileCacheConfig_MultipleAssociations(), + Config: testAccFileCacheConfig_multiple_associations(), Check: resource.ComposeTestCheckFunc( testAccCheckFileCacheExists(resourceName, &filecache1), resource.TestCheckResourceAttr(resourceName, "data_repository_association_ids.#", "1"), @@ -272,7 +272,7 @@ 
func testAccFileCache_tags(t *testing.T) { CheckDestroy: testAccCheckFileCacheDestroy, Steps: []resource.TestStep{ { - Config: testAccFileCache_tags1("key1", "value1"), + Config: testAccFileCacheConfig_tags1("key1", "value1"), Check: resource.ComposeTestCheckFunc( testAccCheckFileCacheExists(resourceName, &filecache1), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), @@ -284,7 +284,7 @@ func testAccFileCache_tags(t *testing.T) { ImportState: true, }, { - Config: testAccFileCache_tags2("key1", "value1updated", "key2", "value2"), + Config: testAccFileCacheConfig_tags2("key1", "value1updated", "key2", "value2"), Check: resource.ComposeTestCheckFunc( testAccCheckFileCacheExists(resourceName, &filecache2), testAccCheckFileCacheNotRecreated(&filecache1, &filecache2), @@ -417,7 +417,7 @@ resource "aws_subnet" "test1" { ` } -func testAccFileCacheConfig_NFSAssociation() string { +func testAccFileCacheConfig_nfs_association() string { return testAccFileCacheBaseConfig() + ` resource "aws_fsx_file_cache" "test" { data_repository_association { @@ -449,7 +449,7 @@ resource "aws_fsx_file_cache" "test" { ` } -func testAccFileCacheConfig_S3Association(bucketName string) string { +func testAccFileCacheConfig_s3_association(bucketName string) string { return testAccFileCacheBaseConfig() + fmt.Sprintf(` resource "aws_fsx_file_cache" "test" { @@ -479,7 +479,7 @@ resource "aws_s3_bucket" "test" { `, bucketName) } -func testAccFileCacheConfig_MultipleAssociations() string { +func testAccFileCacheConfig_multiple_associations() string { return testAccFileCacheBaseConfig() + ` resource "aws_fsx_file_cache" "test" { data_repository_association { @@ -647,7 +647,7 @@ resource "aws_fsx_file_cache" "test" { ` } -func testAccFileCache_tags1(tagKey1, tagValue1 string) string { +func testAccFileCacheConfig_tags1(tagKey1, tagValue1 string) string { return testAccFileCacheBaseConfig() + fmt.Sprintf(` resource "aws_fsx_file_cache" "test" { @@ -673,7 +673,7 @@ resource "aws_fsx_file_cache" 
"test" { `, tagKey1, tagValue1) } -func testAccFileCache_tags2(tagKey1, tagValue1, tagKey2, tagValue2 string) string { +func testAccFileCacheConfig_tags2(tagKey1, tagValue1, tagKey2, tagValue2 string) string { return testAccFileCacheBaseConfig() + fmt.Sprintf(` resource "aws_fsx_file_cache" "test" { From 303cab0a30e9d81aafb559f443049c72ce3e491b Mon Sep 17 00:00:00 2001 From: Albert Silva Date: Mon, 24 Oct 2022 10:27:55 -0400 Subject: [PATCH 22/32] add Example Usage --- website/docs/r/fsx_file_cache.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/fsx_file_cache.html.markdown b/website/docs/r/fsx_file_cache.html.markdown index e2ea5df33ba8..0ca4cecd73c0 100644 --- a/website/docs/r/fsx_file_cache.html.markdown +++ b/website/docs/r/fsx_file_cache.html.markdown @@ -10,7 +10,7 @@ description: |- Terraform resource for managing an AWS FSx File Cache. See the [Create File Cache](https://docs.aws.amazon.com/fsx/latest/APIReference/API_CreateFileCache.html) for more information. 
-### Basic Usage +## Example Usage ```terraform resource "aws_fsx_file_cache" "example" { From 3ee7450ea9ca38f3008492038c8b4a40c584cdcf Mon Sep 17 00:00:00 2001 From: Albert Silva Date: Mon, 24 Oct 2022 10:54:57 -0400 Subject: [PATCH 23/32] fix test names --- internal/service/fsx/file_cache_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/service/fsx/file_cache_test.go b/internal/service/fsx/file_cache_test.go index 1b25a10681c2..eed0fe6110dc 100644 --- a/internal/service/fsx/file_cache_test.go +++ b/internal/service/fsx/file_cache_test.go @@ -23,8 +23,8 @@ import ( func TestAccFSxFileCache_serial(t *testing.T) { testCases := map[string]map[string]func(t *testing.T){ "FSxFileCache": { - "basic": TestAccFileCache_basic, - "disappears": TestAccFileCache_disappears, + "basic": TestAccFSxFileCache_basic, + "disappears": TestAccFSxFileCache_disappears, "kms_key_id": testAccFileCache_kmsKeyID, "copy_tags_to_data_repository_associations": testAccFileCache_copyTagsToDataRepositoryAssociations, "data_repository_association": testAccFileCache_dataRepositoryAssociation, From 0aeaff91819893eaaa349b68766a8ea14ef7ca32 Mon Sep 17 00:00:00 2001 From: Albert Silva Date: Mon, 24 Oct 2022 17:26:23 -0400 Subject: [PATCH 24/32] fix failing tests --- internal/service/fsx/file_cache_test.go | 42 ++++++++++++++++--------- 1 file changed, 27 insertions(+), 15 deletions(-) diff --git a/internal/service/fsx/file_cache_test.go b/internal/service/fsx/file_cache_test.go index eed0fe6110dc..5f22866c1b3d 100644 --- a/internal/service/fsx/file_cache_test.go +++ b/internal/service/fsx/file_cache_test.go @@ -25,7 +25,7 @@ func TestAccFSxFileCache_serial(t *testing.T) { "FSxFileCache": { "basic": TestAccFSxFileCache_basic, "disappears": TestAccFSxFileCache_disappears, - "kms_key_id": testAccFileCache_kmsKeyID, + "kms_key_id": TestAccFileCache_kmsKeyID, "copy_tags_to_data_repository_associations": testAccFileCache_copyTagsToDataRepositoryAssociations, 
"data_repository_association": testAccFileCache_dataRepositoryAssociation, "security_group_id": testAccFileCache_securityGroupId, @@ -67,6 +67,7 @@ func TestAccFSxFileCache_basic(t *testing.T) { Config: testAccFileCacheConfig_basic(), Check: resource.ComposeTestCheckFunc( testAccCheckFileCacheExists(resourceName, &filecache), + acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "fsx", regexp.MustCompile(`file-cache/fc-.+`)), resource.TestCheckResourceAttr(resourceName, "copy_tags_to_data_repository_associations", "false"), resource.TestCheckResourceAttr(resourceName, "file_cache_type", "LUSTRE"), resource.TestCheckResourceAttr(resourceName, "file_cache_type_version", "2.12"), @@ -74,9 +75,8 @@ func TestAccFSxFileCache_basic(t *testing.T) { acctest.MatchResourceAttrRegionalARN(resourceName, "kms_key_id", "kms", regexp.MustCompile(`key\/.+`)), resource.TestCheckResourceAttr(resourceName, "lustre_configuration.0.deployment_type", "CACHE_1"), resource.TestCheckResourceAttr(resourceName, "lustre_configuration.0.metadata_configuration.0.storage_capacity", "2400"), - resource.TestCheckResourceAttr(resourceName, "lustre_configuration.0.metadata_configuration.0.per_unit_storage_capacity", "1000"), - resource.TestCheckResourceAttr(resourceName, "lustre_configuration.0.metadata_configuration.0.weekly_maintenance_start_time", "2:05:00"), - acctest.MatchResourceAttrRegionalARN(resourceName, "resource_arn", "fsx", regexp.MustCompile(`file-cache/fc-.+`)), + resource.TestCheckResourceAttr(resourceName, "lustre_configuration.0.per_unit_storage_throughput", "1000"), + resource.TestCheckResourceAttr(resourceName, "lustre_configuration.0.weekly_maintenance_start_time", "2:05:00"), resource.TestCheckResourceAttr(resourceName, "storage_capacity", "1200"), resource.TestCheckResourceAttr(resourceName, "subnet_ids.#", "0"), ), @@ -121,7 +121,7 @@ func TestAccFSxFileCache_disappears(t *testing.T) { } // Per Attribute Acceptance Tests -func testAccFileCache_kmsKeyID(t *testing.T) { 
+func TestAccFileCache_kmsKeyID(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -135,12 +135,12 @@ func testAccFileCache_kmsKeyID(t *testing.T) { PreCheck: func() { acctest.PreCheck(t); acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckWindowsFileSystemDestroy, + CheckDestroy: testAccCheckFileCacheDestroy, Steps: []resource.TestStep{ { Config: testAccFileCacheConfig_kmsKeyID1(), Check: resource.ComposeTestCheckFunc( - testAccCheckFileCacheExists(resourceName, &filecache2), + testAccCheckFileCacheExists(resourceName, &filecache1), resource.TestCheckResourceAttrPair(resourceName, "kms_key_id", kmsKeyResourceName1, "arn"), ), }, @@ -172,7 +172,7 @@ func testAccFileCache_copyTagsToDataRepositoryAssociations(t *testing.T) { PreCheck: func() { acctest.PreCheck(t); acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckWindowsFileSystemDestroy, + CheckDestroy: testAccCheckFileCacheDestroy, Steps: []resource.TestStep{ { Config: testAccFileCacheConfig_copyTagsToDataRepositoryAssociations("key1", "value1", "key2", "value2"), @@ -198,17 +198,17 @@ func testAccFileCache_dataRepositoryAssociation(t *testing.T) { PreCheck: func() { acctest.PreCheck(t); acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckWindowsFileSystemDestroy, + CheckDestroy: testAccCheckFileCacheDestroy, Steps: []resource.TestStep{ { Config: testAccFileCacheConfig_nfs_association(), Check: resource.ComposeTestCheckFunc( testAccCheckFileCacheExists(resourceName, &filecache1), - resource.TestCheckResourceAttr(resourceName, 
"data_repository_association.data_repository_path", "nfs://filer.domain.com"), - resource.TestCheckResourceAttr(resourceName, "data_repository_association.file_cache_path", "/ns1"), - resource.TestCheckResourceAttr(resourceName, "data_repository_association.nfs.dns_ips.0", "192.168.0.1"), - resource.TestCheckResourceAttr(resourceName, "data_repository_association.nfs.version", "NFS3"), - resource.TestCheckResourceAttr(resourceName, "data_repository_association_ids.#", "0"), + resource.TestCheckResourceAttr(resourceName, "data_repository_association.0.data_repository_path", "nfs://filer.domain.com/"), + resource.TestCheckResourceAttr(resourceName, "data_repository_association.0.file_cache_path", "/ns1"), + resource.TestCheckResourceAttr(resourceName, "data_repository_association.0.nfs.0.dns_ips.0", "192.168.0.1"), + resource.TestCheckResourceAttr(resourceName, "data_repository_association.0.nfs.0.version", "NFS3"), + resource.TestCheckResourceAttr(resourceName, "data_repository_association_ids.#", "1"), ), }, { @@ -244,7 +244,7 @@ func testAccFileCache_securityGroupId(t *testing.T) { PreCheck: func() { acctest.PreCheck(t); acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckWindowsFileSystemDestroy, + CheckDestroy: testAccCheckFileCacheDestroy, Steps: []resource.TestStep{ { Config: testAccFileCacheConfig_securityGroupID(), @@ -529,6 +529,18 @@ func testAccFileCacheConfig_copyTagsToDataRepositoryAssociations(tagKey1, tagVal fmt.Sprintf(` resource "aws_fsx_file_cache" "test" { copy_tags_to_data_repository_associations = true + + data_repository_association { + data_repository_path = "nfs://filer.domain.com" + data_repository_subdirectories = ["test", "test2"] + file_cache_path = "/ns1" + + nfs { + dns_ips = ["192.168.0.1", "192.168.0.2"] + version = "NFS3" + } + } + file_cache_type = "LUSTRE" file_cache_type_version = 
"2.12" From e9e826a1e89b3e3414bf95be4a2beb96484de02e Mon Sep 17 00:00:00 2001 From: Albert Silva Date: Tue, 25 Oct 2022 07:14:15 -0400 Subject: [PATCH 25/32] fix failing tests --- internal/service/fsx/file_cache_test.go | 99 +++++++++++++------------ 1 file changed, 50 insertions(+), 49 deletions(-) diff --git a/internal/service/fsx/file_cache_test.go b/internal/service/fsx/file_cache_test.go index 5f22866c1b3d..3ee7d909b63c 100644 --- a/internal/service/fsx/file_cache_test.go +++ b/internal/service/fsx/file_cache_test.go @@ -25,7 +25,7 @@ func TestAccFSxFileCache_serial(t *testing.T) { "FSxFileCache": { "basic": TestAccFSxFileCache_basic, "disappears": TestAccFSxFileCache_disappears, - "kms_key_id": TestAccFileCache_kmsKeyID, + "kms_key_id": testAccFileCache_kmsKeyID, "copy_tags_to_data_repository_associations": testAccFileCache_copyTagsToDataRepositoryAssociations, "data_repository_association": testAccFileCache_dataRepositoryAssociation, "security_group_id": testAccFileCache_securityGroupId, @@ -68,7 +68,6 @@ func TestAccFSxFileCache_basic(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckFileCacheExists(resourceName, &filecache), acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "fsx", regexp.MustCompile(`file-cache/fc-.+`)), - resource.TestCheckResourceAttr(resourceName, "copy_tags_to_data_repository_associations", "false"), resource.TestCheckResourceAttr(resourceName, "file_cache_type", "LUSTRE"), resource.TestCheckResourceAttr(resourceName, "file_cache_type_version", "2.12"), resource.TestMatchResourceAttr(resourceName, "id", regexp.MustCompile(`fc-.+`)), @@ -78,7 +77,7 @@ func TestAccFSxFileCache_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "lustre_configuration.0.per_unit_storage_throughput", "1000"), resource.TestCheckResourceAttr(resourceName, "lustre_configuration.0.weekly_maintenance_start_time", "2:05:00"), resource.TestCheckResourceAttr(resourceName, "storage_capacity", "1200"), - 
resource.TestCheckResourceAttr(resourceName, "subnet_ids.#", "0"), + resource.TestCheckResourceAttr(resourceName, "subnet_ids.#", "1"), ), }, { @@ -121,44 +120,6 @@ func TestAccFSxFileCache_disappears(t *testing.T) { } // Per Attribute Acceptance Tests -func TestAccFileCache_kmsKeyID(t *testing.T) { - if testing.Short() { - t.Skip("skipping long-running test in short mode") - } - - var filecache1, filecache2 fsx.DescribeFileCachesOutput - kmsKeyResourceName1 := "aws_kms_key.test1" - kmsKeyResourceName2 := "aws_kms_key.test2" - resourceName := "aws_fsx_file_cache.test" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(t); acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) }, - ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckFileCacheDestroy, - Steps: []resource.TestStep{ - { - Config: testAccFileCacheConfig_kmsKeyID1(), - Check: resource.ComposeTestCheckFunc( - testAccCheckFileCacheExists(resourceName, &filecache1), - resource.TestCheckResourceAttrPair(resourceName, "kms_key_id", kmsKeyResourceName1, "arn"), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - }, - { - Config: testAccFileCacheConfig_kmsKeyID2(), - Check: resource.ComposeTestCheckFunc( - testAccCheckFileCacheExists(resourceName, &filecache2), - testAccCheckFileCacheRecreated(&filecache1, &filecache2), - resource.TestCheckResourceAttrPair(resourceName, "kms_key_id", kmsKeyResourceName2, "arn"), - ), - }, - }, - }) -} func testAccFileCache_copyTagsToDataRepositoryAssociations(t *testing.T) { if testing.Short() { @@ -215,17 +176,55 @@ func testAccFileCache_dataRepositoryAssociation(t *testing.T) { Config: testAccFileCacheConfig_s3_association(bucketName), Check: resource.ComposeTestCheckFunc( testAccCheckFileCacheExists(resourceName, &filecache1), - resource.TestCheckResourceAttr(resourceName, "data_repository_association.data_repository_path", bucketName), - 
resource.TestCheckResourceAttr(resourceName, "data_repository_association.file_cache_path", "/ns1"), - resource.TestCheckResourceAttr(resourceName, "data_repository_association_ids.#", "0"), - resource.TestCheckResourceAttr(resourceName, "data_repository_association_ids.#", "0"), + resource.TestCheckResourceAttr(resourceName, "data_repository_association.0.data_repository_path", bucketName), + resource.TestCheckResourceAttr(resourceName, "data_repository_association.0.file_cache_path", "/ns1"), + resource.TestCheckResourceAttr(resourceName, "data_repository_association_ids.#", "1"), ), }, { Config: testAccFileCacheConfig_multiple_associations(), Check: resource.ComposeTestCheckFunc( testAccCheckFileCacheExists(resourceName, &filecache1), - resource.TestCheckResourceAttr(resourceName, "data_repository_association_ids.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_repository_association_ids.#", "2"), + ), + }, + }, + }) +} + +func testAccFileCache_kmsKeyID(t *testing.T) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var filecache1, filecache2 fsx.DescribeFileCachesOutput + kmsKeyResourceName1 := "aws_kms_key.test1" + kmsKeyResourceName2 := "aws_kms_key.test2" + resourceName := "aws_fsx_file_cache.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t); acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) }, + ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckFileCacheDestroy, + Steps: []resource.TestStep{ + { + Config: testAccFileCacheConfig_kmsKeyID1(), + Check: resource.ComposeTestCheckFunc( + testAccCheckFileCacheExists(resourceName, &filecache1), + resource.TestCheckResourceAttrPair(resourceName, "kms_key_id", kmsKeyResourceName1, "arn"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + }, + { + Config: testAccFileCacheConfig_kmsKeyID2(), + Check: 
resource.ComposeTestCheckFunc( + testAccCheckFileCacheExists(resourceName, &filecache2), + testAccCheckFileCacheRecreated(&filecache1, &filecache2), + resource.TestCheckResourceAttrPair(resourceName, "kms_key_id", kmsKeyResourceName2, "arn"), ), }, }, @@ -297,6 +296,8 @@ func testAccFileCache_tags(t *testing.T) { }) } +// helper functions + func testAccCheckFileCacheDestroy(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).FSxConn ctx := context.Background() @@ -421,7 +422,7 @@ func testAccFileCacheConfig_nfs_association() string { return testAccFileCacheBaseConfig() + ` resource "aws_fsx_file_cache" "test" { data_repository_association { - data_repository_path = "nfs://filer.domain.com" + data_repository_path = "nfs://filer.domain.com/" data_repository_subdirectories = ["test", "test2"] file_cache_path = "/ns1" @@ -494,7 +495,7 @@ resource "aws_fsx_file_cache" "test" { } data_repository_association { - data_repository_path = "nfs://filer.domain.com" + data_repository_path = "nfs://filer.domain.com/" data_repository_subdirectories = ["test", "test2"] file_cache_path = "/ns1" @@ -531,7 +532,7 @@ resource "aws_fsx_file_cache" "test" { copy_tags_to_data_repository_associations = true data_repository_association { - data_repository_path = "nfs://filer.domain.com" + data_repository_path = "nfs://filer.domain.com/" data_repository_subdirectories = ["test", "test2"] file_cache_path = "/ns1" From 8e397076b06554f653d4c2c70e25877718709bb7 Mon Sep 17 00:00:00 2001 From: Albert Silva Date: Tue, 25 Oct 2022 07:20:04 -0400 Subject: [PATCH 26/32] terrafmt --- internal/service/fsx/file_cache_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/service/fsx/file_cache_test.go b/internal/service/fsx/file_cache_test.go index 3ee7d909b63c..7b4aee1deca2 100644 --- a/internal/service/fsx/file_cache_test.go +++ b/internal/service/fsx/file_cache_test.go @@ -542,8 +542,8 @@ resource "aws_fsx_file_cache" "test" { } } - 
file_cache_type = "LUSTRE" - file_cache_type_version = "2.12" + file_cache_type = "LUSTRE" + file_cache_type_version = "2.12" lustre_configuration { deployment_type = "CACHE_1" From c436ef920e88d678f7a62f072e00b4e016d16e9e Mon Sep 17 00:00:00 2001 From: Albert Silva Date: Tue, 25 Oct 2022 08:06:19 -0400 Subject: [PATCH 27/32] fix testAccFileCache_dataRepositoryAssociation test --- internal/service/fsx/file_cache_test.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/internal/service/fsx/file_cache_test.go b/internal/service/fsx/file_cache_test.go index 7b4aee1deca2..40cf269484d9 100644 --- a/internal/service/fsx/file_cache_test.go +++ b/internal/service/fsx/file_cache_test.go @@ -152,7 +152,7 @@ func testAccFileCache_dataRepositoryAssociation(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var filecache1 fsx.DescribeFileCachesOutput + var filecache1, filecache2, filecache3 fsx.DescribeFileCachesOutput resourceName := "aws_fsx_file_cache.test" bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.Test(t, resource.TestCase{ @@ -175,8 +175,8 @@ func testAccFileCache_dataRepositoryAssociation(t *testing.T) { { Config: testAccFileCacheConfig_s3_association(bucketName), Check: resource.ComposeTestCheckFunc( - testAccCheckFileCacheExists(resourceName, &filecache1), - resource.TestCheckResourceAttr(resourceName, "data_repository_association.0.data_repository_path", bucketName), + testAccCheckFileCacheExists(resourceName, &filecache2), + resource.TestCheckResourceAttr(resourceName, "data_repository_association.0.data_repository_path", "s3://"+bucketName), resource.TestCheckResourceAttr(resourceName, "data_repository_association.0.file_cache_path", "/ns1"), resource.TestCheckResourceAttr(resourceName, "data_repository_association_ids.#", "1"), ), @@ -184,7 +184,7 @@ func testAccFileCache_dataRepositoryAssociation(t *testing.T) { { Config: testAccFileCacheConfig_multiple_associations(), Check: 
resource.ComposeTestCheckFunc( - testAccCheckFileCacheExists(resourceName, &filecache1), + testAccCheckFileCacheExists(resourceName, &filecache3), resource.TestCheckResourceAttr(resourceName, "data_repository_association_ids.#", "2"), ), }, @@ -484,7 +484,7 @@ func testAccFileCacheConfig_multiple_associations() string { return testAccFileCacheBaseConfig() + ` resource "aws_fsx_file_cache" "test" { data_repository_association { - data_repository_path = "nfs://filer2.domain.com" + data_repository_path = "nfs://filer2.domain.com/" data_repository_subdirectories = ["test", "test2"] file_cache_path = "/ns2" From cf9befb4cfb959b2d0a066499a5acc6b6ca77600 Mon Sep 17 00:00:00 2001 From: Albert Silva Date: Wed, 26 Oct 2022 11:20:36 -0400 Subject: [PATCH 28/32] tidy up flex functions --- internal/service/fsx/file_cache.go | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/internal/service/fsx/file_cache.go b/internal/service/fsx/file_cache.go index 530a730bb315..9eb5364cb2fd 100644 --- a/internal/service/fsx/file_cache.go +++ b/internal/service/fsx/file_cache.go @@ -462,8 +462,12 @@ func resourceFileCacheDelete(ctx context.Context, d *schema.ResourceData, meta i return nil } -func flattenDataRepositoryAssociations(dataRepositoryAssociations []*fsx.DataRepositoryAssociation, defaultTagsConfig *tftags.DefaultConfig, ignoreTagsConfig *tftags.IgnoreConfig) []map[string]interface{} { - flattenedDataRepositoryAssociations := make([]map[string]interface{}, 0) +func flattenDataRepositoryAssociations(dataRepositoryAssociations []*fsx.DataRepositoryAssociation, defaultTagsConfig *tftags.DefaultConfig, ignoreTagsConfig *tftags.IgnoreConfig) []interface{} { + if len(dataRepositoryAssociations) == 0 { + return nil + } + + var flattenedDataRepositoryAssociations []interface{} for _, dataRepositoryAssociation := range dataRepositoryAssociations { tags := KeyValueTags(dataRepositoryAssociation.Tags).IgnoreAWS().IgnoreConfig(ignoreTagsConfig) @@ -534,10 
+538,18 @@ func flattenFileCacheLustreMetadataConfiguration(fileCacheLustreMetadataConfigur } func expandDataRepositoryAssociations(l []interface{}) []*fsx.FileCacheDataRepositoryAssociation { - dataRepositoryAssociations := []*fsx.FileCacheDataRepositoryAssociation{} + if len(l) == 0 { + return nil + } + + var dataRepositoryAssociations []*fsx.FileCacheDataRepositoryAssociation - for _, dataRepositoryAssociation := range l { - tfMap := dataRepositoryAssociation.(map[string]interface{}) + for _, tfMapRaw := range l { + tfMap, ok := tfMapRaw.(map[string]interface{}) + + if !ok { + continue + } req := &fsx.FileCacheDataRepositoryAssociation{} if v, ok := tfMap["data_repository_path"].(string); ok { From 2a7222d01edd6cd5f9ad7590fdf68d5eeead770e Mon Sep 17 00:00:00 2001 From: Albert Silva Date: Wed, 26 Oct 2022 11:20:51 -0400 Subject: [PATCH 29/32] tidy up tests --- internal/service/fsx/file_cache_test.go | 123 ++++++++++++++++++++---- 1 file changed, 104 insertions(+), 19 deletions(-) diff --git a/internal/service/fsx/file_cache_test.go b/internal/service/fsx/file_cache_test.go index 40cf269484d9..e39421c30636 100644 --- a/internal/service/fsx/file_cache_test.go +++ b/internal/service/fsx/file_cache_test.go @@ -27,7 +27,9 @@ func TestAccFSxFileCache_serial(t *testing.T) { "disappears": TestAccFSxFileCache_disappears, "kms_key_id": testAccFileCache_kmsKeyID, "copy_tags_to_data_repository_associations": testAccFileCache_copyTagsToDataRepositoryAssociations, - "data_repository_association": testAccFileCache_dataRepositoryAssociation, + "data_repository_association_multiple": TestAccFileCache_dataRepositoryAssociation_multiple, + "data_repository_association_nfs": testAccFileCache_dataRepositoryAssociation_nfs, + "data_repository_association_s3": testAccFileCache_dataRepositoryAssociation_s3, "security_group_id": testAccFileCache_securityGroupId, "tags": testAccFileCache_tags, }, @@ -84,7 +86,7 @@ func TestAccFSxFileCache_basic(t *testing.T) { ResourceName: resourceName, 
ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"apply_immediately", "user"}, + ImportStateVerifyIgnore: []string{"copy_tags_to_data_repository_associations"}, }, }, }) @@ -143,18 +145,55 @@ func testAccFileCache_copyTagsToDataRepositoryAssociations(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "data_repository_association.0.tags.%", "2"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"copy_tags_to_data_repository_associations"}, + }, }, }) } -func testAccFileCache_dataRepositoryAssociation(t *testing.T) { +func TestAccFileCache_dataRepositoryAssociation_multiple(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } - var filecache1, filecache2, filecache3 fsx.DescribeFileCachesOutput + var filecache fsx.DescribeFileCachesOutput resourceName := "aws_fsx_file_cache.test" - bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t); acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) }, + ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckFileCacheDestroy, + Steps: []resource.TestStep{ + { + Config: testAccFileCacheConfig_multiple_associations(), + Check: resource.ComposeTestCheckFunc( + testAccCheckFileCacheExists(resourceName, &filecache), + resource.TestCheckResourceAttr(resourceName, "data_repository_association_ids.#", "2"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"copy_tags_to_data_repository_associations"}, + }, + }, + }) +} + +func testAccFileCache_dataRepositoryAssociation_nfs(t *testing.T) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var filecache fsx.DescribeFileCachesOutput + resourceName := 
"aws_fsx_file_cache.test" + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t); acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), @@ -164,7 +203,7 @@ func testAccFileCache_dataRepositoryAssociation(t *testing.T) { { Config: testAccFileCacheConfig_nfs_association(), Check: resource.ComposeTestCheckFunc( - testAccCheckFileCacheExists(resourceName, &filecache1), + testAccCheckFileCacheExists(resourceName, &filecache), resource.TestCheckResourceAttr(resourceName, "data_repository_association.0.data_repository_path", "nfs://filer.domain.com/"), resource.TestCheckResourceAttr(resourceName, "data_repository_association.0.file_cache_path", "/ns1"), resource.TestCheckResourceAttr(resourceName, "data_repository_association.0.nfs.0.dns_ips.0", "192.168.0.1"), @@ -172,21 +211,45 @@ func testAccFileCache_dataRepositoryAssociation(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "data_repository_association_ids.#", "1"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"copy_tags_to_data_repository_associations"}, + }, + }, + }) +} + +func testAccFileCache_dataRepositoryAssociation_s3(t *testing.T) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var filecache fsx.DescribeFileCachesOutput + resourceName := "aws_fsx_file_cache.test" + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t); acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) }, + ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckFileCacheDestroy, + Steps: []resource.TestStep{ { Config: testAccFileCacheConfig_s3_association(bucketName), Check: resource.ComposeTestCheckFunc( - testAccCheckFileCacheExists(resourceName, 
&filecache2), + testAccCheckFileCacheExists(resourceName, &filecache), resource.TestCheckResourceAttr(resourceName, "data_repository_association.0.data_repository_path", "s3://"+bucketName), resource.TestCheckResourceAttr(resourceName, "data_repository_association.0.file_cache_path", "/ns1"), resource.TestCheckResourceAttr(resourceName, "data_repository_association_ids.#", "1"), ), }, { - Config: testAccFileCacheConfig_multiple_associations(), - Check: resource.ComposeTestCheckFunc( - testAccCheckFileCacheExists(resourceName, &filecache3), - resource.TestCheckResourceAttr(resourceName, "data_repository_association_ids.#", "2"), - ), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"copy_tags_to_data_repository_associations"}, }, }, }) @@ -216,8 +279,10 @@ func testAccFileCache_kmsKeyID(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"copy_tags_to_data_repository_associations"}, }, { Config: testAccFileCacheConfig_kmsKeyID2(), @@ -227,6 +292,12 @@ func testAccFileCache_kmsKeyID(t *testing.T) { resource.TestCheckResourceAttrPair(resourceName, "kms_key_id", kmsKeyResourceName2, "arn"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"copy_tags_to_data_repository_associations"}, + }, }, }) } @@ -252,6 +323,12 @@ func testAccFileCache_securityGroupId(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "security_group_ids.#", "1"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"copy_tags_to_data_repository_associations", "security_group_ids"}, + }, }, }) } @@ -279,8 +356,10 @@ func testAccFileCache_tags(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, + ResourceName: resourceName, + 
ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"copy_tags_to_data_repository_associations"}, }, { Config: testAccFileCacheConfig_tags2("key1", "value1updated", "key2", "value2"), @@ -292,6 +371,12 @@ func testAccFileCache_tags(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"copy_tags_to_data_repository_associations"}, + }, }, }) } @@ -423,7 +508,7 @@ func testAccFileCacheConfig_nfs_association() string { resource "aws_fsx_file_cache" "test" { data_repository_association { data_repository_path = "nfs://filer.domain.com/" - data_repository_subdirectories = ["test", "test2"] + data_repository_subdirectories = ["test5", "test3", "test2", "test4", "test1"] file_cache_path = "/ns1" nfs { @@ -485,7 +570,7 @@ func testAccFileCacheConfig_multiple_associations() string { resource "aws_fsx_file_cache" "test" { data_repository_association { data_repository_path = "nfs://filer2.domain.com/" - data_repository_subdirectories = ["test", "test2"] + data_repository_subdirectories = ["test5", "test3", "test2", "test4", "test1"] file_cache_path = "/ns2" nfs { @@ -496,7 +581,7 @@ resource "aws_fsx_file_cache" "test" { data_repository_association { data_repository_path = "nfs://filer.domain.com/" - data_repository_subdirectories = ["test", "test2"] + data_repository_subdirectories = ["test5", "test3", "test2", "test4", "test1"] file_cache_path = "/ns1" nfs { From db684e3788a4e1f31aff373276d3231f1f1e8f79 Mon Sep 17 00:00:00 2001 From: Albert Silva Date: Wed, 26 Oct 2022 13:56:30 -0400 Subject: [PATCH 30/32] troubleshoot data_repository_association recreation --- internal/service/fsx/file_cache.go | 2 +- internal/service/fsx/file_cache_test.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/internal/service/fsx/file_cache.go b/internal/service/fsx/file_cache.go index 
9eb5364cb2fd..88f66a1d3a35 100644 --- a/internal/service/fsx/file_cache.go +++ b/internal/service/fsx/file_cache.go @@ -367,7 +367,7 @@ func resourceFileCacheRead(ctx context.Context, d *schema.ResourceData, meta int d.Set("subnet_ids", aws.StringValueSlice(filecache.SubnetIds)) d.Set("vpc_id", filecache.VpcId) - if err := d.Set("data_repository_association_ids", aws.StringValueSlice(filecache.DataRepositoryAssociationIds)); err != nil { + if err := d.Set("data_repository_association_ids", filecache.DataRepositoryAssociationIds); err != nil { return create.DiagError(names.FSx, create.ErrActionSetting, ResNameFileCache, d.Id(), err) } if err := d.Set("lustre_configuration", flattenFileCacheLustreConfiguration(filecache.LustreConfiguration)); err != nil { diff --git a/internal/service/fsx/file_cache_test.go b/internal/service/fsx/file_cache_test.go index e39421c30636..6b721884a862 100644 --- a/internal/service/fsx/file_cache_test.go +++ b/internal/service/fsx/file_cache_test.go @@ -27,7 +27,7 @@ func TestAccFSxFileCache_serial(t *testing.T) { "disappears": TestAccFSxFileCache_disappears, "kms_key_id": testAccFileCache_kmsKeyID, "copy_tags_to_data_repository_associations": testAccFileCache_copyTagsToDataRepositoryAssociations, - "data_repository_association_multiple": TestAccFileCache_dataRepositoryAssociation_multiple, + "data_repository_association_multiple": testAccFileCache_dataRepositoryAssociation_multiple, "data_repository_association_nfs": testAccFileCache_dataRepositoryAssociation_nfs, "data_repository_association_s3": testAccFileCache_dataRepositoryAssociation_s3, "security_group_id": testAccFileCache_securityGroupId, @@ -155,7 +155,7 @@ func testAccFileCache_copyTagsToDataRepositoryAssociations(t *testing.T) { }) } -func TestAccFileCache_dataRepositoryAssociation_multiple(t *testing.T) { +func testAccFileCache_dataRepositoryAssociation_multiple(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } From 
bb6bf9ecee97a706178a35574ffe4a37a456b3d7 Mon Sep 17 00:00:00 2001 From: Albert Silva Date: Sun, 30 Oct 2022 11:59:37 -0400 Subject: [PATCH 31/32] update unecessary List schema objects to Set --- internal/service/fsx/file_cache.go | 38 +++++++++++++++--------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/internal/service/fsx/file_cache.go b/internal/service/fsx/file_cache.go index 88f66a1d3a35..0a24095ab669 100644 --- a/internal/service/fsx/file_cache.go +++ b/internal/service/fsx/file_cache.go @@ -50,7 +50,7 @@ func ResourceFileCache() *schema.Resource { Default: false, }, "data_repository_association": { - Type: schema.TypeList, + Type: schema.TypeSet, Optional: true, ForceNew: true, MaxItems: 8, @@ -68,7 +68,7 @@ func ResourceFileCache() *schema.Resource { ), }, "data_repository_subdirectories": { - Type: schema.TypeList, + Type: schema.TypeSet, Optional: true, MaxItems: 500, Elem: &schema.Schema{ @@ -102,12 +102,12 @@ func ResourceFileCache() *schema.Resource { Computed: true, }, "nfs": { - Type: schema.TypeList, + Type: schema.TypeSet, Optional: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "dns_ips": { - Type: schema.TypeList, + Type: schema.TypeSet, Optional: true, MaxItems: 10, Elem: &schema.Schema{ @@ -176,7 +176,7 @@ func ResourceFileCache() *schema.Resource { ValidateFunc: verify.ValidARN, }, "lustre_configuration": { - Type: schema.TypeList, + Type: schema.TypeSet, Optional: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -189,7 +189,7 @@ func ResourceFileCache() *schema.Resource { ), }, "log_configuration": { - Type: schema.TypeList, + Type: schema.TypeSet, Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -205,7 +205,7 @@ func ResourceFileCache() *schema.Resource { }, }, "metadata_configuration": { - Type: schema.TypeList, + Type: schema.TypeSet, Required: true, ForceNew: true, MaxItems: 8, @@ -257,7 +257,7 @@ func ResourceFileCache() *schema.Resource { Computed: 
true, }, "security_group_ids": { - Type: schema.TypeList, + Type: schema.TypeSet, Optional: true, ForceNew: true, MaxItems: 50, @@ -308,17 +308,17 @@ func resourceFileCacheCreate(ctx context.Context, d *schema.ResourceData, meta i if v, ok := d.GetOk("copy_tags_to_data_repository_associations"); ok { input.CopyTagsToDataRepositoryAssociations = aws.Bool(v.(bool)) } - if v, ok := d.GetOk("data_repository_association"); ok && len(v.([]interface{})) > 0 { - input.DataRepositoryAssociations = expandDataRepositoryAssociations(v.([]interface{})) + if v, ok := d.GetOk("data_repository_association"); ok && len(v.(*schema.Set).List()) > 0 { + input.DataRepositoryAssociations = expandDataRepositoryAssociations(v.(*schema.Set).List()) } if v, ok := d.GetOk("kms_key_id"); ok { input.KmsKeyId = aws.String(v.(string)) } - if v, ok := d.GetOk("lustre_configuration"); ok && len(v.([]interface{})) > 0 { - input.LustreConfiguration = expandCreateFileCacheLustreConfiguration(v.([]interface{})) + if v, ok := d.GetOk("lustre_configuration"); ok && len(v.(*schema.Set).List()) > 0 { + input.LustreConfiguration = expandCreateFileCacheLustreConfiguration(v.(*schema.Set).List()) } if v, ok := d.GetOk("security_group_ids"); ok { - input.SecurityGroupIds = flex.ExpandStringList(v.([]interface{})) + input.SecurityGroupIds = flex.ExpandStringSet(v.(*schema.Set)) } if len(tags) > 0 { input.Tags = Tags(tags.IgnoreAWS()) @@ -556,13 +556,13 @@ func expandDataRepositoryAssociations(l []interface{}) []*fsx.FileCacheDataRepos req.DataRepositoryPath = aws.String(v) } if v, ok := tfMap["data_repository_subdirectories"]; ok { - req.DataRepositorySubdirectories = flex.ExpandStringList(v.([]interface{})) + req.DataRepositorySubdirectories = flex.ExpandStringSet(v.(*schema.Set)) } if v, ok := tfMap["file_cache_path"].(string); ok { req.FileCachePath = aws.String(v) } - if v, ok := tfMap["nfs"]; ok && len(v.([]interface{})) > 0 { - req.NFS = expandFileCacheNFSConfiguration(v.([]interface{})) + if v, ok := 
tfMap["nfs"]; ok && len(v.(*schema.Set).List()) > 0 { + req.NFS = expandFileCacheNFSConfiguration(v.(*schema.Set).List()) } dataRepositoryAssociations = append(dataRepositoryAssociations, req) } @@ -578,7 +578,7 @@ func expandFileCacheNFSConfiguration(l []interface{}) *fsx.FileCacheNFSConfigura req := &fsx.FileCacheNFSConfiguration{} if v, ok := data["dns_ips"]; ok { - req.DnsIps = flex.ExpandStringList(v.([]interface{})) + req.DnsIps = flex.ExpandStringSet(v.(*schema.Set)) } if v, ok := data["version"].(string); ok { req.Version = aws.String(v) @@ -612,8 +612,8 @@ func expandCreateFileCacheLustreConfiguration(l []interface{}) *fsx.CreateFileCa if v, ok := data["deployment_type"].(string); ok { req.DeploymentType = aws.String(v) } - if v, ok := data["metadata_configuration"]; ok && len(v.([]interface{})) > 0 { - req.MetadataConfiguration = expandFileCacheLustreMetadataConfiguration(v.([]interface{})) + if v, ok := data["metadata_configuration"]; ok && len(v.(*schema.Set).List()) > 0 { + req.MetadataConfiguration = expandFileCacheLustreMetadataConfiguration(v.(*schema.Set).List()) } if v, ok := data["per_unit_storage_throughput"].(int); ok { req.PerUnitStorageThroughput = aws.Int64(int64(v)) From 39960ca6e462330b031169f7acc309817c570749 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 31 Oct 2022 16:27:32 -0400 Subject: [PATCH 32/32] Update 27384.txt --- .changelog/27384.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.changelog/27384.txt b/.changelog/27384.txt index fb0cfabc23b4..240d15d617d5 100644 --- a/.changelog/27384.txt +++ b/.changelog/27384.txt @@ -1,3 +1,3 @@ ```release-note:new-resource -aws_fsx_filecache -``` \ No newline at end of file +aws_fsx_file_cache +```