Merge pull request #15231 from DrFaust92/fsx_auto_import_policy
fsx_lustre_file_system - add `auto_import_policy` argument
breathingdust committed Oct 1, 2020
2 parents 43ed551 + fae0167 commit e1d887f
Showing 3 changed files with 76 additions and 9 deletions.
31 changes: 22 additions & 9 deletions aws/resource_aws_fsx_lustre_file_system.go
@@ -56,6 +56,7 @@ func resourceAwsFsxLustreFileSystem() *schema.Resource {
validation.StringLenBetween(3, 900),
validation.StringMatch(regexp.MustCompile(`^s3://`), "must begin with s3://"),
),
+    RequiredWith: []string{"auto_import_policy"},
},
"imported_file_chunk_size": {
Type: schema.TypeInt,
@@ -115,15 +116,11 @@ func resourceAwsFsxLustreFileSystem() *schema.Resource {
),
},
"deployment_type": {
-    Type:     schema.TypeString,
-    Optional: true,
-    ForceNew: true,
-    Default:  fsx.LustreDeploymentTypeScratch1,
-    ValidateFunc: validation.StringInSlice([]string{
-      fsx.LustreDeploymentTypeScratch1,
-      fsx.LustreDeploymentTypeScratch2,
-      fsx.LustreDeploymentTypePersistent1,
-    }, false),
+    Type:         schema.TypeString,
+    Optional:     true,
+    ForceNew:     true,
+    Default:      fsx.LustreDeploymentTypeScratch1,
+    ValidateFunc: validation.StringInSlice(fsx.LustreDeploymentType_Values(), false),
},
"kms_key_id": {
Type: schema.TypeString,
@@ -172,6 +169,12 @@ func resourceAwsFsxLustreFileSystem() *schema.Resource {
ForceNew: true,
ValidateFunc: validation.StringInSlice(fsx.DriveCacheType_Values(), false),
},
"auto_import_policy": {
Type: schema.TypeString,
Optional: true,
Computed: true,
ValidateFunc: validation.StringInSlice(fsx.AutoImportPolicyType_Values(), false),
},
},
}
}
@@ -235,6 +238,10 @@ func resourceAwsFsxLustreFileSystemCreate(d *schema.ResourceData, meta interface{}) error {
input.LustreConfiguration.DriveCacheType = aws.String(v.(string))
}

+  if v, ok := d.GetOk("auto_import_policy"); ok {
+    input.LustreConfiguration.AutoImportPolicy = aws.String(v.(string))
+  }
+
result, err := conn.CreateFileSystem(input)
if err != nil {
return fmt.Errorf("Error creating FSx Lustre filesystem: %w", err)
@@ -284,6 +291,11 @@ func resourceAwsFsxLustreFileSystemUpdate(d *schema.ResourceData, meta interface{}) error {
requestUpdate = true
}

+  if d.HasChange("auto_import_policy") {
+    input.LustreConfiguration.AutoImportPolicy = aws.String(d.Get("auto_import_policy").(string))
+    requestUpdate = true
+  }
+
if requestUpdate {
_, err := conn.UpdateFileSystem(input)
if err != nil {
@@ -341,6 +353,7 @@ func resourceAwsFsxLustreFileSystemRead(d *schema.ResourceData, meta interface{}) error {
d.Set("dns_name", filesystem.DNSName)
d.Set("export_path", lustreConfig.DataRepositoryConfiguration.ExportPath)
d.Set("import_path", lustreConfig.DataRepositoryConfiguration.ImportPath)
d.Set("auto_import_policy", lustreConfig.DataRepositoryConfiguration.AutoImportPolicy)
d.Set("imported_file_chunk_size", lustreConfig.DataRepositoryConfiguration.ImportedFileChunkSize)
d.Set("deployment_type", lustreConfig.DeploymentType)
if lustreConfig.PerUnitStorageThroughput != nil {
53 changes: 53 additions & 0 deletions aws/resource_aws_fsx_lustre_file_system_test.go
@@ -150,6 +150,7 @@ func TestAccAWSFsxLustreFileSystem_ExportPath(t *testing.T) {
Check: resource.ComposeTestCheckFunc(
testAccCheckFsxLustreFileSystemExists(resourceName, &filesystem1),
resource.TestCheckResourceAttr(resourceName, "export_path", fmt.Sprintf("s3://%s", rName)),
resource.TestCheckResourceAttr(resourceName, "auto_import_policy", "NONE"),
),
},
{
@@ -164,6 +165,7 @@
testAccCheckFsxLustreFileSystemExists(resourceName, &filesystem2),
testAccCheckFsxLustreFileSystemRecreated(&filesystem1, &filesystem2),
resource.TestCheckResourceAttr(resourceName, "export_path", fmt.Sprintf("s3://%s/prefix/", rName)),
resource.TestCheckResourceAttr(resourceName, "auto_import_policy", "NONE"),
),
},
},
@@ -617,6 +619,40 @@ func TestAccAWSFsxLustreFileSystem_StorageTypeHddDriveCacheNone(t *testing.T) {
})
}

+func TestAccAWSFsxLustreFileSystem_autoImportPolicy(t *testing.T) {
+  var filesystem fsx.FileSystem
+  resourceName := "aws_fsx_lustre_file_system.test"
+  rName := acctest.RandomWithPrefix("tf-acc-test")
+
+  resource.ParallelTest(t, resource.TestCase{
+    PreCheck:     func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck("fsx", t) },
+    Providers:    testAccProviders,
+    CheckDestroy: testAccCheckFsxLustreFileSystemDestroy,
+    Steps: []resource.TestStep{
+      {
+        Config: testAccAwsFsxLustreFileSystemAutoImportPolicyConfig(rName, "", "NEW"),
+        Check: resource.ComposeTestCheckFunc(
+          testAccCheckFsxLustreFileSystemExists(resourceName, &filesystem),
+          resource.TestCheckResourceAttr(resourceName, "auto_import_policy", "NEW"),
+        ),
+      },
+      {
+        ResourceName:            resourceName,
+        ImportState:             true,
+        ImportStateVerify:       true,
+        ImportStateVerifyIgnore: []string{"security_group_ids"},
+      },
+      {
+        Config: testAccAwsFsxLustreFileSystemAutoImportPolicyConfig(rName, "", "NEW_CHANGED"),
+        Check: resource.ComposeTestCheckFunc(
+          testAccCheckFsxLustreFileSystemExists(resourceName, &filesystem),
+          resource.TestCheckResourceAttr(resourceName, "auto_import_policy", "NEW_CHANGED"),
+        ),
+      },
+    },
+  })
+}
+
func testAccCheckFsxLustreFileSystemExists(resourceName string, fs *fsx.FileSystem) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[resourceName]
@@ -981,3 +1017,20 @@ resource "aws_fsx_lustre_file_system" "test" {
}
`, drive_cache_type)
}

+func testAccAwsFsxLustreFileSystemAutoImportPolicyConfig(rName, exportPrefix, policy string) string {
+  return testAccAwsFsxLustreFileSystemConfigBase() + fmt.Sprintf(`
+resource "aws_s3_bucket" "test" {
+  acl    = "private"
+  bucket = %[1]q
+}
+
+resource "aws_fsx_lustre_file_system" "test" {
+  export_path        = "s3://${aws_s3_bucket.test.bucket}%[2]s"
+  import_path        = "s3://${aws_s3_bucket.test.bucket}"
+  auto_import_policy = %[3]q
+  storage_capacity   = 1200
+  subnet_ids         = [aws_subnet.test1.id]
+}
+`, rName, exportPrefix, policy)
+}
1 change: 1 addition & 0 deletions website/docs/r/fsx_lustre_file_system.html.markdown
@@ -39,6 +39,7 @@ The following arguments are supported:
* `storage_type` - (Optional) - The filesystem storage type. Either `SSD` or `HDD`, defaults to `SSD`. `HDD` is only supported on `PERSISTENT_1` deployment types.
* `drive_cache_type` - (Optional) - The type of drive cache used by `PERSISTENT_1` filesystems that are provisioned with `HDD` storage_type. Required for `HDD` storage_type, set to either `READ` or `NONE`.
* `daily_automatic_backup_start_time` - (Optional) A recurring daily time, in the format HH:MM. HH is the zero-padded hour of the day (0-23), and MM is the zero-padded minute of the hour. For example, 05:00 specifies 5 AM daily. Only valid for `PERSISTENT_1` deployment_type. Requires `automatic_backup_retention_days` to be set.
+* `auto_import_policy` - (Optional) How Amazon FSx keeps your file and directory listings up to date as you add or modify objects in your linked S3 bucket. Valid values are `NONE`, `NEW` and `NEW_CHANGED`. See [Auto Import Data Repo](https://docs.aws.amazon.com/fsx/latest/LustreGuide/autoimport-data-repo.html) for more details, and the example below.
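
For illustration only (not part of this commit's diff): a minimal sketch of the new argument in use. The bucket and subnet names are placeholders, and the policy values are those exercised by the acceptance tests above.

```hcl
# Data repository bucket linked to the filesystem (name is illustrative).
resource "aws_s3_bucket" "example" {
  bucket = "example-fsx-lustre-repo"
  acl    = "private"
}

resource "aws_fsx_lustre_file_system" "example" {
  storage_capacity = 1200
  subnet_ids       = [aws_subnet.example.id]

  # Link the filesystem to the bucket and automatically import
  # listings for objects that are added or changed in S3.
  import_path        = "s3://${aws_s3_bucket.example.bucket}"
  export_path        = "s3://${aws_s3_bucket.example.bucket}"
  auto_import_policy = "NEW_CHANGED"
}
```

Because the schema marks the argument `Optional`/`Computed` without `ForceNew`, changing the policy is applied in place through `UpdateFileSystem` rather than by recreating the filesystem, which is what the `NEW` to `NEW_CHANGED` acceptance-test steps verify.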

## Attributes Reference
