From aa7c05c7f78ae6595058867383b7118aa13d6e45 Mon Sep 17 00:00:00 2001 From: Matthew Frahry Date: Thu, 19 Nov 2020 23:15:32 -0800 Subject: [PATCH 01/11] Start of share files --- .../services/storage/client/client.go | 19 ++ .../internal/services/storage/registration.go | 1 + .../resource_arm_storage_share_file.go | 250 ++++++++++++++++++ .../resource_arm_storage_share_file_test.go | 189 +++++++++++++ .../storage/2019-12-12/file/files/README.md | 47 ++++ .../storage/2019-12-12/file/files/api.go | 28 ++ .../storage/2019-12-12/file/files/client.go | 25 ++ .../storage/2019-12-12/file/files/copy.go | 132 +++++++++ .../2019-12-12/file/files/copy_abort.go | 104 ++++++++ .../2019-12-12/file/files/copy_wait.go | 55 ++++ .../storage/2019-12-12/file/files/create.go | 169 ++++++++++++ .../storage/2019-12-12/file/files/delete.go | 94 +++++++ .../2019-12-12/file/files/metadata_get.go | 111 ++++++++ .../2019-12-12/file/files/metadata_set.go | 105 ++++++++ .../2019-12-12/file/files/properties_get.go | 144 ++++++++++ .../2019-12-12/file/files/properties_set.go | 182 +++++++++++++ .../2019-12-12/file/files/range_clear.go | 112 ++++++++ .../2019-12-12/file/files/range_get.go | 121 +++++++++ .../2019-12-12/file/files/range_get_file.go | 128 +++++++++ .../2019-12-12/file/files/range_put.go | 130 +++++++++ .../2019-12-12/file/files/range_put_file.go | 107 ++++++++ .../2019-12-12/file/files/ranges_list.go | 114 ++++++++ .../2019-12-12/file/files/resource_id.go | 64 +++++ .../storage/2019-12-12/file/files/version.go | 14 + vendor/modules.txt | 1 + 25 files changed, 2446 insertions(+) create mode 100644 azurerm/internal/services/storage/resource_arm_storage_share_file.go create mode 100644 azurerm/internal/services/storage/tests/resource_arm_storage_share_file_test.go create mode 100644 vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/README.md create mode 100644 vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/api.go create mode 100644 vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/client.go create mode 100644 vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/copy.go create mode 100644 vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/copy_abort.go create mode 100644 vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/copy_wait.go create mode 100644 vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/create.go create mode 100644 vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/delete.go create mode 100644 vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/metadata_get.go create mode 100644 vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/metadata_set.go create mode 100644 vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/properties_get.go create mode 100644 vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/properties_set.go create mode 100644 vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/range_clear.go create mode 100644 vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/range_get.go create mode 100644 vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/range_get_file.go create mode 100644 vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/range_put.go create mode 100644 vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/range_put_file.go create 
mode 100644 vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/ranges_list.go create mode 100644 vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/resource_id.go create mode 100644 vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/version.go diff --git a/azurerm/internal/services/storage/client/client.go b/azurerm/internal/services/storage/client/client.go index d09aa21ee362..d53df8621fdd 100644 --- a/azurerm/internal/services/storage/client/client.go +++ b/azurerm/internal/services/storage/client/client.go @@ -16,6 +16,7 @@ import ( "github.com/tombuildsstuff/giovanni/storage/2019-12-12/datalakestore/filesystems" "github.com/tombuildsstuff/giovanni/storage/2019-12-12/datalakestore/paths" "github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/directories" + "github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files" "github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/shares" "github.com/tombuildsstuff/giovanni/storage/2019-12-12/queue/queues" "github.com/tombuildsstuff/giovanni/storage/2019-12-12/table/entities" @@ -174,6 +175,24 @@ func (client Client) FileShareDirectoriesClient(ctx context.Context, account acc return &directoriesClient, nil } +func (client Client) FileShareFilesClient(ctx context.Context, account accountDetails) (*files.Client, error) { + // NOTE: Files do not support AzureAD Authentication + + accountKey, err := account.AccountKey(ctx, client) + if err != nil { + return nil, fmt.Errorf("Error retrieving Account Key: %s", err) + } + + storageAuth, err := autorest.NewSharedKeyAuthorizer(account.name, *accountKey, autorest.SharedKeyLite) + if err != nil { + return nil, fmt.Errorf("Error building Authorizer: %+v", err) + } + + filesClient := files.NewWithEnvironment(client.Environment) + filesClient.Client.Authorizer = storageAuth + return &filesClient, nil +} + func (client Client) FileSharesClient(ctx context.Context, account accountDetails) (shim.StorageShareWrapper, error) { // NOTE: Files do not support AzureAD Authentication diff --git a/azurerm/internal/services/storage/registration.go b/azurerm/internal/services/storage/registration.go index 5835522e08f2..d8943ee904be 100644 --- a/azurerm/internal/services/storage/registration.go +++ b/azurerm/internal/services/storage/registration.go @@ -46,6 +46,7 @@ func (r Registration) SupportedResources() map[string]*schema.Resource { "azurerm_storage_management_policy": resourceArmStorageManagementPolicy(), "azurerm_storage_queue": resourceArmStorageQueue(), "azurerm_storage_share": resourceArmStorageShare(), + "azurerm_storage_share_file": resourceArmStorageShareFile(), "azurerm_storage_share_directory": resourceArmStorageShareDirectory(), "azurerm_storage_table": resourceArmStorageTable(), "azurerm_storage_table_entity": resourceArmStorageTableEntity(), diff --git a/azurerm/internal/services/storage/resource_arm_storage_share_file.go b/azurerm/internal/services/storage/resource_arm_storage_share_file.go new file mode 100644 index 000000000000..3622cc35cb4a --- /dev/null +++ b/azurerm/internal/services/storage/resource_arm_storage_share_file.go @@ -0,0 +1,250 @@ +package storage + +import ( + "fmt" + "github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func resourceArmStorageShareFile() *schema.Resource { + return &schema.Resource{ + Create: resourceArmStorageShareFileCreate, + Read: resourceArmStorageShareFileRead, + Update: resourceArmStorageShareFileUpdate, + Delete: resourceArmStorageShareFileDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Read: schema.DefaultTimeout(5 * time.Minute), + Update: schema.DefaultTimeout(30 * time.Minute), + Delete: schema.DefaultTimeout(30 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + // TODO: add validation + }, + "share_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + "storage_account_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + "directory_name": { + Type: schema.TypeString, + Optional: true, + Default: "", + ValidateFunc: validate.StorageShareDirectoryName, + }, + + "metadata": MetaDataSchema(), + }, + } +} + +func resourceArmStorageShareFileCreate(d *schema.ResourceData, meta interface{}) error { + ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) + defer cancel() + storageClient := meta.(*clients.Client).Storage + + accountName := d.Get("storage_account_name").(string) + shareName := d.Get("share_name").(string) + fileName := d.Get("name").(string) + directoryName := d.Get("directory_name").(string) + + metaDataRaw := d.Get("metadata").(map[string]interface{}) + metaData := ExpandMetaData(metaDataRaw) + + account, err := storageClient.FindAccount(ctx, accountName) + if err != nil { + return fmt.Errorf("Error retrieving Account %q for File %q (Share %q): %s", accountName, fileName, shareName, err) + } + if account == nil { + return fmt.Errorf("Unable to locate Storage Account %q!", accountName) + } + + client, err := storageClient.FileShareFilesClient(ctx, *account) + if err != nil { + return fmt.Errorf("Error building File Share Directories Client: %s", err) + } + + existing, err := client.GetProperties(ctx, accountName, shareName, directoryName, fileName) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing File %q (File Share %q / Storage Account %q / Resource Group %q): %s", fileName, shareName, accountName, account.ResourceGroup, err) + } + } + + if !utils.ResponseWasNotFound(existing.Response) { + id := client.GetResourceID(accountName, shareName, directoryName, fileName) + return tf.ImportAsExistsError("azurerm_storage_share_file", id) + } + + input := files.CreateInput{ + MetaData: metaData, + } + if _, err := client.Create(ctx, accountName, shareName, directoryName, fileName, input); err != nil { + return fmt.Errorf("Error creating File %q (File Share %q / Account %q): %+v", fileName, shareName, accountName, err) + } + + // TODO Check if this is true + /* + // Storage Share Directories are eventually consistent + log.Printf("[DEBUG] Waiting for File %q (File Share %q / Account %q) to 
become available", fileName, shareName, accountName) + stateConf := &resource.StateChangeConf{ + Pending: []string{"404"}, + Target: []string{"200"}, + Refresh: storageShareDirectoryRefreshFunc(ctx, client, accountName, shareName, directoryName), + MinTimeout: 10 * time.Second, + ContinuousTargetOccurence: 5, + Timeout: d.Timeout(schema.TimeoutCreate), + } + + if _, err := stateConf.WaitForState(); err != nil { + return fmt.Errorf("Error waiting for Directory %q (File Share %q / Account %q) to become available: %s", directoryName, shareName, accountName, err) + }*/ + + resourceID := client.GetResourceID(accountName, shareName, directoryName, fileName) + d.SetId(resourceID) + + return resourceArmStorageShareFileRead(d, meta) +} + +func resourceArmStorageShareFileUpdate(d *schema.ResourceData, meta interface{}) error { + ctx, cancel := timeouts.ForUpdate(meta.(*clients.Client).StopContext, d) + defer cancel() + storageClient := meta.(*clients.Client).Storage + + id, err := files.ParseResourceID(d.Id()) + if err != nil { + return err + } + + metaDataRaw := d.Get("metadata").(map[string]interface{}) + metaData := ExpandMetaData(metaDataRaw) + + account, err := storageClient.FindAccount(ctx, id.AccountName) + if err != nil { + return fmt.Errorf("Error retrieving Account %q for File %q (Share %q): %s", id.AccountName, id.FileName, id.ShareName, err) + } + if account == nil { + return fmt.Errorf("Unable to locate Storage Account %q!", id.AccountName) + } + + client, err := storageClient.FileShareFilesClient(ctx, *account) + if err != nil { + return fmt.Errorf("Error building File Share File Client: %s", err) + } + + if _, err := client.SetMetaData(ctx, id.AccountName, id.ShareName, id.DirectoryName, id.FileName, metaData); err != nil { + return fmt.Errorf("Error updating MetaData for File %q (File Share %q / Account %q): %+v", id.FileName, id.ShareName, id.AccountName, err) + } + + return resourceArmStorageShareFileRead(d, meta) +} + +func resourceArmStorageShareFileRead(d *schema.ResourceData, meta interface{}) error { + ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) + defer cancel() + storageClient := meta.(*clients.Client).Storage + + id, err := files.ParseResourceID(d.Id()) + if err != nil { + return err + } + + account, err := storageClient.FindAccount(ctx, id.AccountName) + if err != nil { + return fmt.Errorf("Error retrieving Account %q for File %q (Share %q): %s", id.AccountName, id.FileName, id.ShareName, err) + } + if account == nil { + log.Printf("[WARN] Unable to determine Resource Group for Storage Share File %q (Share %s, Account %s) - assuming removed & removing from state", id.FileName, id.ShareName, id.AccountName) + d.SetId("") + return nil + } + + client, err := storageClient.FileShareFilesClient(ctx, *account) + if err != nil { + return fmt.Errorf("Error building File Share Client for Storage Account %q (Resource Group %q): %s", id.AccountName, account.ResourceGroup, err) + } + + props, err := client.GetProperties(ctx, id.AccountName, id.ShareName, id.DirectoryName, id.FileName) + if err != nil { + return fmt.Errorf("Error retrieving Storage Share %q (File Share %q / Account %q / Resource Group %q): %s", id.DirectoryName, id.ShareName, id.AccountName, account.ResourceGroup, err) + } + + d.Set("name", id.FileName) + d.Set("directory_name", id.DirectoryName) + d.Set("share_name", id.ShareName) + d.Set("storage_account_name", id.AccountName) + + if err := d.Set("metadata", FlattenMetaData(props.MetaData)); err != nil { + return fmt.Errorf("Error setting 
`metadata`: %s", err) + } + + return nil +} + +func resourceArmStorageShareFileDelete(d *schema.ResourceData, meta interface{}) error { + ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) + defer cancel() + storageClient := meta.(*clients.Client).Storage + + id, err := files.ParseResourceID(d.Id()) + if err != nil { + return err + } + + account, err := storageClient.FindAccount(ctx, id.AccountName) + if err != nil { + return fmt.Errorf("Error retrieving Account %q for File %q (Share %q): %s", id.AccountName, id.FileName, id.ShareName, err) + } + if account == nil { + return fmt.Errorf("Unable to locate Storage Account %q!", id.AccountName) + } + + client, err := storageClient.FileShareFilesClient(ctx, *account) + if err != nil { + return fmt.Errorf("Error building File Share File Client for Storage Account %q (Resource Group %q): %s", id.AccountName, account.ResourceGroup, err) + } + + if _, err := client.Delete(ctx, id.AccountName, id.ShareName, id.DirectoryName, id.FileName); err != nil { + return fmt.Errorf("Error deleting Storage Share File %q (File Share %q / Account %q / Resource Group %q): %s", id.FileName, id.ShareName, id.AccountName, account.ResourceGroup, err) + } + + return nil +} + +/* +func storageShareDirectoryRefreshFunc(ctx context.Context, client *directories.Client, accountName, shareName, directoryName string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + res, err := client.Get(ctx, accountName, shareName, directoryName) + if err != nil { + return nil, strconv.Itoa(res.StatusCode), fmt.Errorf("Error retrieving Directory %q (File Share %q / Account %q): %s", directoryName, shareName, accountName, err) + } + + return res, strconv.Itoa(res.StatusCode), nil + } +} +*/ diff --git a/azurerm/internal/services/storage/tests/resource_arm_storage_share_file_test.go b/azurerm/internal/services/storage/tests/resource_arm_storage_share_file_test.go new file mode 100644 index 000000000000..6c663d714409 --- /dev/null +++ b/azurerm/internal/services/storage/tests/resource_arm_storage_share_file_test.go @@ -0,0 +1,189 @@ +package tests + +import ( + "fmt" + "net/http" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" +) + +func TestAccAzureRMStorageShareFile_basic(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_storage_share_file", "test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMStorageShareFileDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMStorageShareFile_basic(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMStorageShareFileExists(data.ResourceName), + ), + }, + data.ImportStep(), + }, + }) +} + +func TestAccAzureRMStorageShareFile_requiresImport(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_storage_share_file", "test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMStorageShareFileDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMStorageShareFile_basic(data), + Check: resource.ComposeTestCheckFunc( + 
testCheckAzureRMStorageShareFileExists(data.ResourceName), + ), + }, + data.RequiresImportErrorStep(testAccAzureRMStorageShareFile_requiresImport), + }, + }) +} + +func testCheckAzureRMStorageShareFileExists(resourceName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + storageClient := acceptance.AzureProvider.Meta().(*clients.Client).Storage + ctx := acceptance.AzureProvider.Meta().(*clients.Client).StopContext + + // Ensure we have enough information in state to look up in API + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return fmt.Errorf("Not found: %s", resourceName) + } + + name := rs.Primary.Attributes["name"] + shareName := rs.Primary.Attributes["share_name"] + accountName := rs.Primary.Attributes["storage_account_name"] + directoryName := rs.Primary.Attributes["directory_name"] + + account, err := storageClient.FindAccount(ctx, accountName) + if err != nil { + return fmt.Errorf("Error retrieving Account %q for File %q (Share %q): %s", accountName, name, shareName, err) + } + if account == nil { + return fmt.Errorf("Unable to locate Storage Account %q!", accountName) + } + + client, err := storageClient.FileShareFilesClient(ctx, *account) + if err != nil { + return fmt.Errorf("Error building FileShare File Client: %s", err) + } + + resp, err := client.GetProperties(ctx, accountName, shareName, directoryName, name) + if err != nil { + return fmt.Errorf("Bad: Get on FileShareFilesClient: %+v", err) + } + + if resp.StatusCode == http.StatusNotFound { + return fmt.Errorf("Bad: File %q (File Share %q / Account %q / Resource Group %q) does not exist", name, shareName, accountName, account.ResourceGroup) + } + + return nil + } +} + +func testCheckAzureRMStorageShareFileDestroy(s *terraform.State) error { + storageClient := acceptance.AzureProvider.Meta().(*clients.Client).Storage + ctx := acceptance.AzureProvider.Meta().(*clients.Client).StopContext + + for _, rs := range s.RootModule().Resources { + if rs.Type != "azurerm_storage_share_file" { + continue + } + + name := rs.Primary.Attributes["name"] + shareName := rs.Primary.Attributes["share_name"] + accountName := rs.Primary.Attributes["storage_account_name"] + directoryName := rs.Primary.Attributes["directory_name"] + + account, err := storageClient.FindAccount(ctx, accountName) + if err != nil { + return fmt.Errorf("Error retrieving Account %q for File %q (Share %q): %s", accountName, name, shareName, err) + } + + // not found, the account's gone + if account == nil { + return nil + } + + if err != nil { + return fmt.Errorf("Error locating Resource Group for Storage Share File %q (Share %s, Account %s): %s", name, shareName, accountName, err) + } + + client, err := storageClient.FileShareFilesClient(ctx, *account) + if err != nil { + return fmt.Errorf("Error building FileShare File Client: %s", err) + } + + resp, err := client.GetProperties(ctx, accountName, shareName, directoryName, name) + if err != nil { + return nil + } + + return fmt.Errorf("File Share still exists:\n%#v", resp) + } + + return nil +} + +func testAccAzureRMStorageShareFile_basic(data acceptance.TestData) string { + template := testAccAzureRMStorageShareFile_template(data) + return fmt.Sprintf(` +%s + +resource "azurerm_storage_share_file" "test" { + name = "dir" + share_name = azurerm_storage_share.test.name + storage_account_name = azurerm_storage_account.test.name +} +`, template) +} + +func testAccAzureRMStorageShareFile_requiresImport(data acceptance.TestData) string { + template := 
testAccAzureRMStorageShareFile_basic(data) + return fmt.Sprintf(` +%s + +resource "azurerm_storage_share_file" "import" { + name = azurerm_storage_share_file.test.name + share_name = azurerm_storage_share_file.test.share_name + storage_account_name = azurerm_storage_share_file.test.storage_account_name +} +`, template) +} + +func testAccAzureRMStorageShareFile_template(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_storage_account" "test" { + name = "acctestsa%s" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + account_tier = "Standard" + account_replication_type = "LRS" +} + +resource "azurerm_storage_share" "test" { + name = "fileshare" + storage_account_name = azurerm_storage_account.test.name + quota = 50 +} +`, data.RandomInteger, data.Locations.Primary, data.RandomString) +} diff --git a/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/README.md b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/README.md new file mode 100644 index 000000000000..e9db27b5a862 --- /dev/null +++ b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/README.md @@ -0,0 +1,47 @@ +## File Storage Files SDK for API version 2019-12-12 + +This package allows you to interact with the Files File Storage API + +### Supported Authorizers + +* Azure Active Directory (for the Resource Endpoint `https://storage.azure.com`) +* SharedKeyLite (Blob, File & Queue) + +### Limitations + +* At this time the headers `x-ms-file-permission` and `x-ms-file-attributes` are hard-coded (to `inherit` and `None`, respectively). + +### Example Usage + +```go +package main + +import ( + "context" + "fmt" + "time" + + "github.com/Azure/go-autorest/autorest" + "github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files" +) + +func Example() error { + accountName := "storageaccount1" + storageAccountKey := "ABC123...." 
+ shareName := "myshare" + directoryName := "myfiles" + fileName := "example.txt" + + storageAuth := autorest.NewSharedKeyLiteAuthorizer(accountName, storageAccountKey) + filesClient := files.New() + filesClient.Client.Authorizer = storageAuth + + ctx := context.TODO() + input := files.CreateInput{} + if _, err := filesClient.Create(ctx, accountName, shareName, directoryName, fileName, input); err != nil { + return fmt.Errorf("Error creating File: %s", err) + } + + return nil +} +``` \ No newline at end of file diff --git a/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/api.go b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/api.go new file mode 100644 index 000000000000..96a0491e70da --- /dev/null +++ b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/api.go @@ -0,0 +1,28 @@ +package files + +import ( + "context" + "os" + "time" + + "github.com/Azure/go-autorest/autorest" +) + +type StorageFile interface { + PutByteRange(ctx context.Context, accountName, shareName, path, fileName string, input PutByteRangeInput) (result autorest.Response, err error) + GetByteRange(ctx context.Context, accountName, shareName, path, fileName string, input GetByteRangeInput) (result GetByteRangeResult, err error) + ClearByteRange(ctx context.Context, accountName, shareName, path, fileName string, input ClearByteRangeInput) (result autorest.Response, err error) + SetProperties(ctx context.Context, accountName, shareName, path, fileName string, input SetPropertiesInput) (result autorest.Response, err error) + PutFile(ctx context.Context, accountName, shareName, path, fileName string, file *os.File, parallelism int) error + Copy(ctx context.Context, accountName, shareName, path, fileName string, input CopyInput) (result CopyResult, err error) + SetMetaData(ctx context.Context, accountName, shareName, path, fileName string, metaData map[string]string) (result autorest.Response, err error) + GetMetaData(ctx context.Context, accountName, shareName, path, fileName string) (result GetMetaDataResult, err error) + AbortCopy(ctx context.Context, accountName, shareName, path, fileName, copyID string) (result autorest.Response, err error) + GetFile(ctx context.Context, accountName, shareName, path, fileName string, parallelism int) (result autorest.Response, outputBytes []byte, err error) + GetResourceID(accountName, shareName, directoryName, filePath string) string + ListRanges(ctx context.Context, accountName, shareName, path, fileName string) (result ListRangesResult, err error) + GetProperties(ctx context.Context, accountName, shareName, path, fileName string) (result GetResult, err error) + Delete(ctx context.Context, accountName, shareName, path, fileName string) (result autorest.Response, err error) + Create(ctx context.Context, accountName, shareName, path, fileName string, input CreateInput) (result autorest.Response, err error) + CopyAndWait(ctx context.Context, accountName, shareName, path, fileName string, input CopyInput, pollDuration time.Duration) (result CopyResult, err error) +} diff --git a/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/client.go b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/client.go new file mode 100644 index 000000000000..ecca81586b5a --- /dev/null +++ b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/client.go @@ -0,0 +1,25 @@ +package files + +import ( + "github.com/Azure/go-autorest/autorest" + 
"github.com/Azure/go-autorest/autorest/azure" +) + +// Client is the base client for File Storage Shares. +type Client struct { + autorest.Client + BaseURI string +} + +// New creates an instance of the Client client. +func New() Client { + return NewWithEnvironment(azure.PublicCloud) +} + +// NewWithEnvironment creates an instance of the Client client. +func NewWithEnvironment(environment azure.Environment) Client { + return Client{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + BaseURI: environment.StorageEndpointSuffix, + } +} diff --git a/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/copy.go b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/copy.go new file mode 100644 index 000000000000..31768b3d52b7 --- /dev/null +++ b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/copy.go @@ -0,0 +1,132 @@ +package files + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/tombuildsstuff/giovanni/storage/internal/endpoints" + "github.com/tombuildsstuff/giovanni/storage/internal/metadata" +) + +type CopyInput struct { + // Specifies the URL of the source file or blob, up to 2 KB in length. + // + // To copy a file to another file within the same storage account, you may use Shared Key to authenticate + // the source file. If you are copying a file from another storage account, or if you are copying a blob from + // the same storage account or another storage account, then you must authenticate the source file or blob using a + // shared access signature. If the source is a public blob, no authentication is required to perform the copy + // operation. A file in a share snapshot can also be specified as a copy source. + CopySource string + + MetaData map[string]string +} + +type CopyResult struct { + autorest.Response + + // The CopyID, which can be passed to AbortCopy to abort the copy. + CopyID string + + // Either `success` or `pending` + CopySuccess string +} + +// Copy copies a blob or file to a destination file within the storage account asynchronously. 
+func (client Client) Copy(ctx context.Context, accountName, shareName, path, fileName string, input CopyInput) (result CopyResult, err error) { + if accountName == "" { + return result, validation.NewError("files.Client", "Copy", "`accountName` cannot be an empty string.") + } + if shareName == "" { + return result, validation.NewError("files.Client", "Copy", "`shareName` cannot be an empty string.") + } + if strings.ToLower(shareName) != shareName { + return result, validation.NewError("files.Client", "Copy", "`shareName` must be a lower-cased string.") + } + if fileName == "" { + return result, validation.NewError("files.Client", "Copy", "`fileName` cannot be an empty string.") + } + if input.CopySource == "" { + return result, validation.NewError("files.Client", "Copy", "`input.CopySource` cannot be an empty string.") + } + if err := metadata.Validate(input.MetaData); err != nil { + return result, validation.NewError("files.Client", "Copy", fmt.Sprintf("`input.MetaData` is not valid: %s.", err)) + } + + req, err := client.CopyPreparer(ctx, accountName, shareName, path, fileName, input) + if err != nil { + err = autorest.NewErrorWithError(err, "files.Client", "Copy", nil, "Failure preparing request") + return + } + + resp, err := client.CopySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "files.Client", "Copy", resp, "Failure sending request") + return + } + + result, err = client.CopyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "files.Client", "Copy", resp, "Failure responding to request") + return + } + + return +} + +// CopyPreparer prepares the Copy request. +func (client Client) CopyPreparer(ctx context.Context, accountName, shareName, path, fileName string, input CopyInput) (*http.Request, error) { + if path != "" { + path = fmt.Sprintf("%s/", path) + } + pathParameters := map[string]interface{}{ + "shareName": autorest.Encode("path", shareName), + "directory": autorest.Encode("path", path), + "fileName": autorest.Encode("path", fileName), + } + + headers := map[string]interface{}{ + "x-ms-version": APIVersion, + "x-ms-copy-source": input.CopySource, + } + + headers = metadata.SetIntoHeaders(headers, input.MetaData) + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/xml; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(endpoints.GetFileEndpoint(client.BaseURI, accountName)), + autorest.WithPathParameters("/{shareName}/{directory}{fileName}", pathParameters), + autorest.WithHeaders(headers)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CopySender sends the Copy request. The method will close the +// http.Response Body if it receives an error. +func (client Client) CopySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// CopyResponder handles the response to the Copy request. The method always +// closes the http.Response Body. 
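The exported Copy method composes the usual autorest Preparer/Sender/Responder trio. As a rough illustration only, the prepared request boils down to a PUT against the account's file endpoint (in the public cloud, `https://{account}.file.core.windows.net`) with the source URL and API version carried as headers; the real client additionally signs the request via the configured Authorizer and retries through DoRetryWithRegistration:

```go
package main

import (
	"context"
	"fmt"
	"net/http"
)

// buildCopyRequest approximates what CopyPreparer assembles: a PUT to
// https://{account}.file.core.windows.net/{share}/{directory}/{file} with
// the copy source and API version in headers. Authentication headers
// (added by the autorest authorizer) are omitted from this sketch.
func buildCopyRequest(ctx context.Context, accountName, shareName, directory, fileName, copySource string) (*http.Request, error) {
	if directory != "" {
		directory = directory + "/"
	}
	url := fmt.Sprintf("https://%s.file.core.windows.net/%s/%s%s", accountName, shareName, directory, fileName)

	req, err := http.NewRequestWithContext(ctx, http.MethodPut, url, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/xml; charset=utf-8")
	req.Header.Set("x-ms-version", "2019-12-12")
	req.Header.Set("x-ms-copy-source", copySource)
	// MetaData entries are additionally carried as x-ms-meta-<key> headers.
	return req, nil
}
```

The service answers 202 Accepted, which is why CopyResponder below only treats that status as success.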
+func (client Client) CopyResponder(resp *http.Response) (result CopyResult, err error) { + if resp != nil && resp.Header != nil { + result.CopyID = resp.Header.Get("x-ms-copy-id") + result.CopySuccess = resp.Header.Get("x-ms-copy-status") + } + + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusAccepted), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + + return +} diff --git a/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/copy_abort.go b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/copy_abort.go new file mode 100644 index 000000000000..2f0913185888 --- /dev/null +++ b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/copy_abort.go @@ -0,0 +1,104 @@ +package files + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/tombuildsstuff/giovanni/storage/internal/endpoints" +) + +// AbortCopy aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata +func (client Client) AbortCopy(ctx context.Context, accountName, shareName, path, fileName, copyID string) (result autorest.Response, err error) { + if accountName == "" { + return result, validation.NewError("files.Client", "AbortCopy", "`accountName` cannot be an empty string.") + } + if shareName == "" { + return result, validation.NewError("files.Client", "AbortCopy", "`shareName` cannot be an empty string.") + } + if strings.ToLower(shareName) != shareName { + return result, validation.NewError("files.Client", "AbortCopy", "`shareName` must be a lower-cased string.") + } + if fileName == "" { + return result, validation.NewError("files.Client", "AbortCopy", "`fileName` cannot be an empty string.") + } + if copyID == "" { + return result, validation.NewError("files.Client", "AbortCopy", "`copyID` cannot be an empty string.") + } + + req, err := client.AbortCopyPreparer(ctx, accountName, shareName, path, fileName, copyID) + if err != nil { + err = autorest.NewErrorWithError(err, "files.Client", "AbortCopy", nil, "Failure preparing request") + return + } + + resp, err := client.AbortCopySender(req) + if err != nil { + result = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "files.Client", "AbortCopy", resp, "Failure sending request") + return + } + + result, err = client.AbortCopyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "files.Client", "AbortCopy", resp, "Failure responding to request") + return + } + + return +} + +// AbortCopyPreparer prepares the AbortCopy request. 
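A pending copy can be cancelled with the CopyID captured from Copy. A short sketch against the AbortCopy signature above, with the client and all names supplied by the caller:

```go
package main

import (
	"context"
	"fmt"

	"github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files"
)

// abortPendingCopy cancels an in-flight copy; per the AbortCopy documentation
// above, the destination file is left zero-length with full metadata.
func abortPendingCopy(ctx context.Context, client files.Client, accountName, shareName, directory, fileName, copyID string) error {
	if _, err := client.AbortCopy(ctx, accountName, shareName, directory, fileName, copyID); err != nil {
		return fmt.Errorf("aborting copy %q: %+v", copyID, err)
	}
	return nil
}
```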
+func (client Client) AbortCopyPreparer(ctx context.Context, accountName, shareName, path, fileName, copyID string) (*http.Request, error) { + if path != "" { + path = fmt.Sprintf("%s/", path) + } + pathParameters := map[string]interface{}{ + "shareName": autorest.Encode("path", shareName), + "directory": autorest.Encode("path", path), + "fileName": autorest.Encode("path", fileName), + } + + queryParameters := map[string]interface{}{ + "comp": autorest.Encode("query", "copy"), + "copyid": autorest.Encode("query", copyID), + } + + headers := map[string]interface{}{ + "x-ms-version": APIVersion, + "x-ms-copy-action": "abort", + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/xml; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(endpoints.GetFileEndpoint(client.BaseURI, accountName)), + autorest.WithPathParameters("/{shareName}/{directory}{fileName}", pathParameters), + autorest.WithQueryParameters(queryParameters), + autorest.WithHeaders(headers)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// AbortCopySender sends the AbortCopy request. The method will close the +// http.Response Body if it receives an error. +func (client Client) AbortCopySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// AbortCopyResponder handles the response to the AbortCopy request. The method always +// closes the http.Response Body. +func (client Client) AbortCopyResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusNoContent), + autorest.ByClosing()) + result = autorest.Response{Response: resp} + + return +} diff --git a/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/copy_wait.go b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/copy_wait.go new file mode 100644 index 000000000000..e6a646b1017b --- /dev/null +++ b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/copy_wait.go @@ -0,0 +1,55 @@ +package files + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/Azure/go-autorest/autorest" +) + +type CopyAndWaitResult struct { + autorest.Response + + CopyID string +} + +const DefaultCopyPollDuration = 15 * time.Second + +// CopyAndWait is a convenience method which doesn't exist in the API, which copies the file and then waits for the copy to complete +func (client Client) CopyAndWait(ctx context.Context, accountName, shareName, path, fileName string, input CopyInput, pollDuration time.Duration) (result CopyResult, err error) { + copy, e := client.Copy(ctx, accountName, shareName, path, fileName, input) + if err != nil { + result.Response = copy.Response + err = fmt.Errorf("Error copying: %s", e) + return + } + + result.CopyID = copy.CopyID + + // since the API doesn't return a LRO, this is a hack which also polls every 10s, but should be sufficient + for true { + props, e := client.GetProperties(ctx, accountName, shareName, path, fileName) + if e != nil { + result.Response = copy.Response + err = fmt.Errorf("Error waiting for copy: %s", e) + return + } + + switch strings.ToLower(props.CopyStatus) { + case "pending": + time.Sleep(pollDuration) + continue + + case "success": + return + + default: + err = fmt.Errorf("Unexpected CopyState %q", e) + return + } + } + + return +} diff --git 
a/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/create.go b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/create.go new file mode 100644 index 000000000000..d2b4ff358120 --- /dev/null +++ b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/create.go @@ -0,0 +1,169 @@ +package files + +import ( + "context" + "fmt" + "net/http" + "strings" + "time" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/tombuildsstuff/giovanni/storage/internal/endpoints" + "github.com/tombuildsstuff/giovanni/storage/internal/metadata" +) + +type CreateInput struct { + // This header specifies the maximum size for the file, up to 1 TiB. + ContentLength int64 + + // The MIME content type of the file + // If not specified, the default type is application/octet-stream. + ContentType *string + + // Specifies which content encodings have been applied to the file. + // This value is returned to the client when the Get File operation is performed + // on the file resource and can be used to decode file content. + ContentEncoding *string + + // Specifies the natural languages used by this resource. + ContentLanguage *string + + // The File service stores this value but does not use or modify it. + CacheControl *string + + // Sets the file's MD5 hash. + ContentMD5 *string + + // Sets the file’s Content-Disposition header. + ContentDisposition *string + + // The time at which this file was created at - if omitted, this'll be set to "now" + // This maps to the `x-ms-file-creation-time` field. + CreatedAt *time.Time + + // The time at which this file was last modified - if omitted, this'll be set to "now" + // This maps to the `x-ms-file-last-write-time` field. + LastModified *time.Time + + // MetaData is a mapping of key value pairs which should be assigned to this file + MetaData map[string]string +} + +// Create creates a new file or replaces a file. 
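CreateInput maps directly onto the Create File request headers assembled in CreatePreparer below. A sketch of allocating an empty 1 KiB file with a content type and metadata (placeholder names, client constructed as in the earlier example); the file's bytes are written separately through the range APIs:

```go
package main

import (
	"context"
	"fmt"

	"github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files"
)

func createExample(ctx context.Context, client files.Client) error {
	// Placeholder names - align these with the share/directory under test.
	accountName := "examplestorageacct"
	shareName := "myshare"
	directoryName := "myfiles"
	fileName := "example.txt"

	contentType := "text/plain"
	input := files.CreateInput{
		ContentLength: 1024, // allocates a 1 KiB file; content is uploaded separately
		ContentType:   &contentType,
		MetaData:      map[string]string{"hello": "world"},
	}

	if _, err := client.Create(ctx, accountName, shareName, directoryName, fileName, input); err != nil {
		return fmt.Errorf("creating file %q: %+v", fileName, err)
	}
	return nil
}
```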
+func (client Client) Create(ctx context.Context, accountName, shareName, path, fileName string, input CreateInput) (result autorest.Response, err error) { + if accountName == "" { + return result, validation.NewError("files.Client", "Create", "`accountName` cannot be an empty string.") + } + if shareName == "" { + return result, validation.NewError("files.Client", "Create", "`shareName` cannot be an empty string.") + } + if strings.ToLower(shareName) != shareName { + return result, validation.NewError("files.Client", "Create", "`shareName` must be a lower-cased string.") + } + if fileName == "" { + return result, validation.NewError("files.Client", "Create", "`fileName` cannot be an empty string.") + } + if err := metadata.Validate(input.MetaData); err != nil { + return result, validation.NewError("files.Client", "Create", "`input.MetaData` cannot be an empty string.") + } + + req, err := client.CreatePreparer(ctx, accountName, shareName, path, fileName, input) + if err != nil { + err = autorest.NewErrorWithError(err, "files.Client", "Create", nil, "Failure preparing request") + return + } + + resp, err := client.CreateSender(req) + if err != nil { + result = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "files.Client", "Create", resp, "Failure sending request") + return + } + + result, err = client.CreateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "files.Client", "Create", resp, "Failure responding to request") + return + } + + return +} + +// CreatePreparer prepares the Create request. +func (client Client) CreatePreparer(ctx context.Context, accountName, shareName, path, fileName string, input CreateInput) (*http.Request, error) { + if path != "" { + path = fmt.Sprintf("%s/", path) + } + pathParameters := map[string]interface{}{ + "shareName": autorest.Encode("path", shareName), + "directory": autorest.Encode("path", path), + "fileName": autorest.Encode("path", fileName), + } + + var coalesceDate = func(input *time.Time, defaultVal string) string { + if input == nil { + return defaultVal + } + + return input.Format(time.RFC1123) + } + + headers := map[string]interface{}{ + "x-ms-version": APIVersion, + "x-ms-content-length": input.ContentLength, + "x-ms-type": "file", + + "x-ms-file-permission": "inherit", // TODO: expose this in future + "x-ms-file-attributes": "None", // TODO: expose this in future + "x-ms-file-creation-time": coalesceDate(input.CreatedAt, "now"), + "x-ms-file-last-write-time": coalesceDate(input.LastModified, "now"), + } + + if input.ContentDisposition != nil { + headers["x-ms-content-disposition"] = *input.ContentDisposition + } + + if input.ContentEncoding != nil { + headers["x-ms-content-encoding"] = *input.ContentEncoding + } + + if input.ContentMD5 != nil { + headers["x-ms-content-md5"] = *input.ContentMD5 + } + + if input.ContentType != nil { + headers["x-ms-content-type"] = *input.ContentType + } + + headers = metadata.SetIntoHeaders(headers, input.MetaData) + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/xml; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(endpoints.GetFileEndpoint(client.BaseURI, accountName)), + autorest.WithPathParameters("/{shareName}/{directory}{fileName}", pathParameters), + autorest.WithHeaders(headers)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateSender sends the Create request. The method will close the +// http.Response Body if it receives an error. 
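Putting Create together with PutFile from api.go gives the typical upload flow. This is a sketch under the assumption that PutFile only streams byte ranges into an already-created file of the right length; if it creates the file itself, the explicit Create call here is redundant:

```go
package main

import (
	"context"
	"fmt"
	"os"

	"github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files"
)

func uploadLocalFile(ctx context.Context, client files.Client, accountName, shareName, directoryName, fileName, localPath string) error {
	f, err := os.Open(localPath)
	if err != nil {
		return fmt.Errorf("opening %q: %+v", localPath, err)
	}
	defer f.Close()

	info, err := f.Stat()
	if err != nil {
		return fmt.Errorf("stat-ing %q: %+v", localPath, err)
	}

	// Allocate the remote file at its final size...
	input := files.CreateInput{
		ContentLength: info.Size(),
	}
	if _, err := client.Create(ctx, accountName, shareName, directoryName, fileName, input); err != nil {
		return fmt.Errorf("creating file %q: %+v", fileName, err)
	}

	// ...then upload the contents with a parallelism of 4 (assumption: PutFile
	// splits the local file into byte ranges and uploads them concurrently).
	if err := client.PutFile(ctx, accountName, shareName, directoryName, fileName, f, 4); err != nil {
		return fmt.Errorf("uploading %q: %+v", localPath, err)
	}
	return nil
}
```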
+func (client Client) CreateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// CreateResponder handles the response to the Create request. The method always +// closes the http.Response Body. +func (client Client) CreateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusCreated), + autorest.ByClosing()) + result = autorest.Response{Response: resp} + + return +} diff --git a/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/delete.go b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/delete.go new file mode 100644 index 000000000000..5debd767d1fd --- /dev/null +++ b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/delete.go @@ -0,0 +1,94 @@ +package files + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/tombuildsstuff/giovanni/storage/internal/endpoints" +) + +// Delete immediately deletes the file from the File Share. +func (client Client) Delete(ctx context.Context, accountName, shareName, path, fileName string) (result autorest.Response, err error) { + if accountName == "" { + return result, validation.NewError("files.Client", "Delete", "`accountName` cannot be an empty string.") + } + if shareName == "" { + return result, validation.NewError("files.Client", "Delete", "`shareName` cannot be an empty string.") + } + if strings.ToLower(shareName) != shareName { + return result, validation.NewError("files.Client", "Delete", "`shareName` must be a lower-cased string.") + } + if fileName == "" { + return result, validation.NewError("files.Client", "Delete", "`fileName` cannot be an empty string.") + } + + req, err := client.DeletePreparer(ctx, accountName, shareName, path, fileName) + if err != nil { + err = autorest.NewErrorWithError(err, "files.Client", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "files.Client", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "files.Client", "Delete", resp, "Failure responding to request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client Client) DeletePreparer(ctx context.Context, accountName, shareName, path, fileName string) (*http.Request, error) { + if path != "" { + path = fmt.Sprintf("%s/", path) + } + pathParameters := map[string]interface{}{ + "shareName": autorest.Encode("path", shareName), + "directory": autorest.Encode("path", path), + "fileName": autorest.Encode("path", fileName), + } + + headers := map[string]interface{}{ + "x-ms-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/xml; charset=utf-8"), + autorest.AsDelete(), + autorest.WithBaseURL(endpoints.GetFileEndpoint(client.BaseURI, accountName)), + autorest.WithPathParameters("/{shareName}/{directory}{fileName}", pathParameters), + autorest.WithHeaders(headers)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. 
The method will close the +// http.Response Body if it receives an error. +func (client Client) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client Client) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusAccepted), + autorest.ByClosing()) + result = autorest.Response{Response: resp} + + return +} diff --git a/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/metadata_get.go b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/metadata_get.go new file mode 100644 index 000000000000..fd62f90aec8f --- /dev/null +++ b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/metadata_get.go @@ -0,0 +1,111 @@ +package files + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/tombuildsstuff/giovanni/storage/internal/endpoints" + "github.com/tombuildsstuff/giovanni/storage/internal/metadata" +) + +type GetMetaDataResult struct { + autorest.Response + + MetaData map[string]string +} + +// GetMetaData returns the MetaData for the specified File. +func (client Client) GetMetaData(ctx context.Context, accountName, shareName, path, fileName string) (result GetMetaDataResult, err error) { + if accountName == "" { + return result, validation.NewError("files.Client", "GetMetaData", "`accountName` cannot be an empty string.") + } + if shareName == "" { + return result, validation.NewError("files.Client", "GetMetaData", "`shareName` cannot be an empty string.") + } + if strings.ToLower(shareName) != shareName { + return result, validation.NewError("files.Client", "GetMetaData", "`shareName` must be a lower-cased string.") + } + if fileName == "" { + return result, validation.NewError("files.Client", "GetMetaData", "`fileName` cannot be an empty string.") + } + + req, err := client.GetMetaDataPreparer(ctx, accountName, shareName, path, fileName) + if err != nil { + err = autorest.NewErrorWithError(err, "files.Client", "GetMetaData", nil, "Failure preparing request") + return + } + + resp, err := client.GetMetaDataSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "files.Client", "GetMetaData", resp, "Failure sending request") + return + } + + result, err = client.GetMetaDataResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "files.Client", "GetMetaData", resp, "Failure responding to request") + return + } + + return +} + +// GetMetaDataPreparer prepares the GetMetaData request. 
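GetMetaData is the lightweight read when only the metadata map is needed (GetProperties further below returns the same map alongside the content headers). A sketch with caller-supplied placeholder names:

```go
package main

import (
	"context"
	"fmt"

	"github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files"
)

// printFileMetaData fetches and prints the metadata assigned to a file.
func printFileMetaData(ctx context.Context, client files.Client, accountName, shareName, directoryName, fileName string) error {
	result, err := client.GetMetaData(ctx, accountName, shareName, directoryName, fileName)
	if err != nil {
		return fmt.Errorf("retrieving metadata for %q: %+v", fileName, err)
	}

	for k, v := range result.MetaData {
		fmt.Printf("%s = %s\n", k, v)
	}
	return nil
}
```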
+func (client Client) GetMetaDataPreparer(ctx context.Context, accountName, shareName, path, fileName string) (*http.Request, error) { + if path != "" { + path = fmt.Sprintf("%s/", path) + } + pathParameters := map[string]interface{}{ + "shareName": autorest.Encode("path", shareName), + "directory": autorest.Encode("path", path), + "fileName": autorest.Encode("path", fileName), + } + + queryParameters := map[string]interface{}{ + "comp": autorest.Encode("query", "metadata"), + } + + headers := map[string]interface{}{ + "x-ms-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/xml; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(endpoints.GetFileEndpoint(client.BaseURI, accountName)), + autorest.WithPathParameters("/{shareName}/{directory}{fileName}", pathParameters), + autorest.WithQueryParameters(queryParameters), + autorest.WithHeaders(headers)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetMetaDataSender sends the GetMetaData request. The method will close the +// http.Response Body if it receives an error. +func (client Client) GetMetaDataSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// GetMetaDataResponder handles the response to the GetMetaData request. The method always +// closes the http.Response Body. +func (client Client) GetMetaDataResponder(resp *http.Response) (result GetMetaDataResult, err error) { + if resp != nil && resp.Header != nil { + result.MetaData = metadata.ParseFromHeaders(resp.Header) + } + + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + //metadata.ByParsingFromHeaders(&result.MetaData), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + + return +} diff --git a/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/metadata_set.go b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/metadata_set.go new file mode 100644 index 000000000000..41e3ffcb8ff9 --- /dev/null +++ b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/metadata_set.go @@ -0,0 +1,105 @@ +package files + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/tombuildsstuff/giovanni/storage/internal/endpoints" + "github.com/tombuildsstuff/giovanni/storage/internal/metadata" +) + +// SetMetaData updates the specified File to have the specified MetaData. 
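SetMetaData replaces the file's metadata map in a single request, which is exactly what resourceArmStorageShareFileUpdate relies on. A sketch with placeholder values:

```go
package main

import (
	"context"
	"fmt"

	"github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files"
)

// replaceFileMetaData overwrites the file's metadata, mirroring what the
// resource's Update function does after expanding the `metadata` attribute.
func replaceFileMetaData(ctx context.Context, client files.Client, accountName, shareName, directoryName, fileName string) error {
	metaData := map[string]string{
		"environment": "test",
		"owner":       "example",
	}

	if _, err := client.SetMetaData(ctx, accountName, shareName, directoryName, fileName, metaData); err != nil {
		return fmt.Errorf("setting metadata on %q: %+v", fileName, err)
	}
	return nil
}
```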
+func (client Client) SetMetaData(ctx context.Context, accountName, shareName, path, fileName string, metaData map[string]string) (result autorest.Response, err error) { + if accountName == "" { + return result, validation.NewError("files.Client", "SetMetaData", "`accountName` cannot be an empty string.") + } + if shareName == "" { + return result, validation.NewError("files.Client", "SetMetaData", "`shareName` cannot be an empty string.") + } + if strings.ToLower(shareName) != shareName { + return result, validation.NewError("files.Client", "SetMetaData", "`shareName` must be a lower-cased string.") + } + if fileName == "" { + return result, validation.NewError("files.Client", "SetMetaData", "`fileName` cannot be an empty string.") + } + if err := metadata.Validate(metaData); err != nil { + return result, validation.NewError("files.Client", "SetMetaData", fmt.Sprintf("`metaData` is not valid: %s.", err)) + } + + req, err := client.SetMetaDataPreparer(ctx, accountName, shareName, path, fileName, metaData) + if err != nil { + err = autorest.NewErrorWithError(err, "files.Client", "SetMetaData", nil, "Failure preparing request") + return + } + + resp, err := client.SetMetaDataSender(req) + if err != nil { + result = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "files.Client", "SetMetaData", resp, "Failure sending request") + return + } + + result, err = client.SetMetaDataResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "files.Client", "SetMetaData", resp, "Failure responding to request") + return + } + + return +} + +// SetMetaDataPreparer prepares the SetMetaData request. +func (client Client) SetMetaDataPreparer(ctx context.Context, accountName, shareName, path, fileName string, metaData map[string]string) (*http.Request, error) { + if path != "" { + path = fmt.Sprintf("%s/", path) + } + pathParameters := map[string]interface{}{ + "shareName": autorest.Encode("path", shareName), + "directory": autorest.Encode("path", path), + "fileName": autorest.Encode("path", fileName), + } + + queryParameters := map[string]interface{}{ + "comp": autorest.Encode("query", "metadata"), + } + + headers := map[string]interface{}{ + "x-ms-version": APIVersion, + } + + headers = metadata.SetIntoHeaders(headers, metaData) + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/xml; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(endpoints.GetFileEndpoint(client.BaseURI, accountName)), + autorest.WithPathParameters("/{shareName}/{directory}{fileName}", pathParameters), + autorest.WithQueryParameters(queryParameters), + autorest.WithHeaders(headers)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// SetMetaDataSender sends the SetMetaData request. The method will close the +// http.Response Body if it receives an error. +func (client Client) SetMetaDataSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// SetMetaDataResponder handles the response to the SetMetaData request. The method always +// closes the http.Response Body. 
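For context on how that map reaches the service: the internal metadata.SetIntoHeaders helper is not part of this diff, but the File service convention is to carry each entry as an `x-ms-meta-<key>` header. A standalone illustration of that mapping, not the vendored implementation:

```go
package main

import "fmt"

// metaDataToHeaders shows the header shape the service expects: each metadata
// key becomes an x-ms-meta-<key> header alongside the API version. This is an
// illustration of the convention only.
func metaDataToHeaders(metaData map[string]string) map[string]string {
	headers := map[string]string{
		"x-ms-version": "2019-12-12",
	}
	for k, v := range metaData {
		headers[fmt.Sprintf("x-ms-meta-%s", k)] = v
	}
	return headers
}
```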
+func (client Client) SetMetaDataResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result = autorest.Response{Response: resp} + + return +} diff --git a/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/properties_get.go b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/properties_get.go new file mode 100644 index 000000000000..c6a0c399d2d7 --- /dev/null +++ b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/properties_get.go @@ -0,0 +1,144 @@ +package files + +import ( + "context" + "fmt" + "net/http" + "strconv" + "strings" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/tombuildsstuff/giovanni/storage/internal/endpoints" + "github.com/tombuildsstuff/giovanni/storage/internal/metadata" +) + +type GetResult struct { + autorest.Response + + CacheControl string + ContentDisposition string + ContentEncoding string + ContentLanguage string + ContentLength *int64 + ContentMD5 string + ContentType string + CopyID string + CopyStatus string + CopySource string + CopyProgress string + CopyStatusDescription string + CopyCompletionTime string + Encrypted bool + + MetaData map[string]string +} + +// GetProperties returns the Properties for the specified file +func (client Client) GetProperties(ctx context.Context, accountName, shareName, path, fileName string) (result GetResult, err error) { + if accountName == "" { + return result, validation.NewError("files.Client", "GetProperties", "`accountName` cannot be an empty string.") + } + if shareName == "" { + return result, validation.NewError("files.Client", "GetProperties", "`shareName` cannot be an empty string.") + } + if strings.ToLower(shareName) != shareName { + return result, validation.NewError("files.Client", "GetProperties", "`shareName` must be a lower-cased string.") + } + if fileName == "" { + return result, validation.NewError("files.Client", "GetProperties", "`fileName` cannot be an empty string.") + } + + req, err := client.GetPropertiesPreparer(ctx, accountName, shareName, path, fileName) + if err != nil { + err = autorest.NewErrorWithError(err, "files.Client", "GetProperties", nil, "Failure preparing request") + return + } + + resp, err := client.GetPropertiesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "files.Client", "GetProperties", resp, "Failure sending request") + return + } + + result, err = client.GetPropertiesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "files.Client", "GetProperties", resp, "Failure responding to request") + return + } + + return +} + +// GetPropertiesPreparer prepares the GetProperties request. 
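GetProperties doubles as the existence check in the Terraform resource (via utils.ResponseWasNotFound). A sketch of the same pattern against the client directly, treating a 404 on the embedded response as "not found" rather than an error:

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files"
)

// fileExists reports whether the file exists, mirroring the idea behind
// utils.ResponseWasNotFound in resource_arm_storage_share_file.go.
func fileExists(ctx context.Context, client files.Client, accountName, shareName, directoryName, fileName string) (bool, error) {
	props, err := client.GetProperties(ctx, accountName, shareName, directoryName, fileName)
	if err != nil {
		if props.Response.Response != nil && props.Response.StatusCode == http.StatusNotFound {
			return false, nil
		}
		return false, fmt.Errorf("retrieving properties for %q: %+v", fileName, err)
	}

	if props.ContentLength != nil {
		fmt.Printf("%q is %d bytes, copy status %q\n", fileName, *props.ContentLength, props.CopyStatus)
	}
	return true, nil
}
```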
+func (client Client) GetPropertiesPreparer(ctx context.Context, accountName, shareName, path, fileName string) (*http.Request, error) { + if path != "" { + path = fmt.Sprintf("%s/", path) + } + pathParameters := map[string]interface{}{ + "shareName": autorest.Encode("path", shareName), + "directory": autorest.Encode("path", path), + "fileName": autorest.Encode("path", fileName), + } + + headers := map[string]interface{}{ + "x-ms-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/xml; charset=utf-8"), + autorest.AsHead(), + autorest.WithBaseURL(endpoints.GetFileEndpoint(client.BaseURI, accountName)), + autorest.WithPathParameters("/{shareName}/{directory}{fileName}", pathParameters), + autorest.WithHeaders(headers)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetPropertiesSender sends the GetProperties request. The method will close the +// http.Response Body if it receives an error. +func (client Client) GetPropertiesSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// GetPropertiesResponder handles the response to the GetProperties request. The method always +// closes the http.Response Body. +func (client Client) GetPropertiesResponder(resp *http.Response) (result GetResult, err error) { + if resp != nil && resp.Header != nil { + result.CacheControl = resp.Header.Get("Cache-Control") + result.ContentDisposition = resp.Header.Get("Content-Disposition") + result.ContentEncoding = resp.Header.Get("Content-Encoding") + result.ContentLanguage = resp.Header.Get("Content-Language") + result.ContentMD5 = resp.Header.Get("x-ms-content-md5") + result.ContentType = resp.Header.Get("Content-Type") + result.CopyID = resp.Header.Get("x-ms-copy-id") + result.CopyProgress = resp.Header.Get("x-ms-copy-progress") + result.CopySource = resp.Header.Get("x-ms-copy-source") + result.CopyStatus = resp.Header.Get("x-ms-copy-status") + result.CopyStatusDescription = resp.Header.Get("x-ms-copy-status-description") + result.CopyCompletionTime = resp.Header.Get("x-ms-copy-completion-time") + result.Encrypted = strings.EqualFold(resp.Header.Get("x-ms-server-encrypted"), "true") + result.MetaData = metadata.ParseFromHeaders(resp.Header) + + contentLengthRaw := resp.Header.Get("Content-Length") + if contentLengthRaw != "" { + contentLength, err := strconv.Atoi(contentLengthRaw) + if err != nil { + return result, fmt.Errorf("Error parsing %q for Content-Length as an integer: %s", contentLengthRaw, err) + } + contentLengthI64 := int64(contentLength) + result.ContentLength = &contentLengthI64 + } + } + + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + + return +} diff --git a/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/properties_set.go b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/properties_set.go new file mode 100644 index 000000000000..b5514be5c0dd --- /dev/null +++ b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/properties_set.go @@ -0,0 +1,182 @@ +package files + +import ( + "context" + "fmt" + "net/http" + "strings" + "time" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + 
"github.com/tombuildsstuff/giovanni/storage/internal/endpoints" +) + +type SetPropertiesInput struct { + // Resizes a file to the specified size. + // If the specified byte value is less than the current size of the file, + // then all ranges above the specified byte value are cleared. + ContentLength *int64 + + // Modifies the cache control string for the file. + // If this property is not specified on the request, then the property will be cleared for the file. + // Subsequent calls to Get File Properties will not return this property, + // unless it is explicitly set on the file again. + ContentControl *string + + // Sets the file’s Content-Disposition header. + // If this property is not specified on the request, then the property will be cleared for the file. + // Subsequent calls to Get File Properties will not return this property, + // unless it is explicitly set on the file again. + ContentDisposition *string + + // Sets the file's content encoding. + // If this property is not specified on the request, then the property will be cleared for the file. + // Subsequent calls to Get File Properties will not return this property, + // unless it is explicitly set on the file again. + ContentEncoding *string + + // Sets the file's content language. + // If this property is not specified on the request, then the property will be cleared for the file. + // Subsequent calls to Get File Properties will not return this property, + // unless it is explicitly set on the file again. + ContentLanguage *string + + // Sets the file's MD5 hash. + // If this property is not specified on the request, then the property will be cleared for the file. + // Subsequent calls to Get File Properties will not return this property, + // unless it is explicitly set on the file again. + ContentMD5 *string + + // Sets the file's content type. + // If this property is not specified on the request, then the property will be cleared for the file. + // Subsequent calls to Get File Properties will not return this property, + // unless it is explicitly set on the file again. + ContentType *string + + // The time at which this file was created at - if omitted, this'll be set to "now" + // This maps to the `x-ms-file-creation-time` field. + CreatedAt *time.Time + + // The time at which this file was last modified - if omitted, this'll be set to "now" + // This maps to the `x-ms-file-last-write-time` field. 
+ LastModified *time.Time +} + +// SetProperties sets the specified properties on the specified File +func (client Client) SetProperties(ctx context.Context, accountName, shareName, path, fileName string, input SetPropertiesInput) (result autorest.Response, err error) { + if accountName == "" { + return result, validation.NewError("files.Client", "SetProperties", "`accountName` cannot be an empty string.") + } + if shareName == "" { + return result, validation.NewError("files.Client", "SetProperties", "`shareName` cannot be an empty string.") + } + if strings.ToLower(shareName) != shareName { + return result, validation.NewError("files.Client", "SetProperties", "`shareName` must be a lower-cased string.") + } + if fileName == "" { + return result, validation.NewError("files.Client", "SetProperties", "`fileName` cannot be an empty string.") + } + + req, err := client.SetPropertiesPreparer(ctx, accountName, shareName, path, fileName, input) + if err != nil { + err = autorest.NewErrorWithError(err, "files.Client", "SetProperties", nil, "Failure preparing request") + return + } + + resp, err := client.SetPropertiesSender(req) + if err != nil { + result = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "files.Client", "SetProperties", resp, "Failure sending request") + return + } + + result, err = client.SetPropertiesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "files.Client", "SetProperties", resp, "Failure responding to request") + return + } + + return +} + +// SetPropertiesPreparer prepares the SetProperties request. +func (client Client) SetPropertiesPreparer(ctx context.Context, accountName, shareName, path, fileName string, input SetPropertiesInput) (*http.Request, error) { + if path != "" { + path = fmt.Sprintf("%s/", path) + } + pathParameters := map[string]interface{}{ + "shareName": autorest.Encode("path", shareName), + "directory": autorest.Encode("path", path), + "fileName": autorest.Encode("path", fileName), + } + + var coalesceDate = func(input *time.Time, defaultVal string) string { + if input == nil { + return defaultVal + } + + return input.Format(time.RFC1123) + } + + headers := map[string]interface{}{ + "x-ms-version": APIVersion, + "x-ms-type": "file", + + "x-ms-file-permission": "inherit", // TODO: expose this in future + "x-ms-file-attributes": "None", // TODO: expose this in future + "x-ms-file-creation-time": coalesceDate(input.CreatedAt, "now"), + "x-ms-file-last-write-time": coalesceDate(input.LastModified, "now"), + } + + if input.ContentControl != nil { + headers["x-ms-cache-control"] = *input.ContentControl + } + if input.ContentDisposition != nil { + headers["x-ms-content-disposition"] = *input.ContentDisposition + } + if input.ContentEncoding != nil { + headers["x-ms-content-encoding"] = *input.ContentEncoding + } + if input.ContentLanguage != nil { + headers["x-ms-content-language"] = *input.ContentLanguage + } + if input.ContentLength != nil { + headers["x-ms-content-length"] = *input.ContentLength + } + if input.ContentMD5 != nil { + headers["x-ms-content-md5"] = *input.ContentMD5 + } + if input.ContentType != nil { + headers["x-ms-content-type"] = *input.ContentType + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/xml; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(endpoints.GetFileEndpoint(client.BaseURI, accountName)), + autorest.WithPathParameters("/{shareName}/{directory}{fileName}", pathParameters), + autorest.WithHeaders(headers)) + return 
preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// SetPropertiesSender sends the SetProperties request. The method will close the +// http.Response Body if it receives an error. +func (client Client) SetPropertiesSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// SetPropertiesResponder handles the response to the SetProperties request. The method always +// closes the http.Response Body. +func (client Client) SetPropertiesResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusCreated), + autorest.ByClosing()) + result = autorest.Response{Response: resp} + + return +} diff --git a/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/range_clear.go b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/range_clear.go new file mode 100644 index 000000000000..5d8145fae422 --- /dev/null +++ b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/range_clear.go @@ -0,0 +1,112 @@ +package files + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/tombuildsstuff/giovanni/storage/internal/endpoints" +) + +type ClearByteRangeInput struct { + StartBytes int64 + EndBytes int64 +} + +// ClearByteRange clears the specified Byte Range from within the specified File +func (client Client) ClearByteRange(ctx context.Context, accountName, shareName, path, fileName string, input ClearByteRangeInput) (result autorest.Response, err error) { + if accountName == "" { + return result, validation.NewError("files.Client", "ClearByteRange", "`accountName` cannot be an empty string.") + } + if shareName == "" { + return result, validation.NewError("files.Client", "ClearByteRange", "`shareName` cannot be an empty string.") + } + if strings.ToLower(shareName) != shareName { + return result, validation.NewError("files.Client", "ClearByteRange", "`shareName` must be a lower-cased string.") + } + if fileName == "" { + return result, validation.NewError("files.Client", "ClearByteRange", "`fileName` cannot be an empty string.") + } + if input.StartBytes < 0 { + return result, validation.NewError("files.Client", "ClearByteRange", "`input.StartBytes` must be greater or equal to 0.") + } + if input.EndBytes <= 0 { + return result, validation.NewError("files.Client", "ClearByteRange", "`input.EndBytes` must be greater than 0.") + } + + req, err := client.ClearByteRangePreparer(ctx, accountName, shareName, path, fileName, input) + if err != nil { + err = autorest.NewErrorWithError(err, "files.Client", "ClearByteRange", nil, "Failure preparing request") + return + } + + resp, err := client.ClearByteRangeSender(req) + if err != nil { + result = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "files.Client", "ClearByteRange", resp, "Failure sending request") + return + } + + result, err = client.ClearByteRangeResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "files.Client", "ClearByteRange", resp, "Failure responding to request") + return + } + + return +} + +// ClearByteRangePreparer prepares the ClearByteRange request. 
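+// For reference, a rough usage sketch of the ClearByteRange operation above
+// (illustrative only; `c` is assumed to be an authorized files.Client and
+// `ctx` a context.Context):
+//
+//	input := ClearByteRangeInput{StartBytes: 0, EndBytes: 4096}
+//	if _, err := c.ClearByteRange(ctx, "account1", "share1", "", "file1.txt", input); err != nil {
+//		// handle the error
+//	}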
+func (client Client) ClearByteRangePreparer(ctx context.Context, accountName, shareName, path, fileName string, input ClearByteRangeInput) (*http.Request, error) { + if path != "" { + path = fmt.Sprintf("%s/", path) + } + pathParameters := map[string]interface{}{ + "shareName": autorest.Encode("path", shareName), + "directory": autorest.Encode("path", path), + "fileName": autorest.Encode("path", fileName), + } + + queryParameters := map[string]interface{}{ + "comp": autorest.Encode("query", "range"), + } + + headers := map[string]interface{}{ + "x-ms-version": APIVersion, + "x-ms-write": "clear", + "x-ms-range": fmt.Sprintf("bytes=%d-%d", input.StartBytes, input.EndBytes), + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/xml; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(endpoints.GetFileEndpoint(client.BaseURI, accountName)), + autorest.WithPathParameters("/{shareName}/{directory}{fileName}", pathParameters), + autorest.WithHeaders(headers), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ClearByteRangeSender sends the ClearByteRange request. The method will close the +// http.Response Body if it receives an error. +func (client Client) ClearByteRangeSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ClearByteRangeResponder handles the response to the ClearByteRange request. The method always +// closes the http.Response Body. +func (client Client) ClearByteRangeResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusCreated), + autorest.ByClosing()) + result = autorest.Response{Response: resp} + + return +} diff --git a/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/range_get.go b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/range_get.go new file mode 100644 index 000000000000..733d3f525105 --- /dev/null +++ b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/range_get.go @@ -0,0 +1,121 @@ +package files + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/tombuildsstuff/giovanni/storage/internal/endpoints" +) + +type GetByteRangeInput struct { + StartBytes int64 + EndBytes int64 +} + +type GetByteRangeResult struct { + autorest.Response + + Contents []byte +} + +// GetByteRange returns the specified Byte Range from the specified File. 
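+// A rough usage sketch (illustrative only; `c` is assumed to be an authorized
+// files.Client and `ctx` a context.Context). Note that the validation below
+// requires the requested range to be between 4KB and 4MB:
+//
+//	input := GetByteRangeInput{StartBytes: 0, EndBytes: 4 * 1024}
+//	resp, err := c.GetByteRange(ctx, "account1", "share1", "", "file1.txt", input)
+//	if err == nil {
+//		_ = resp.Contents // up to the first 4KB of the file
+//	}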
+func (client Client) GetByteRange(ctx context.Context, accountName, shareName, path, fileName string, input GetByteRangeInput) (result GetByteRangeResult, err error) { + if accountName == "" { + return result, validation.NewError("files.Client", "GetByteRange", "`accountName` cannot be an empty string.") + } + if shareName == "" { + return result, validation.NewError("files.Client", "GetByteRange", "`shareName` cannot be an empty string.") + } + if strings.ToLower(shareName) != shareName { + return result, validation.NewError("files.Client", "GetByteRange", "`shareName` must be a lower-cased string.") + } + if fileName == "" { + return result, validation.NewError("files.Client", "GetByteRange", "`fileName` cannot be an empty string.") + } + if input.StartBytes < 0 { + return result, validation.NewError("files.Client", "GetByteRange", "`input.StartBytes` must be greater or equal to 0.") + } + if input.EndBytes <= 0 { + return result, validation.NewError("files.Client", "GetByteRange", "`input.EndBytes` must be greater than 0.") + } + expectedBytes := input.EndBytes - input.StartBytes + if expectedBytes < (4 * 1024) { + return result, validation.NewError("files.Client", "GetByteRange", "Requested Byte Range must be at least 4KB.") + } + if expectedBytes > (4 * 1024 * 1024) { + return result, validation.NewError("files.Client", "GetByteRange", "Requested Byte Range must be at most 4MB.") + } + + req, err := client.GetByteRangePreparer(ctx, accountName, shareName, path, fileName, input) + if err != nil { + err = autorest.NewErrorWithError(err, "files.Client", "GetByteRange", nil, "Failure preparing request") + return + } + + resp, err := client.GetByteRangeSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "files.Client", "GetByteRange", resp, "Failure sending request") + return + } + + result, err = client.GetByteRangeResponder(resp, expectedBytes) + if err != nil { + err = autorest.NewErrorWithError(err, "files.Client", "GetByteRange", resp, "Failure responding to request") + return + } + + return +} + +// GetByteRangePreparer prepares the GetByteRange request. +func (client Client) GetByteRangePreparer(ctx context.Context, accountName, shareName, path, fileName string, input GetByteRangeInput) (*http.Request, error) { + if path != "" { + path = fmt.Sprintf("%s/", path) + } + pathParameters := map[string]interface{}{ + "shareName": autorest.Encode("path", shareName), + "directory": autorest.Encode("path", path), + "fileName": autorest.Encode("path", fileName), + } + + headers := map[string]interface{}{ + "x-ms-version": APIVersion, + "x-ms-range": fmt.Sprintf("bytes=%d-%d", input.StartBytes, input.EndBytes-1), + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(endpoints.GetFileEndpoint(client.BaseURI, accountName)), + autorest.WithPathParameters("/{shareName}/{directory}{fileName}", pathParameters), + autorest.WithHeaders(headers)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetByteRangeSender sends the GetByteRange request. The method will close the +// http.Response Body if it receives an error. +func (client Client) GetByteRangeSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// GetByteRangeResponder handles the response to the GetByteRange request. The method always +// closes the http.Response Body. 
+func (client Client) GetByteRangeResponder(resp *http.Response, length int64) (result GetByteRangeResult, err error) { + result.Contents = make([]byte, length) + + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusPartialContent), + autorest.ByUnmarshallingBytes(&result.Contents), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + + return +} diff --git a/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/range_get_file.go b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/range_get_file.go new file mode 100644 index 000000000000..9e5be17f85fc --- /dev/null +++ b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/range_get_file.go @@ -0,0 +1,128 @@ +package files + +import ( + "context" + "fmt" + "log" + "math" + "runtime" + "sync" + + "github.com/Azure/go-autorest/autorest" +) + +// GetFile is a helper method to download a file by chunking it automatically +func (client Client) GetFile(ctx context.Context, accountName, shareName, path, fileName string, parallelism int) (result autorest.Response, outputBytes []byte, err error) { + + // first look up the file and check out how many bytes it is + file, e := client.GetProperties(ctx, accountName, shareName, path, fileName) + if err != nil { + result = file.Response + err = e + return + } + + if file.ContentLength == nil { + err = fmt.Errorf("Content-Length was nil!") + return + } + + length := int64(*file.ContentLength) + chunkSize := int64(4 * 1024 * 1024) // 4MB + + if chunkSize > length { + chunkSize = length + } + + // then split that up into chunks and retrieve it retrieve it into the 'results' set + chunks := int(math.Ceil(float64(length) / float64(chunkSize))) + workerCount := parallelism * runtime.NumCPU() + if workerCount > chunks { + workerCount = chunks + } + + var waitGroup sync.WaitGroup + waitGroup.Add(workerCount) + + results := make([]*downloadFileChunkResult, chunks) + errors := make(chan error, chunkSize) + + for i := 0; i < chunks; i++ { + go func(i int) { + log.Printf("[DEBUG] Downloading Chunk %d of %d", i+1, chunks) + + dfci := downloadFileChunkInput{ + thisChunk: i, + chunkSize: chunkSize, + fileSize: length, + } + + result, err := client.downloadFileChunk(ctx, accountName, shareName, path, fileName, dfci) + if err != nil { + errors <- err + waitGroup.Done() + return + } + + // if there's no error, we should have bytes, so this is safe + results[i] = result + + waitGroup.Done() + }(i) + } + waitGroup.Wait() + + // TODO: we should switch to hashicorp/multi-error here + if len(errors) > 0 { + err = fmt.Errorf("Error downloading file: %s", <-errors) + return + } + + // then finally put it all together, in order and return it + output := make([]byte, length) + for _, v := range results { + copy(output[v.startBytes:v.endBytes], v.bytes) + } + + outputBytes = output + return +} + +type downloadFileChunkInput struct { + thisChunk int + chunkSize int64 + fileSize int64 +} + +type downloadFileChunkResult struct { + startBytes int64 + endBytes int64 + bytes []byte +} + +func (client Client) downloadFileChunk(ctx context.Context, accountName, shareName, path, fileName string, input downloadFileChunkInput) (*downloadFileChunkResult, error) { + startBytes := input.chunkSize * int64(input.thisChunk) + endBytes := startBytes + input.chunkSize + + // the last chunk may exceed the size of the file + remaining := input.fileSize - startBytes + if input.chunkSize > remaining { + 
endBytes = startBytes + remaining + } + + getInput := GetByteRangeInput{ + StartBytes: startBytes, + EndBytes: endBytes, + } + result, err := client.GetByteRange(ctx, accountName, shareName, path, fileName, getInput) + if err != nil { + return nil, fmt.Errorf("Error putting bytes: %s", err) + } + + output := downloadFileChunkResult{ + startBytes: startBytes, + endBytes: endBytes, + bytes: result.Contents, + } + return &output, nil +} diff --git a/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/range_put.go b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/range_put.go new file mode 100644 index 000000000000..208becc34bd1 --- /dev/null +++ b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/range_put.go @@ -0,0 +1,130 @@ +package files + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/tombuildsstuff/giovanni/storage/internal/endpoints" +) + +type PutByteRangeInput struct { + StartBytes int64 + EndBytes int64 + + // Content is the File Contents for the specified range + // which can be at most 4MB + Content []byte +} + +// PutByteRange puts the specified Byte Range in the specified File. +func (client Client) PutByteRange(ctx context.Context, accountName, shareName, path, fileName string, input PutByteRangeInput) (result autorest.Response, err error) { + if accountName == "" { + return result, validation.NewError("files.Client", "PutByteRange", "`accountName` cannot be an empty string.") + } + if shareName == "" { + return result, validation.NewError("files.Client", "PutByteRange", "`shareName` cannot be an empty string.") + } + if strings.ToLower(shareName) != shareName { + return result, validation.NewError("files.Client", "PutByteRange", "`shareName` must be a lower-cased string.") + } + if fileName == "" { + return result, validation.NewError("files.Client", "PutByteRange", "`fileName` cannot be an empty string.") + } + if input.StartBytes < 0 { + return result, validation.NewError("files.Client", "PutByteRange", "`input.StartBytes` must be greater or equal to 0.") + } + if input.EndBytes <= 0 { + return result, validation.NewError("files.Client", "PutByteRange", "`input.EndBytes` must be greater than 0.") + } + + expectedBytes := input.EndBytes - input.StartBytes + actualBytes := len(input.Content) + if expectedBytes != int64(actualBytes) { + return result, validation.NewError("files.Client", "PutByteRange", fmt.Sprintf("The specified byte-range (%d) didn't match the content size (%d).", expectedBytes, actualBytes)) + } + if expectedBytes < (4 * 1024) { + return result, validation.NewError("files.Client", "PutByteRange", "Specified Byte Range must be at least 4KB.") + } + + if expectedBytes > (4 * 1024 * 1024) { + return result, validation.NewError("files.Client", "PutByteRange", "Specified Byte Range must be at most 4MB.") + } + + req, err := client.PutByteRangePreparer(ctx, accountName, shareName, path, fileName, input) + if err != nil { + err = autorest.NewErrorWithError(err, "files.Client", "PutByteRange", nil, "Failure preparing request") + return + } + + resp, err := client.PutByteRangeSender(req) + if err != nil { + result = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "files.Client", "PutByteRange", resp, "Failure sending request") + return + } + + result, err = client.PutByteRangeResponder(resp) + if err != nil { + 
err = autorest.NewErrorWithError(err, "files.Client", "PutByteRange", resp, "Failure responding to request") + return + } + + return +} + +// PutByteRangePreparer prepares the PutByteRange request. +func (client Client) PutByteRangePreparer(ctx context.Context, accountName, shareName, path, fileName string, input PutByteRangeInput) (*http.Request, error) { + if path != "" { + path = fmt.Sprintf("%s/", path) + } + pathParameters := map[string]interface{}{ + "shareName": autorest.Encode("path", shareName), + "directory": autorest.Encode("path", path), + "fileName": autorest.Encode("path", fileName), + } + + queryParameters := map[string]interface{}{ + "comp": autorest.Encode("query", "range"), + } + + headers := map[string]interface{}{ + "x-ms-version": APIVersion, + "x-ms-write": "update", + "x-ms-range": fmt.Sprintf("bytes=%d-%d", input.StartBytes, input.EndBytes-1), + "Content-Length": int(len(input.Content)), + } + + preparer := autorest.CreatePreparer( + autorest.AsPut(), + autorest.WithBaseURL(endpoints.GetFileEndpoint(client.BaseURI, accountName)), + autorest.WithPathParameters("/{shareName}/{directory}{fileName}", pathParameters), + autorest.WithHeaders(headers), + autorest.WithQueryParameters(queryParameters), + autorest.WithBytes(&input.Content)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// PutByteRangeSender sends the PutByteRange request. The method will close the +// http.Response Body if it receives an error. +func (client Client) PutByteRangeSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// PutByteRangeResponder handles the response to the PutByteRange request. The method always +// closes the http.Response Body. 
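+// For reference, a rough usage sketch of the PutByteRange operation above
+// (illustrative only; `c` is assumed to be an authorized files.Client and
+// `ctx` a context.Context). The content must match the declared range exactly
+// and, per the validation above, be between 4KB and 4MB:
+//
+//	content := make([]byte, 4*1024)
+//	input := PutByteRangeInput{StartBytes: 0, EndBytes: int64(len(content)), Content: content}
+//	if _, err := c.PutByteRange(ctx, "account1", "share1", "", "file1.txt", input); err != nil {
+//		// handle the error
+//	}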
+func (client Client) PutByteRangeResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusCreated), + autorest.ByClosing()) + result = autorest.Response{Response: resp} + + return +} diff --git a/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/range_put_file.go b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/range_put_file.go new file mode 100644 index 000000000000..a39cd377cee8 --- /dev/null +++ b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/range_put_file.go @@ -0,0 +1,107 @@ +package files + +import ( + "context" + "fmt" + "io" + "log" + "math" + "os" + "runtime" + "sync" + + "github.com/Azure/go-autorest/autorest" +) + +// PutFile is a helper method which takes a file, and automatically chunks it up, rather than having to do this yourself +func (client Client) PutFile(ctx context.Context, accountName, shareName, path, fileName string, file *os.File, parallelism int) error { + fileInfo, err := file.Stat() + if err != nil { + return fmt.Errorf("Error loading file info: %s", err) + } + + fileSize := fileInfo.Size() + chunkSize := 4 * 1024 * 1024 // 4MB + if chunkSize > int(fileSize) { + chunkSize = int(fileSize) + } + chunks := int(math.Ceil(float64(fileSize) / float64(chunkSize*1.0))) + + workerCount := parallelism * runtime.NumCPU() + if workerCount > chunks { + workerCount = chunks + } + + var waitGroup sync.WaitGroup + waitGroup.Add(workerCount) + errors := make(chan error, chunkSize) + + for i := 0; i < chunks; i++ { + go func(i int) { + log.Printf("[DEBUG] Chunk %d of %d", i+1, chunks) + + uci := uploadChunkInput{ + thisChunk: i, + chunkSize: chunkSize, + fileSize: fileSize, + } + + _, err := client.uploadChunk(ctx, accountName, shareName, path, fileName, uci, file) + if err != nil { + errors <- err + waitGroup.Done() + return + } + + waitGroup.Done() + return + }(i) + } + waitGroup.Wait() + + // TODO: we should switch to hashicorp/multi-error here + if len(errors) > 0 { + return fmt.Errorf("Error uploading file: %s", <-errors) + } + + return nil +} + +type uploadChunkInput struct { + thisChunk int + chunkSize int + fileSize int64 +} + +func (client Client) uploadChunk(ctx context.Context, accountName, shareName, path, fileName string, input uploadChunkInput, file *os.File) (result autorest.Response, err error) { + startBytes := int64(input.chunkSize * input.thisChunk) + endBytes := startBytes + int64(input.chunkSize) + + // the last size may exceed the size of the file + remaining := input.fileSize - startBytes + if int64(input.chunkSize) > remaining { + endBytes = startBytes + remaining + } + + bytesToRead := int(endBytes) - int(startBytes) + bytes := make([]byte, bytesToRead) + + _, err = file.ReadAt(bytes, startBytes) + if err != nil { + if err != io.EOF { + return result, fmt.Errorf("Error reading bytes: %s", err) + } + } + + putBytesInput := PutByteRangeInput{ + StartBytes: startBytes, + EndBytes: endBytes, + Content: bytes, + } + result, err = client.PutByteRange(ctx, accountName, shareName, path, fileName, putBytesInput) + if err != nil { + return result, fmt.Errorf("Error putting bytes: %s", err) + } + + return +} diff --git a/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/ranges_list.go b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/ranges_list.go new file mode 100644 index 000000000000..ea309f97ddb2 --- /dev/null +++ 
b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/ranges_list.go @@ -0,0 +1,114 @@ +package files + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/tombuildsstuff/giovanni/storage/internal/endpoints" +) + +type ListRangesResult struct { + autorest.Response + + Ranges []Range `xml:"Range"` +} + +type Range struct { + Start string `xml:"Start"` + End string `xml:"End"` +} + +// ListRanges returns the list of valid ranges for the specified File. +func (client Client) ListRanges(ctx context.Context, accountName, shareName, path, fileName string) (result ListRangesResult, err error) { + if accountName == "" { + return result, validation.NewError("files.Client", "ListRanges", "`accountName` cannot be an empty string.") + } + if shareName == "" { + return result, validation.NewError("files.Client", "ListRanges", "`shareName` cannot be an empty string.") + } + if strings.ToLower(shareName) != shareName { + return result, validation.NewError("files.Client", "ListRanges", "`shareName` must be a lower-cased string.") + } + if path == "" { + return result, validation.NewError("files.Client", "ListRanges", "`path` cannot be an empty string.") + } + if fileName == "" { + return result, validation.NewError("files.Client", "ListRanges", "`fileName` cannot be an empty string.") + } + + req, err := client.ListRangesPreparer(ctx, accountName, shareName, path, fileName) + if err != nil { + err = autorest.NewErrorWithError(err, "files.Client", "ListRanges", nil, "Failure preparing request") + return + } + + resp, err := client.ListRangesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "files.Client", "ListRanges", resp, "Failure sending request") + return + } + + result, err = client.ListRangesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "files.Client", "ListRanges", resp, "Failure responding to request") + return + } + + return +} + +// ListRangesPreparer prepares the ListRanges request. +func (client Client) ListRangesPreparer(ctx context.Context, accountName, shareName, path, fileName string) (*http.Request, error) { + if path != "" { + path = fmt.Sprintf("%s/", path) + } + pathParameters := map[string]interface{}{ + "shareName": autorest.Encode("path", shareName), + "directory": autorest.Encode("path", path), + "fileName": autorest.Encode("path", fileName), + } + + queryParameters := map[string]interface{}{ + "comp": autorest.Encode("query", "rangelist"), + } + + headers := map[string]interface{}{ + "x-ms-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/xml; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(endpoints.GetFileEndpoint(client.BaseURI, accountName)), + autorest.WithPathParameters("/{shareName}/{directory}{fileName}", pathParameters), + autorest.WithHeaders(headers), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListRangesSender sends the ListRanges request. The method will close the +// http.Response Body if it receives an error. 
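+// For reference, a rough usage sketch of the ListRanges operation above
+// (illustrative only; `c` is assumed to be an authorized files.Client and
+// `ctx` a context.Context; note that `path` must be non-empty here):
+//
+//	ranges, err := c.ListRanges(ctx, "account1", "share1", "dir1", "file1.txt")
+//	if err == nil {
+//		for _, r := range ranges.Ranges {
+//			fmt.Printf("valid range: %s-%s\n", r.Start, r.End)
+//		}
+//	}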
+func (client Client) ListRangesSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListRangesResponder handles the response to the ListRanges request. The method always +// closes the http.Response Body. +func (client Client) ListRangesResponder(resp *http.Response) (result ListRangesResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingXML(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + + return +} diff --git a/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/resource_id.go b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/resource_id.go new file mode 100644 index 000000000000..f18e702e81d9 --- /dev/null +++ b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/resource_id.go @@ -0,0 +1,64 @@ +package files + +import ( + "fmt" + "net/url" + "strings" + + "github.com/tombuildsstuff/giovanni/storage/internal/endpoints" +) + +// GetResourceID returns the Resource ID for the given File +// This can be useful when, for example, you're using this as a unique identifier +func (client Client) GetResourceID(accountName, shareName, directoryName, filePath string) string { + domain := endpoints.GetFileEndpoint(client.BaseURI, accountName) + return fmt.Sprintf("%s/%s/%s/%s", domain, shareName, directoryName, filePath) +} + +type ResourceID struct { + AccountName string + DirectoryName string + FileName string + ShareName string +} + +// ParseResourceID parses the specified Resource ID and returns an object +// which can be used to interact with Files within a Storage Share. 
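+// A rough usage sketch (illustrative only; the ID below matches the format
+// produced by GetResourceID above):
+//
+//	id, err := ParseResourceID("https://account1.file.core.chinacloudapi.cn/share1/directory1/file1.txt")
+//	if err == nil {
+//		fmt.Printf("share %q, directory %q, file %q\n", id.ShareName, id.DirectoryName, id.FileName)
+//	}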
+func ParseResourceID(id string) (*ResourceID, error) { + // example: https://account1.file.core.chinacloudapi.cn/share1/directory1/file1.txt + // example: https://account1.file.core.chinacloudapi.cn/share1/directory1/directory2/file1.txt + + if id == "" { + return nil, fmt.Errorf("`id` was empty") + } + + uri, err := url.Parse(id) + if err != nil { + return nil, fmt.Errorf("Error parsing ID as a URL: %s", err) + } + + accountName, err := endpoints.GetAccountNameFromEndpoint(uri.Host) + if err != nil { + return nil, fmt.Errorf("Error parsing Account Name: %s", err) + } + + path := strings.TrimPrefix(uri.Path, "/") + segments := strings.Split(path, "/") + if len(segments) == 0 { + return nil, fmt.Errorf("Expected the path to contain segments but got none") + } + + shareName := segments[0] + fileName := segments[len(segments)-1] + + directoryName := strings.TrimPrefix(path, shareName) + directoryName = strings.TrimPrefix(directoryName, "/") + directoryName = strings.TrimSuffix(directoryName, fileName) + directoryName = strings.TrimSuffix(directoryName, "/") + return &ResourceID{ + AccountName: *accountName, + ShareName: shareName, + DirectoryName: directoryName, + FileName: fileName, + }, nil +} diff --git a/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/version.go b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/version.go new file mode 100644 index 000000000000..90529be1118f --- /dev/null +++ b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/version.go @@ -0,0 +1,14 @@ +package files + +import ( + "fmt" + + "github.com/tombuildsstuff/giovanni/version" +) + +// APIVersion is the version of the API used for all Storage API Operations +const APIVersion = "2019-12-12" + +func UserAgent() string { + return fmt.Sprintf("tombuildsstuff/giovanni/%s storage/%s", version.Number, APIVersion) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 271296c9975b..5574cdc8eab2 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -375,6 +375,7 @@ github.com/tombuildsstuff/giovanni/storage/2019-12-12/blob/containers github.com/tombuildsstuff/giovanni/storage/2019-12-12/datalakestore/filesystems github.com/tombuildsstuff/giovanni/storage/2019-12-12/datalakestore/paths github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/directories +github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/shares github.com/tombuildsstuff/giovanni/storage/2019-12-12/queue/queues github.com/tombuildsstuff/giovanni/storage/2019-12-12/table/entities From f47025cfa1acc800130415713a9e75c72514b476 Mon Sep 17 00:00:00 2001 From: Matthew Frahry Date: Mon, 23 Nov 2020 12:30:51 -0800 Subject: [PATCH 02/11] Testing --- .../resource_arm_storage_share_file.go | 99 +++++++++++++++++-- .../resource_arm_storage_share_file_test.go | 45 +++++++++ 2 files changed, 136 insertions(+), 8 deletions(-) diff --git a/azurerm/internal/services/storage/resource_arm_storage_share_file.go b/azurerm/internal/services/storage/resource_arm_storage_share_file.go index 3622cc35cb4a..f6d7e8a41fa6 100644 --- a/azurerm/internal/services/storage/resource_arm_storage_share_file.go +++ b/azurerm/internal/services/storage/resource_arm_storage_share_file.go @@ -2,10 +2,11 @@ package storage import ( "fmt" - "github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files" "log" "time" + "github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files" + 
"github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" @@ -53,11 +54,45 @@ func resourceArmStorageShareFile() *schema.Resource { }, "directory_name": { Type: schema.TypeString, + ForceNew: true, Optional: true, Default: "", ValidateFunc: validate.StorageShareDirectoryName, }, + "content_type": { + Type: schema.TypeString, + Optional: true, + Default: "application/octet-stream", + }, + + "content_length": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + ForceNew: true, + // TODO check for 512 divisibility + // ValidateFunc: + }, + + "content_encoding": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + + "content_md5": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + + "content_disposition": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + "metadata": MetaDataSchema(), }, } @@ -102,8 +137,17 @@ func resourceArmStorageShareFileCreate(d *schema.ResourceData, meta interface{}) } input := files.CreateInput{ - MetaData: metaData, + MetaData: metaData, + ContentType: utils.String(d.Get("content_type").(string)), + ContentLength: int64(d.Get("content_length").(int)), + ContentEncoding: utils.String(d.Get("content_encoding").(string)), + ContentDisposition: utils.String(d.Get("content_disposition").(string)), } + + if v, ok := d.GetOk("content_md5"); ok { + input.ContentMD5 = utils.String(v.(string)) + } + if _, err := client.Create(ctx, accountName, shareName, directoryName, fileName, input); err != nil { return fmt.Errorf("Error creating File %q (File Share %q / Account %q): %+v", fileName, shareName, accountName, err) } @@ -141,9 +185,6 @@ func resourceArmStorageShareFileUpdate(d *schema.ResourceData, meta interface{}) return err } - metaDataRaw := d.Get("metadata").(map[string]interface{}) - metaData := ExpandMetaData(metaDataRaw) - account, err := storageClient.FindAccount(ctx, id.AccountName) if err != nil { return fmt.Errorf("Error retrieving Account %q for File %q (Share %q): %s", id.AccountName, id.FileName, id.ShareName, err) @@ -154,11 +195,48 @@ func resourceArmStorageShareFileUpdate(d *schema.ResourceData, meta interface{}) client, err := storageClient.FileShareFilesClient(ctx, *account) if err != nil { - return fmt.Errorf("Error building File Share File Client: %s", err) + return fmt.Errorf("Error building File Share Directories Client: %s", err) } - if _, err := client.SetMetaData(ctx, id.AccountName, id.ShareName, id.DirectoryName, id.FileName, metaData); err != nil { - return fmt.Errorf("Error updating MetaData for File %q (File Share %q / Account %q): %+v", id.FileName, id.ShareName, id.AccountName, err) + existing, err := client.GetProperties(ctx, id.AccountName, id.ShareName, id.DirectoryName, id.FileName) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing File %q (File Share %q / Storage Account %q / Resource Group %q): %s", id.FileName, id.ShareName, id.AccountName, account.ResourceGroup, err) + } + } + + if d.HasChange("metadata") { + metaDataRaw := d.Get("metadata").(map[string]interface{}) + metaData := ExpandMetaData(metaDataRaw) + + account, err := storageClient.FindAccount(ctx, id.AccountName) + if err != nil { + return fmt.Errorf("Error retrieving Account %q for File %q (Share %q): %s", 
id.AccountName, id.FileName, id.ShareName, err) + } + if account == nil { + return fmt.Errorf("Unable to locate Storage Account %q!", id.AccountName) + } + + if _, err := client.SetMetaData(ctx, id.AccountName, id.ShareName, id.DirectoryName, id.FileName, metaData); err != nil { + return fmt.Errorf("Error updating MetaData for File %q (File Share %q / Account %q): %+v", id.FileName, id.ShareName, id.AccountName, err) + } + } + + if d.HasChange("content_type") || d.HasChange("content_length") || d.HasChange("content_encoding") || d.HasChange("content_disposition") || d.HasChange("content_md5") { + input := files.SetPropertiesInput{ + ContentType: utils.String(d.Get("content_type").(string)), + ContentLength: utils.Int64(int64(d.Get("content_length").(int))), + ContentEncoding: utils.String(d.Get("content_encoding").(string)), + ContentDisposition: utils.String(d.Get("content_disposition").(string)), + } + + if v, ok := d.GetOk("content_md5"); ok { + input.ContentMD5 = utils.String(v.(string)) + } + + if _, err := client.SetProperties(ctx, id.AccountName, id.ShareName, id.DirectoryName, id.FileName, input); err != nil { + return fmt.Errorf("Error creating File %q (File Share %q / Account %q): %+v", id.FileName, id.ShareName, id.AccountName, err) + } } return resourceArmStorageShareFileRead(d, meta) @@ -202,6 +280,11 @@ func resourceArmStorageShareFileRead(d *schema.ResourceData, meta interface{}) e if err := d.Set("metadata", FlattenMetaData(props.MetaData)); err != nil { return fmt.Errorf("Error setting `metadata`: %s", err) } + d.Set("content_type", props.ContentType) + d.Set("content_length", props.ContentLength) + d.Set("content_encoding", props.ContentEncoding) + d.Set("content_md5", props.ContentMD5) + d.Set("content_disposition", props.ContentDisposition) return nil } diff --git a/azurerm/internal/services/storage/tests/resource_arm_storage_share_file_test.go b/azurerm/internal/services/storage/tests/resource_arm_storage_share_file_test.go index 6c663d714409..639b0bf307cb 100644 --- a/azurerm/internal/services/storage/tests/resource_arm_storage_share_file_test.go +++ b/azurerm/internal/services/storage/tests/resource_arm_storage_share_file_test.go @@ -49,6 +49,25 @@ func TestAccAzureRMStorageShareFile_requiresImport(t *testing.T) { }) } +func TestAccAzureRMStorageShareFile_complete(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_storage_share_file", "test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMStorageShareFileDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMStorageShareFile_complete(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMStorageShareFileExists(data.ResourceName), + ), + }, + data.ImportStep(), + }, + }) +} + func testCheckAzureRMStorageShareFileExists(resourceName string) resource.TestCheckFunc { return func(s *terraform.State) error { storageClient := acceptance.AzureProvider.Meta().(*clients.Client).Storage @@ -144,6 +163,10 @@ resource "azurerm_storage_share_file" "test" { name = "dir" share_name = azurerm_storage_share.test.name storage_account_name = azurerm_storage_account.test.name + + metadata = { + hello = "world" + } } `, template) } @@ -187,3 +210,25 @@ resource "azurerm_storage_share" "test" { } `, data.RandomInteger, data.Locations.Primary, data.RandomString) } + +func testAccAzureRMStorageShareFile_complete(data acceptance.TestData) string { + template := 
testAccAzureRMStorageShareFile_template(data) + return fmt.Sprintf(` +%s + +resource "azurerm_storage_share_file" "test" { + name = "dir" + share_name = azurerm_storage_share.test.name + storage_account_name = azurerm_storage_account.test.name + + content_type = "test_content_type" + content_length = 100 + content_encoding = "test_encoding" + content_disposition = "test_content_disposition" + + metadata = { + hello = "world" + } +} +`, template) +} From f5e47d40ef1ab6cc863e171054f7636e3e4ff38d Mon Sep 17 00:00:00 2001 From: Matthew Frahry Date: Mon, 23 Nov 2020 22:03:21 -0800 Subject: [PATCH 03/11] Update/docs --- .../resource_arm_storage_share_file.go | 95 +++++++++---------- .../resource_arm_storage_share_file_test.go | 86 ++++++++++++++++- .../2019-12-12/file/files/properties_set.go | 12 ++- website/azurerm.erb | 4 + .../docs/r/storage_share_file.html.markdown | 90 ++++++++++++++++++ 5 files changed, 230 insertions(+), 57 deletions(-) create mode 100644 website/docs/r/storage_share_file.html.markdown diff --git a/azurerm/internal/services/storage/resource_arm_storage_share_file.go b/azurerm/internal/services/storage/resource_arm_storage_share_file.go index f6d7e8a41fa6..3e90856f9c70 100644 --- a/azurerm/internal/services/storage/resource_arm_storage_share_file.go +++ b/azurerm/internal/services/storage/resource_arm_storage_share_file.go @@ -3,6 +3,7 @@ package storage import ( "fmt" "log" + "os" "time" "github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files" @@ -66,15 +67,6 @@ func resourceArmStorageShareFile() *schema.Resource { Default: "application/octet-stream", }, - "content_length": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - ForceNew: true, - // TODO check for 512 divisibility - // ValidateFunc: - }, - "content_encoding": { Type: schema.TypeString, Optional: true, @@ -93,6 +85,19 @@ func resourceArmStorageShareFile() *schema.Resource { ValidateFunc: validation.StringIsNotEmpty, }, + "source": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringIsNotEmpty, + ForceNew: true, + }, + + "parallelism": { + Type: schema.TypeInt, + Optional: true, + Default: 4, + }, + "metadata": MetaDataSchema(), }, } @@ -108,9 +113,6 @@ func resourceArmStorageShareFileCreate(d *schema.ResourceData, meta interface{}) fileName := d.Get("name").(string) directoryName := d.Get("directory_name").(string) - metaDataRaw := d.Get("metadata").(map[string]interface{}) - metaData := ExpandMetaData(metaDataRaw) - account, err := storageClient.FindAccount(ctx, accountName) if err != nil { return fmt.Errorf("Error retrieving Account %q for File %q (Share %q): %s", accountName, fileName, shareName, err) @@ -137,9 +139,8 @@ func resourceArmStorageShareFileCreate(d *schema.ResourceData, meta interface{}) } input := files.CreateInput{ - MetaData: metaData, + MetaData: ExpandMetaData(d.Get("metadata").(map[string]interface{})), ContentType: utils.String(d.Get("content_type").(string)), - ContentLength: int64(d.Get("content_length").(int)), ContentEncoding: utils.String(d.Get("content_encoding").(string)), ContentDisposition: utils.String(d.Get("content_disposition").(string)), } @@ -148,26 +149,30 @@ func resourceArmStorageShareFileCreate(d *schema.ResourceData, meta interface{}) input.ContentMD5 = utils.String(v.(string)) } + var file *os.File + if v, ok := d.GetOk("source"); ok { + file, err = os.Open(v.(string)) + if err != nil { + return fmt.Errorf("opening file : %s", err) + } + + info, err := file.Stat() + if err != nil { + return fmt.Errorf("'stat'-ing File %q 
(File Share %q / Account %q): %+v", fileName, shareName, accountName, err) + } + + input.ContentLength = info.Size() + } + if _, err := client.Create(ctx, accountName, shareName, directoryName, fileName, input); err != nil { - return fmt.Errorf("Error creating File %q (File Share %q / Account %q): %+v", fileName, shareName, accountName, err) + return fmt.Errorf("creating File %q (File Share %q / Account %q): %+v", fileName, shareName, accountName, err) } - // TODO Check if this is true - /* - // Storage Share Directories are eventually consistent - log.Printf("[DEBUG] Waiting for File %q (File Share %q / Account %q) to become available", fileName, shareName, accountName) - stateConf := &resource.StateChangeConf{ - Pending: []string{"404"}, - Target: []string{"200"}, - Refresh: storageShareDirectoryRefreshFunc(ctx, client, accountName, shareName, directoryName), - MinTimeout: 10 * time.Second, - ContinuousTargetOccurence: 5, - Timeout: d.Timeout(schema.TimeoutCreate), + if file != nil { + if err := client.PutFile(ctx, accountName, shareName, directoryName, fileName, file, 4); err != nil { + return fmt.Errorf("uploading File: %q (File Share %q / Account %q): %+v", fileName, shareName, accountName, err) } - - if _, err := stateConf.WaitForState(); err != nil { - return fmt.Errorf("Error waiting for Directory %q (File Share %q / Account %q) to become available: %s", directoryName, shareName, accountName, err) - }*/ + } resourceID := client.GetResourceID(accountName, shareName, directoryName, fileName) d.SetId(resourceID) @@ -205,29 +210,12 @@ func resourceArmStorageShareFileUpdate(d *schema.ResourceData, meta interface{}) } } - if d.HasChange("metadata") { - metaDataRaw := d.Get("metadata").(map[string]interface{}) - metaData := ExpandMetaData(metaDataRaw) - - account, err := storageClient.FindAccount(ctx, id.AccountName) - if err != nil { - return fmt.Errorf("Error retrieving Account %q for File %q (Share %q): %s", id.AccountName, id.FileName, id.ShareName, err) - } - if account == nil { - return fmt.Errorf("Unable to locate Storage Account %q!", id.AccountName) - } - - if _, err := client.SetMetaData(ctx, id.AccountName, id.ShareName, id.DirectoryName, id.FileName, metaData); err != nil { - return fmt.Errorf("Error updating MetaData for File %q (File Share %q / Account %q): %+v", id.FileName, id.ShareName, id.AccountName, err) - } - } - - if d.HasChange("content_type") || d.HasChange("content_length") || d.HasChange("content_encoding") || d.HasChange("content_disposition") || d.HasChange("content_md5") { + if d.HasChange("content_type") || d.HasChange("content_encoding") || d.HasChange("content_disposition") || d.HasChange("content_md5") { input := files.SetPropertiesInput{ ContentType: utils.String(d.Get("content_type").(string)), - ContentLength: utils.Int64(int64(d.Get("content_length").(int))), ContentEncoding: utils.String(d.Get("content_encoding").(string)), ContentDisposition: utils.String(d.Get("content_disposition").(string)), + MetaData: ExpandMetaData(d.Get("metadata").(map[string]interface{})), } if v, ok := d.GetOk("content_md5"); ok { @@ -239,6 +227,14 @@ func resourceArmStorageShareFileUpdate(d *schema.ResourceData, meta interface{}) } } + /* + metaDataRaw := d.Get("metadata").(map[string]interface{}) + metaData := ExpandMetaData(metaDataRaw) + + if _, err := client.SetMetaData(ctx, id.AccountName, id.ShareName, id.DirectoryName, id.FileName, metaData); err != nil { + return fmt.Errorf("updating MetaData for File %q (File Share %q / Account %q): %+v", id.FileName, 
id.ShareName, id.AccountName, err) + }*/ + return resourceArmStorageShareFileRead(d, meta) } @@ -281,7 +277,6 @@ func resourceArmStorageShareFileRead(d *schema.ResourceData, meta interface{}) e return fmt.Errorf("Error setting `metadata`: %s", err) } d.Set("content_type", props.ContentType) - d.Set("content_length", props.ContentLength) d.Set("content_encoding", props.ContentEncoding) d.Set("content_md5", props.ContentMD5) d.Set("content_disposition", props.ContentDisposition) diff --git a/azurerm/internal/services/storage/tests/resource_arm_storage_share_file_test.go b/azurerm/internal/services/storage/tests/resource_arm_storage_share_file_test.go index 639b0bf307cb..b8e30c1fb59c 100644 --- a/azurerm/internal/services/storage/tests/resource_arm_storage_share_file_test.go +++ b/azurerm/internal/services/storage/tests/resource_arm_storage_share_file_test.go @@ -2,6 +2,7 @@ package tests import ( "fmt" + "io/ioutil" "net/http" "testing" @@ -25,7 +26,7 @@ func TestAccAzureRMStorageShareFile_basic(t *testing.T) { testCheckAzureRMStorageShareFileExists(data.ResourceName), ), }, - data.ImportStep(), + data.ImportStep("parallelism"), }, }) } @@ -63,7 +64,67 @@ func TestAccAzureRMStorageShareFile_complete(t *testing.T) { testCheckAzureRMStorageShareFileExists(data.ResourceName), ), }, - data.ImportStep(), + data.ImportStep("parallelism"), + }, + }) +} + +func TestAccAzureRMStorageShareFile_update(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_storage_share_file", "test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMStorageShareFileDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMStorageShareFile_basic(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMStorageShareFileExists(data.ResourceName), + ), + }, + data.ImportStep("parallelism"), + { + Config: testAccAzureRMStorageShareFile_complete(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMStorageShareFileExists(data.ResourceName), + ), + }, + data.ImportStep("parallelism"), + { + Config: testAccAzureRMStorageShareFile_basic(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMStorageShareFileExists(data.ResourceName), + ), + }, + data.ImportStep("parallelism"), + }, + }) +} + +func TestAccAzureRMStorageShareFile_withFile(t *testing.T) { + sourceBlob, err := ioutil.TempFile("", "") + if err != nil { + t.Fatalf("Failed to create local source blob file") + } + + if err := testAccAzureRMStorageBlob_populateTempFile(sourceBlob); err != nil { + t.Fatalf("Error populating temp file: %s", err) + } + data := acceptance.BuildTestData(t, "azurerm_storage_share_file", "test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMStorageShareFileDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMStorageShareFile_withFile(data, sourceBlob.Name()), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMStorageShareFileExists(data.ResourceName), + ), + }, + data.ImportStep("source", "parallelism"), }, }) } @@ -222,7 +283,6 @@ resource "azurerm_storage_share_file" "test" { storage_account_name = azurerm_storage_account.test.name content_type = "test_content_type" - content_length = 100 content_encoding = "test_encoding" content_disposition = "test_content_disposition" @@ -232,3 +292,23 @@ resource "azurerm_storage_share_file" "test" 
{ } `, template) } + +func testAccAzureRMStorageShareFile_withFile(data acceptance.TestData, fileName string) string { + template := testAccAzureRMStorageShareFile_template(data) + return fmt.Sprintf(` +%s + +resource "azurerm_storage_share_file" "test" { + name = "dir" + share_name = azurerm_storage_share.test.name + storage_account_name = azurerm_storage_account.test.name + + + source = "%s" + + metadata = { + hello = "world" + } +} +`, template, fileName) +} diff --git a/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/properties_set.go b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/properties_set.go index b5514be5c0dd..521b1bba7be7 100644 --- a/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/properties_set.go +++ b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/properties_set.go @@ -11,13 +11,14 @@ import ( "github.com/Azure/go-autorest/autorest/azure" "github.com/Azure/go-autorest/autorest/validation" "github.com/tombuildsstuff/giovanni/storage/internal/endpoints" + "github.com/tombuildsstuff/giovanni/storage/internal/metadata" ) type SetPropertiesInput struct { // Resizes a file to the specified size. // If the specified byte value is less than the current size of the file, // then all ranges above the specified byte value are cleared. - ContentLength *int64 + ContentLength int64 // Modifies the cache control string for the file. // If this property is not specified on the request, then the property will be cleared for the file. @@ -62,6 +63,9 @@ type SetPropertiesInput struct { // The time at which this file was last modified - if omitted, this'll be set to "now" // This maps to the `x-ms-file-last-write-time` field. LastModified *time.Time + + // MetaData is a mapping of key value pairs which should be assigned to this file + MetaData map[string]string } // SetProperties sets the specified properties on the specified File @@ -124,6 +128,7 @@ func (client Client) SetPropertiesPreparer(ctx context.Context, accountName, sha "x-ms-version": APIVersion, "x-ms-type": "file", + "x-ms-content-length": input.ContentLength, "x-ms-file-permission": "inherit", // TODO: expose this in future "x-ms-file-attributes": "None", // TODO: expose this in future "x-ms-file-creation-time": coalesceDate(input.CreatedAt, "now"), @@ -142,9 +147,6 @@ func (client Client) SetPropertiesPreparer(ctx context.Context, accountName, sha if input.ContentLanguage != nil { headers["x-ms-content-language"] = *input.ContentLanguage } - if input.ContentLength != nil { - headers["x-ms-content-length"] = *input.ContentLength - } if input.ContentMD5 != nil { headers["x-ms-content-md5"] = *input.ContentMD5 } @@ -152,6 +154,8 @@ func (client Client) SetPropertiesPreparer(ctx context.Context, accountName, sha headers["x-ms-content-type"] = *input.ContentType } + headers = metadata.SetIntoHeaders(headers, input.MetaData) + preparer := autorest.CreatePreparer( autorest.AsContentType("application/xml; charset=utf-8"), autorest.AsPut(), diff --git a/website/azurerm.erb b/website/azurerm.erb index cd1492434bea..1f78339cc660 100644 --- a/website/azurerm.erb +++ b/website/azurerm.erb @@ -2894,6 +2894,10 @@ azurerm_storage_share_directory +
+                    <li>
+                        <a href="/docs/providers/azurerm/r/storage_share_file.html">azurerm_storage_share_file</a>
+                    </li>
+
                     <li>
                         <a href="/docs/providers/azurerm/r/storage_sync.html">azurerm_storage_sync</a>
                     </li>
diff --git a/website/docs/r/storage_share_file.html.markdown b/website/docs/r/storage_share_file.html.markdown
new file mode 100644
index 000000000000..97d68f5beba1
--- /dev/null
+++ b/website/docs/r/storage_share_file.html.markdown
@@ -0,0 +1,90 @@
+---
+subcategory: "Storage"
+layout: "azurerm"
+page_title: "Azure Resource Manager: azurerm_storage_share_file"
+description: |-
+  Manages a File within an Azure Storage File Share.
+---
+
+# azurerm_storage_share_file
+
+Manages a File within an Azure Storage File Share.
+
+## Example Usage
+
+```hcl
+resource "azurerm_resource_group" "example" {
+  name     = "azuretest"
+  location = "West Europe"
+}
+
+resource "azurerm_storage_account" "example" {
+  name                     = "azureteststorage"
+  resource_group_name      = azurerm_resource_group.example.name
+  location                 = azurerm_resource_group.example.location
+  account_tier             = "Standard"
+  account_replication_type = "LRS"
+}
+
+resource "azurerm_storage_share" "example" {
+  name                 = "sharename"
+  storage_account_name = azurerm_storage_account.example.name
+  quota                = 50
+}
+
+resource "azurerm_storage_share_file" "example" {
+  name                 = "my-awesome-content.zip"
+  share_name           = azurerm_storage_share.example.name
+  storage_account_name = azurerm_storage_account.example.name
+  source               = "some-local-file.zip"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) The name (or path) of the File that should be created within this File Share. Changing this forces a new resource to be created.
+
+* `share_name` - (Required) The name of the File Share where this File should be created. Changing this forces a new resource to be created.
+
+* `storage_account_name` - (Required) The name of the Storage Account within which the File Share is located. Changing this forces a new resource to be created.
+
+* `directory_name` - (Optional) The storage share directory that you would like the file placed into. Changing this forces a new resource to be created.
+
+* `source` - (Optional) An absolute path to a file on the local system.
+
+* `content_type` - (Optional) The content type of the share file. Defaults to `application/octet-stream`.
+
+* `content_md5` - (Optional) The MD5 sum of the file contents. Changing this forces a new resource to be created.
+
+* `content_encoding` - (Optional) Specifies which content encodings have been applied to the file.
+
+* `content_disposition` - (Optional) Sets the file’s Content-Disposition header.
+
+* `parallelism` - (Optional) The number of workers per CPU core to run for concurrent uploads. Defaults to `4`.
+
+* `metadata` - (Optional) A mapping of metadata to assign to this file.
+
+## Attributes Reference
+
+The following attributes are exported in addition to the arguments listed above:
+
+* `id` - The ID of the file within the File Share.
+
+## Timeouts
+
+The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/docs/configuration/resources.html#timeouts) for certain actions:
+
+* `create` - (Defaults to 30 minutes) Used when creating the Storage Share File.
+* `update` - (Defaults to 30 minutes) Used when updating the Storage Share File.
+* `read` - (Defaults to 5 minutes) Used when retrieving the Storage Share File.
+* `delete` - (Defaults to 30 minutes) Used when deleting the Storage Share File.
+
+## Import
+
+Files within an Azure Storage File Share can be imported using the `resource id`, e.g.
+ +```shell +terraform import azurerm_storage_share_file.example https://account1.file.core.windows.net/share1/file1 +``` From 6c64edd4a4ae7e68b6b2eee356607d897f7d1cc8 Mon Sep 17 00:00:00 2001 From: Matthew Frahry Date: Tue, 24 Nov 2020 10:59:48 -0800 Subject: [PATCH 04/11] Fix tests --- .../services/storage/resource_arm_storage_share_file.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/azurerm/internal/services/storage/resource_arm_storage_share_file.go b/azurerm/internal/services/storage/resource_arm_storage_share_file.go index 3e90856f9c70..66e0804eaf1f 100644 --- a/azurerm/internal/services/storage/resource_arm_storage_share_file.go +++ b/azurerm/internal/services/storage/resource_arm_storage_share_file.go @@ -6,8 +6,6 @@ import ( "os" "time" - "github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" @@ -15,6 +13,7 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" + "github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files" ) func resourceArmStorageShareFile() *schema.Resource { From 58e9da4e709028080a9765167c8bc93dd93abfa2 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Thu, 26 Nov 2020 17:57:29 +0100 Subject: [PATCH 05/11] storage_share_file: removing parallelism since it's unused --- .../storage/resource_arm_storage_share_file.go | 6 ------ .../tests/resource_arm_storage_share_file_test.go | 12 ++++++------ website/docs/r/storage_share_file.html.markdown | 4 +--- 3 files changed, 7 insertions(+), 15 deletions(-) diff --git a/azurerm/internal/services/storage/resource_arm_storage_share_file.go b/azurerm/internal/services/storage/resource_arm_storage_share_file.go index 66e0804eaf1f..a056c5f6437d 100644 --- a/azurerm/internal/services/storage/resource_arm_storage_share_file.go +++ b/azurerm/internal/services/storage/resource_arm_storage_share_file.go @@ -91,12 +91,6 @@ func resourceArmStorageShareFile() *schema.Resource { ForceNew: true, }, - "parallelism": { - Type: schema.TypeInt, - Optional: true, - Default: 4, - }, - "metadata": MetaDataSchema(), }, } diff --git a/azurerm/internal/services/storage/tests/resource_arm_storage_share_file_test.go b/azurerm/internal/services/storage/tests/resource_arm_storage_share_file_test.go index b8e30c1fb59c..78d3d928bbb0 100644 --- a/azurerm/internal/services/storage/tests/resource_arm_storage_share_file_test.go +++ b/azurerm/internal/services/storage/tests/resource_arm_storage_share_file_test.go @@ -26,7 +26,7 @@ func TestAccAzureRMStorageShareFile_basic(t *testing.T) { testCheckAzureRMStorageShareFileExists(data.ResourceName), ), }, - data.ImportStep("parallelism"), + data.ImportStep(), }, }) } @@ -64,7 +64,7 @@ func TestAccAzureRMStorageShareFile_complete(t *testing.T) { testCheckAzureRMStorageShareFileExists(data.ResourceName), ), }, - data.ImportStep("parallelism"), + data.ImportStep(), }, }) } @@ -83,21 +83,21 @@ func TestAccAzureRMStorageShareFile_update(t *testing.T) { testCheckAzureRMStorageShareFileExists(data.ResourceName), ), }, - data.ImportStep("parallelism"), + data.ImportStep(), { Config: testAccAzureRMStorageShareFile_complete(data), Check: resource.ComposeTestCheckFunc( 
testCheckAzureRMStorageShareFileExists(data.ResourceName), ), }, - data.ImportStep("parallelism"), + data.ImportStep(), { Config: testAccAzureRMStorageShareFile_basic(data), Check: resource.ComposeTestCheckFunc( testCheckAzureRMStorageShareFileExists(data.ResourceName), ), }, - data.ImportStep("parallelism"), + data.ImportStep(), }, }) } @@ -124,7 +124,7 @@ func TestAccAzureRMStorageShareFile_withFile(t *testing.T) { testCheckAzureRMStorageShareFileExists(data.ResourceName), ), }, - data.ImportStep("source", "parallelism"), + data.ImportStep("source"), }, }) } diff --git a/website/docs/r/storage_share_file.html.markdown b/website/docs/r/storage_share_file.html.markdown index 97d68f5beba1..35c843cb21c2 100644 --- a/website/docs/r/storage_share_file.html.markdown +++ b/website/docs/r/storage_share_file.html.markdown @@ -14,7 +14,7 @@ Manages a File within an Azure Storage File Share. ```hcl resource "azurerm_resource_group" "example" { - name = "azuretest" + name = "example-resources" location = "West Europe" } @@ -62,8 +62,6 @@ The following arguments are supported: * `content_disposition` - (Optional) Sets the file’s Content-Disposition header. -* `parallelism` - (Optional) The number of workers per CPU core to run for concurrent uploads. Defaults to `4`. - * `metadata` - (Optional) A mapping of metadata to assign to this file. ## Attributes Reference From 8e14dd55d597ae1b8b28a458797cb11665da5dbd Mon Sep 17 00:00:00 2001 From: Matthew Frahry Date: Mon, 30 Nov 2020 13:05:21 -0800 Subject: [PATCH 06/11] Update giovanni vendor --- .../resource_arm_storage_share_file.go | 21 ------------------- go.mod | 2 +- go.sum | 2 ++ .../giovanni/version/version.go | 2 +- vendor/modules.txt | 2 +- 5 files changed, 5 insertions(+), 24 deletions(-) diff --git a/azurerm/internal/services/storage/resource_arm_storage_share_file.go b/azurerm/internal/services/storage/resource_arm_storage_share_file.go index 66e0804eaf1f..13268a1864ff 100644 --- a/azurerm/internal/services/storage/resource_arm_storage_share_file.go +++ b/azurerm/internal/services/storage/resource_arm_storage_share_file.go @@ -226,14 +226,6 @@ func resourceArmStorageShareFileUpdate(d *schema.ResourceData, meta interface{}) } } - /* - metaDataRaw := d.Get("metadata").(map[string]interface{}) - metaData := ExpandMetaData(metaDataRaw) - - if _, err := client.SetMetaData(ctx, id.AccountName, id.ShareName, id.DirectoryName, id.FileName, metaData); err != nil { - return fmt.Errorf("updating MetaData for File %q (File Share %q / Account %q): %+v", id.FileName, id.ShareName, id.AccountName, err) - }*/ - return resourceArmStorageShareFileRead(d, meta) } @@ -312,16 +304,3 @@ func resourceArmStorageShareFileDelete(d *schema.ResourceData, meta interface{}) return nil } - -/* -func storageShareDirectoryRefreshFunc(ctx context.Context, client *directories.Client, accountName, shareName, directoryName string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - res, err := client.Get(ctx, accountName, shareName, directoryName) - if err != nil { - return nil, strconv.Itoa(res.StatusCode), fmt.Errorf("Error retrieving Directory %q (File Share %q / Account %q): %s", directoryName, shareName, accountName, err) - } - - return res, strconv.Itoa(res.StatusCode), nil - } -} -*/ diff --git a/go.mod b/go.mod index ad73bab49da9..634c44255159 100644 --- a/go.mod +++ b/go.mod @@ -20,7 +20,7 @@ require ( github.com/satori/uuid v0.0.0-20160927100844-b061729afc07 github.com/sergi/go-diff v1.1.0 github.com/terraform-providers/terraform-provider-azuread 
v0.9.0 - github.com/tombuildsstuff/giovanni v0.14.0 + github.com/tombuildsstuff/giovanni v0.15.0 golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897 golang.org/x/net v0.0.0-20200301022130-244492dfa37a gopkg.in/yaml.v2 v2.2.4 diff --git a/go.sum b/go.sum index 10d7d4d59c0f..ec1cbdcfff32 100644 --- a/go.sum +++ b/go.sum @@ -264,6 +264,8 @@ github.com/terraform-providers/terraform-provider-azuread v0.9.0 h1:XLzFgVHakq6q github.com/terraform-providers/terraform-provider-azuread v0.9.0/go.mod h1:sSDzB/8CD639+yWo5lZf+NJvGSYQBSS6z+GoET9IrzE= github.com/tombuildsstuff/giovanni v0.14.0 h1:vBgZJHNs8p42Nj4GaffPe7nzs2Z2qIyKUN+7793UggA= github.com/tombuildsstuff/giovanni v0.14.0/go.mod h1:0TZugJPEtqzPlMpuJHYfXY6Dq2uLPrXf98D2XQSxNbA= +github.com/tombuildsstuff/giovanni v0.15.0 h1:DMVKSdnKhYRDqeQUg6JhUxOoeCnO2BUNZJM9z/lSigA= +github.com/tombuildsstuff/giovanni v0.15.0/go.mod h1:0TZugJPEtqzPlMpuJHYfXY6Dq2uLPrXf98D2XQSxNbA= github.com/ulikunitz/xz v0.5.5 h1:pFrO0lVpTBXLpYw+pnLj6TbvHuyjXMfjGeCwSqCVwok= github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= diff --git a/vendor/github.com/tombuildsstuff/giovanni/version/version.go b/vendor/github.com/tombuildsstuff/giovanni/version/version.go index 8f6af1b76b47..0bec9691d477 100644 --- a/vendor/github.com/tombuildsstuff/giovanni/version/version.go +++ b/vendor/github.com/tombuildsstuff/giovanni/version/version.go @@ -1,3 +1,3 @@ package version -const Number = "v0.14.0" +const Number = "v0.15.0" diff --git a/vendor/modules.txt b/vendor/modules.txt index 5574cdc8eab2..6f30a14b43f5 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -368,7 +368,7 @@ github.com/terraform-providers/terraform-provider-azuread/azuread/helpers/slices github.com/terraform-providers/terraform-provider-azuread/azuread/helpers/tf github.com/terraform-providers/terraform-provider-azuread/azuread/helpers/validate github.com/terraform-providers/terraform-provider-azuread/version -# github.com/tombuildsstuff/giovanni v0.14.0 +# github.com/tombuildsstuff/giovanni v0.15.0 github.com/tombuildsstuff/giovanni/storage/2019-12-12/blob/accounts github.com/tombuildsstuff/giovanni/storage/2019-12-12/blob/blobs github.com/tombuildsstuff/giovanni/storage/2019-12-12/blob/containers From 7e7ee7d8eef9e7d309cc2381a03b816e9ece287f Mon Sep 17 00:00:00 2001 From: Matthew Frahry Date: Tue, 1 Dec 2020 14:41:15 -0800 Subject: [PATCH 07/11] Address review --- .../resource_arm_storage_share_file.go | 101 +++++++++++------ .../resource_arm_storage_share_file_test.go | 102 +++++++++--------- .../storage/validate/storage_share.go | 17 +++ .../docs/r/storage_share_file.html.markdown | 7 +- 4 files changed, 142 insertions(+), 85 deletions(-) diff --git a/azurerm/internal/services/storage/resource_arm_storage_share_file.go b/azurerm/internal/services/storage/resource_arm_storage_share_file.go index 738327ab2d73..2c26cd67db72 100644 --- a/azurerm/internal/services/storage/resource_arm_storage_share_file.go +++ b/azurerm/internal/services/storage/resource_arm_storage_share_file.go @@ -6,6 +6,10 @@ import ( "os" "time" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/storage/parse" + + storageValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/storage/validate" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/helper/validation" 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" @@ -40,19 +44,13 @@ func resourceArmStorageShareFile() *schema.Resource { ForceNew: true, // TODO: add validation }, - "share_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.StringIsNotEmpty, - }, - "storage_account_name": { + "storage_share_id": { Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validation.StringIsNotEmpty, + ValidateFunc: storageValidate.StorageShareID, }, - "directory_name": { + "path": { Type: schema.TypeString, ForceNew: true, Optional: true, @@ -101,17 +99,33 @@ func resourceArmStorageShareFileCreate(d *schema.ResourceData, meta interface{}) defer cancel() storageClient := meta.(*clients.Client).Storage - accountName := d.Get("storage_account_name").(string) - shareName := d.Get("share_name").(string) + storageShareID, err := parse.StorageShareDataPlaneID(d.Get("storage_share_id").(string)) + if err != nil { + return err + } + fileName := d.Get("name").(string) - directoryName := d.Get("directory_name").(string) + path := d.Get("path").(string) - account, err := storageClient.FindAccount(ctx, accountName) + account, err := storageClient.FindAccount(ctx, storageShareID.AccountName) if err != nil { - return fmt.Errorf("Error retrieving Account %q for File %q (Share %q): %s", accountName, fileName, shareName, err) + return fmt.Errorf("Error retrieving Account %q for File %q (Share %q): %s", storageShareID.AccountName, fileName, storageShareID.Name, err) } if account == nil { - return fmt.Errorf("Unable to locate Storage Account %q!", accountName) + return fmt.Errorf("Unable to locate Storage Account %q!", storageShareID.AccountName) + } + + fileSharesClient, err := storageClient.FileSharesClient(ctx, *account) + if err != nil { + return fmt.Errorf("building File Share Directories Client: %s", err) + } + + share, err := fileSharesClient.Get(ctx, account.ResourceGroup, storageShareID.AccountName, storageShareID.Name) + if err != nil { + return fmt.Errorf("retrieving Share %q for File %q: %s", storageShareID.Name, fileName, err) + } + if share == nil { + return fmt.Errorf("unable to locate Storage Share %q", storageShareID.Name) } client, err := storageClient.FileShareFilesClient(ctx, *account) @@ -119,15 +133,15 @@ func resourceArmStorageShareFileCreate(d *schema.ResourceData, meta interface{}) return fmt.Errorf("Error building File Share Directories Client: %s", err) } - existing, err := client.GetProperties(ctx, accountName, shareName, directoryName, fileName) + existing, err := client.GetProperties(ctx, storageShareID.AccountName, storageShareID.Name, path, fileName) if err != nil { if !utils.ResponseWasNotFound(existing.Response) { - return fmt.Errorf("Error checking for presence of existing File %q (File Share %q / Storage Account %q / Resource Group %q): %s", fileName, shareName, accountName, account.ResourceGroup, err) + return fmt.Errorf("Error checking for presence of existing File %q (File Share %q / Storage Account %q / Resource Group %q): %s", fileName, storageShareID.Name, storageShareID.AccountName, account.ResourceGroup, err) } } if !utils.ResponseWasNotFound(existing.Response) { - id := client.GetResourceID(accountName, shareName, directoryName, fileName) + id := client.GetResourceID(storageShareID.AccountName, storageShareID.Name, path, fileName) return tf.ImportAsExistsError("azurerm_storage_share_file", id) } @@ -151,23 +165,23 @@ func resourceArmStorageShareFileCreate(d *schema.ResourceData, meta 
interface{}) info, err := file.Stat() if err != nil { - return fmt.Errorf("'stat'-ing File %q (File Share %q / Account %q): %+v", fileName, shareName, accountName, err) + return fmt.Errorf("'stat'-ing File %q (File Share %q / Account %q): %+v", fileName, storageShareID.Name, storageShareID.AccountName, err) } input.ContentLength = info.Size() } - if _, err := client.Create(ctx, accountName, shareName, directoryName, fileName, input); err != nil { - return fmt.Errorf("creating File %q (File Share %q / Account %q): %+v", fileName, shareName, accountName, err) + if _, err := client.Create(ctx, storageShareID.AccountName, storageShareID.Name, path, fileName, input); err != nil { + return fmt.Errorf("creating File %q (File Share %q / Account %q): %+v", fileName, storageShareID.Name, storageShareID.AccountName, err) } if file != nil { - if err := client.PutFile(ctx, accountName, shareName, directoryName, fileName, file, 4); err != nil { - return fmt.Errorf("uploading File: %q (File Share %q / Account %q): %+v", fileName, shareName, accountName, err) + if err := client.PutFile(ctx, storageShareID.AccountName, storageShareID.Name, path, fileName, file, 4); err != nil { + return fmt.Errorf("uploading File: %q (File Share %q / Account %q): %+v", fileName, storageShareID.Name, storageShareID.AccountName, err) } } - resourceID := client.GetResourceID(accountName, shareName, directoryName, fileName) + resourceID := client.GetResourceID(storageShareID.AccountName, storageShareID.Name, path, fileName) d.SetId(resourceID) return resourceArmStorageShareFileRead(d, meta) @@ -185,15 +199,28 @@ func resourceArmStorageShareFileUpdate(d *schema.ResourceData, meta interface{}) account, err := storageClient.FindAccount(ctx, id.AccountName) if err != nil { - return fmt.Errorf("Error retrieving Account %q for File %q (Share %q): %s", id.AccountName, id.FileName, id.ShareName, err) + return fmt.Errorf("retrieving Account %q for File %q (Share %q): %s", id.AccountName, id.FileName, id.ShareName, err) } if account == nil { return fmt.Errorf("Unable to locate Storage Account %q!", id.AccountName) } + fileSharesClient, err := storageClient.FileSharesClient(ctx, *account) + if err != nil { + return fmt.Errorf("building File Share Directories Client: %s", err) + } + + share, err := fileSharesClient.Get(ctx, account.ResourceGroup, id.AccountName, id.ShareName) + if err != nil { + return fmt.Errorf("retrieving Share %q for File %q: %s", id.ShareName, id.FileName, err) + } + if share == nil { + return fmt.Errorf("unable to locate Storage Share %q", id.ShareName) + } + client, err := storageClient.FileShareFilesClient(ctx, *account) if err != nil { - return fmt.Errorf("Error building File Share Directories Client: %s", err) + return fmt.Errorf("building File Share Files Client: %s", err) } existing, err := client.GetProperties(ctx, id.AccountName, id.ShareName, id.DirectoryName, id.FileName) @@ -238,7 +265,22 @@ func resourceArmStorageShareFileRead(d *schema.ResourceData, meta interface{}) e return fmt.Errorf("Error retrieving Account %q for File %q (Share %q): %s", id.AccountName, id.FileName, id.ShareName, err) } if account == nil { - log.Printf("[WARN] Unable to determine Resource Group for Storage Share File %q (Share %s, Account %s) - assuming removed & removing from state", id.FileName, id.ShareName, id.AccountName) + log.Printf("[WARN] Unable to determine Storage Account for Storage Share File %q (Share %s, Account %s) - assuming removed & removing from state", id.FileName, id.ShareName, id.AccountName) + d.SetId("") + 
return nil + } + + fileSharesClient, err := storageClient.FileSharesClient(ctx, *account) + if err != nil { + return fmt.Errorf("building File Share Directories Client: %s", err) + } + + share, err := fileSharesClient.Get(ctx, account.ResourceGroup, id.AccountName, id.ShareName) + if err != nil { + return fmt.Errorf("retrieving Share %q for File %q: %s", id.ShareName, id.FileName, err) + } + if share == nil { + log.Printf("[WARN] Unable to determine Storage Share for Storage Share File %q (Share %s, Account %s) - assuming removed & removing from state", id.FileName, id.ShareName, id.AccountName) d.SetId("") return nil } @@ -254,9 +296,8 @@ func resourceArmStorageShareFileRead(d *schema.ResourceData, meta interface{}) e } d.Set("name", id.FileName) - d.Set("directory_name", id.DirectoryName) - d.Set("share_name", id.ShareName) - d.Set("storage_account_name", id.AccountName) + d.Set("path", id.DirectoryName) + d.Set("storage_share_id", parse.NewStorageShareDataPlaneId(id.AccountName, storageClient.Environment.StorageEndpointSuffix, id.ShareName).ID("")) if err := d.Set("metadata", FlattenMetaData(props.MetaData)); err != nil { return fmt.Errorf("Error setting `metadata`: %s", err) diff --git a/azurerm/internal/services/storage/tests/resource_arm_storage_share_file_test.go b/azurerm/internal/services/storage/tests/resource_arm_storage_share_file_test.go index 78d3d928bbb0..ccd961cf00a0 100644 --- a/azurerm/internal/services/storage/tests/resource_arm_storage_share_file_test.go +++ b/azurerm/internal/services/storage/tests/resource_arm_storage_share_file_test.go @@ -6,6 +6,8 @@ import ( "net/http" "testing" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/storage/parse" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" @@ -141,16 +143,18 @@ func testCheckAzureRMStorageShareFileExists(resourceName string) resource.TestCh } name := rs.Primary.Attributes["name"] - shareName := rs.Primary.Attributes["share_name"] - accountName := rs.Primary.Attributes["storage_account_name"] - directoryName := rs.Primary.Attributes["directory_name"] + storageShareID, err := parse.StorageShareDataPlaneID(rs.Primary.Attributes["storage_share_id"]) + if err != nil { + return err + } + path := rs.Primary.Attributes["path"] - account, err := storageClient.FindAccount(ctx, accountName) + account, err := storageClient.FindAccount(ctx, storageShareID.AccountName) if err != nil { - return fmt.Errorf("Error retrieving Account %q for File %q (Share %q): %s", accountName, name, shareName, err) + return fmt.Errorf("Error retrieving Account %q for File %q (Share %q): %s", storageShareID.AccountName, name, storageShareID.Name, err) } if account == nil { - return fmt.Errorf("Unable to locate Storage Account %q!", accountName) + return fmt.Errorf("Unable to locate Storage Account %q!", storageShareID.AccountName) } client, err := storageClient.FileShareFilesClient(ctx, *account) @@ -158,13 +162,13 @@ func testCheckAzureRMStorageShareFileExists(resourceName string) resource.TestCh return fmt.Errorf("Error building FileShare File Client: %s", err) } - resp, err := client.GetProperties(ctx, accountName, shareName, directoryName, name) + resp, err := client.GetProperties(ctx, storageShareID.AccountName, storageShareID.Name, path, name) if err != nil { return fmt.Errorf("Bad: Get on FileShareFilesClient: %+v", err) } if resp.StatusCode == 
http.StatusNotFound { - return fmt.Errorf("Bad: File %q (File Share %q / Account %q / Resource Group %q) does not exist", name, shareName, accountName, account.ResourceGroup) + return fmt.Errorf("Bad: File %q (File Share %q / Account %q / Resource Group %q) does not exist", name, storageShareID.Name, storageShareID.AccountName, account.ResourceGroup) } return nil @@ -181,13 +185,15 @@ func testCheckAzureRMStorageShareFileDestroy(s *terraform.State) error { } name := rs.Primary.Attributes["name"] - shareName := rs.Primary.Attributes["share_name"] - accountName := rs.Primary.Attributes["storage_account_name"] - directoryName := rs.Primary.Attributes["directory_name"] + storageShareID, err := parse.StorageShareDataPlaneID(rs.Primary.Attributes["storage_share_id"]) + if err != nil { + return err + } + path := rs.Primary.Attributes["path"] - account, err := storageClient.FindAccount(ctx, accountName) + account, err := storageClient.FindAccount(ctx, storageShareID.AccountName) if err != nil { - return fmt.Errorf("Error retrieving Account %q for File %q (Share %q): %s", accountName, name, shareName, err) + return fmt.Errorf("Error retrieving Account %q for File %q (Share %q): %s", storageShareID.AccountName, name, storageShareID.Name, err) } // not found, the account's gone @@ -196,7 +202,7 @@ func testCheckAzureRMStorageShareFileDestroy(s *terraform.State) error { } if err != nil { - return fmt.Errorf("Error locating Resource Group for Storage Share File %q (Share %s, Account %s): %s", name, shareName, accountName, err) + return fmt.Errorf("Error locating Resource Group for Storage Share File %q (Share %s, Account %s): %s", name, storageShareID.Name, storageShareID.AccountName, err) } client, err := storageClient.FileShareFilesClient(ctx, *account) @@ -204,7 +210,7 @@ func testCheckAzureRMStorageShareFileDestroy(s *terraform.State) error { return fmt.Errorf("Error building FileShare File Client: %s", err) } - resp, err := client.GetProperties(ctx, accountName, shareName, directoryName, name) + resp, err := client.GetProperties(ctx, storageShareID.AccountName, storageShareID.Name, path, name) if err != nil { return nil } @@ -215,36 +221,6 @@ func testCheckAzureRMStorageShareFileDestroy(s *terraform.State) error { return nil } -func testAccAzureRMStorageShareFile_basic(data acceptance.TestData) string { - template := testAccAzureRMStorageShareFile_template(data) - return fmt.Sprintf(` -%s - -resource "azurerm_storage_share_file" "test" { - name = "dir" - share_name = azurerm_storage_share.test.name - storage_account_name = azurerm_storage_account.test.name - - metadata = { - hello = "world" - } -} -`, template) -} - -func testAccAzureRMStorageShareFile_requiresImport(data acceptance.TestData) string { - template := testAccAzureRMStorageShareFile_basic(data) - return fmt.Sprintf(` -%s - -resource "azurerm_storage_share_file" "import" { - name = azurerm_storage_share_file.test.name - share_name = azurerm_storage_share_file.test.share_name - storage_account_name = azurerm_storage_share_file.test.storage_account_name -} -`, template) -} - func testAccAzureRMStorageShareFile_template(data acceptance.TestData) string { return fmt.Sprintf(` provider "azurerm" { @@ -272,6 +248,34 @@ resource "azurerm_storage_share" "test" { `, data.RandomInteger, data.Locations.Primary, data.RandomString) } +func testAccAzureRMStorageShareFile_basic(data acceptance.TestData) string { + template := testAccAzureRMStorageShareFile_template(data) + return fmt.Sprintf(` +%s + +resource "azurerm_storage_share_file" "test" { + 
name = "dir" + storage_share_id = azurerm_storage_share.test.id + + metadata = { + hello = "world" + } +} +`, template) +} + +func testAccAzureRMStorageShareFile_requiresImport(data acceptance.TestData) string { + template := testAccAzureRMStorageShareFile_basic(data) + return fmt.Sprintf(` +%s + +resource "azurerm_storage_share_file" "import" { + name = azurerm_storage_share_file.test.name + storage_share_id = azurerm_storage_share.test.id +} +`, template) +} + func testAccAzureRMStorageShareFile_complete(data acceptance.TestData) string { template := testAccAzureRMStorageShareFile_template(data) return fmt.Sprintf(` @@ -279,8 +283,8 @@ func testAccAzureRMStorageShareFile_complete(data acceptance.TestData) string { resource "azurerm_storage_share_file" "test" { name = "dir" - share_name = azurerm_storage_share.test.name - storage_account_name = azurerm_storage_account.test.name + storage_share_id = azurerm_storage_share.test.id + content_type = "test_content_type" content_encoding = "test_encoding" @@ -300,9 +304,7 @@ func testAccAzureRMStorageShareFile_withFile(data acceptance.TestData, fileName resource "azurerm_storage_share_file" "test" { name = "dir" - share_name = azurerm_storage_share.test.name - storage_account_name = azurerm_storage_account.test.name - + storage_share_id = azurerm_storage_share.test.id source = "%s" diff --git a/azurerm/internal/services/storage/validate/storage_share.go b/azurerm/internal/services/storage/validate/storage_share.go index 2f0192c83ce5..95a90b9d2f3d 100644 --- a/azurerm/internal/services/storage/validate/storage_share.go +++ b/azurerm/internal/services/storage/validate/storage_share.go @@ -3,6 +3,8 @@ package validate import ( "fmt" "regexp" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/storage/parse" ) func ShareName(v interface{}, k string) (warnings []string, errors []error) { @@ -28,3 +30,18 @@ func ShareName(v interface{}, k string) (warnings []string, errors []error) { } return warnings, errors } + +func StorageShareID(i interface{}, k string) (warnings []string, errors []error) { + v, ok := i.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected type of %q to be string", k)) + return + } + + if _, err := parse.StorageShareDataPlaneID(v); err != nil { + errors = append(errors, fmt.Errorf("Can not parse %q as a resource id: %v", k, err)) + return + } + + return warnings, errors +} diff --git a/website/docs/r/storage_share_file.html.markdown b/website/docs/r/storage_share_file.html.markdown index 35c843cb21c2..6c9de8313380 100644 --- a/website/docs/r/storage_share_file.html.markdown +++ b/website/docs/r/storage_share_file.html.markdown @@ -34,8 +34,7 @@ resource "azurerm_storage_share" "example" { resource "azurerm_storage_share_file" "example" { name = "my-awesome-content.zip" - share_name = azurerm_storage_share.example.name - storage_account_name = azurerm_storage_account.example.name + storage_share_id = azurerm_storage_share.example.id source = "some-local-file.zip" } ``` @@ -46,9 +45,7 @@ The following arguments are supported: * `name` - (Required) The name (or path) of the File that should be created within this File Share. Changing this forces a new resource to be created. -* `share_name` - (Required) The name of the File Share where this File should be created. Changing this forces a new resource to be created. - -* `storage_account_name` - (Required) The name of the Storage Account within which the File Share is located. Changing this forces a new resource to be created. 
+* `storage_share_id` - (Required) The Storage Share ID in which this file will be placed into. Changing this forces a new resource to be created. * `directory_name` - (Optional) The storage share directory that you would like the file placed into. Changing this forces a new resource to be created. From c50d9058294ac422a1005beae4d08a61a769c286 Mon Sep 17 00:00:00 2001 From: Matthew Frahry Date: Tue, 1 Dec 2020 14:54:19 -0800 Subject: [PATCH 08/11] go mod tidy --- go.sum | 2 -- 1 file changed, 2 deletions(-) diff --git a/go.sum b/go.sum index ec1cbdcfff32..c9d29db5e42b 100644 --- a/go.sum +++ b/go.sum @@ -262,8 +262,6 @@ github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJy github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/terraform-providers/terraform-provider-azuread v0.9.0 h1:XLzFgVHakq6qjJ2L0o/tN2yHu/hT4vIW9sKtejr7gPs= github.com/terraform-providers/terraform-provider-azuread v0.9.0/go.mod h1:sSDzB/8CD639+yWo5lZf+NJvGSYQBSS6z+GoET9IrzE= -github.com/tombuildsstuff/giovanni v0.14.0 h1:vBgZJHNs8p42Nj4GaffPe7nzs2Z2qIyKUN+7793UggA= -github.com/tombuildsstuff/giovanni v0.14.0/go.mod h1:0TZugJPEtqzPlMpuJHYfXY6Dq2uLPrXf98D2XQSxNbA= github.com/tombuildsstuff/giovanni v0.15.0 h1:DMVKSdnKhYRDqeQUg6JhUxOoeCnO2BUNZJM9z/lSigA= github.com/tombuildsstuff/giovanni v0.15.0/go.mod h1:0TZugJPEtqzPlMpuJHYfXY6Dq2uLPrXf98D2XQSxNbA= github.com/ulikunitz/xz v0.5.5 h1:pFrO0lVpTBXLpYw+pnLj6TbvHuyjXMfjGeCwSqCVwok= From 81261a173d53285b08c33e8afe38bc2801b8b279 Mon Sep 17 00:00:00 2001 From: Matthew Frahry Date: Tue, 1 Dec 2020 15:01:32 -0800 Subject: [PATCH 09/11] Fix comments --- .../resource_arm_storage_share_file.go | 31 +++++++++---------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/azurerm/internal/services/storage/resource_arm_storage_share_file.go b/azurerm/internal/services/storage/resource_arm_storage_share_file.go index 2c26cd67db72..6fac14306618 100644 --- a/azurerm/internal/services/storage/resource_arm_storage_share_file.go +++ b/azurerm/internal/services/storage/resource_arm_storage_share_file.go @@ -42,7 +42,6 @@ func resourceArmStorageShareFile() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - // TODO: add validation }, "storage_share_id": { Type: schema.TypeString, @@ -109,10 +108,10 @@ func resourceArmStorageShareFileCreate(d *schema.ResourceData, meta interface{}) account, err := storageClient.FindAccount(ctx, storageShareID.AccountName) if err != nil { - return fmt.Errorf("Error retrieving Account %q for File %q (Share %q): %s", storageShareID.AccountName, fileName, storageShareID.Name, err) + return fmt.Errorf("eretrieving Account %q for File %q (Share %q): %s", storageShareID.AccountName, fileName, storageShareID.Name, err) } if account == nil { - return fmt.Errorf("Unable to locate Storage Account %q!", storageShareID.AccountName) + return fmt.Errorf("unable to locate Storage Account %q!", storageShareID.AccountName) } fileSharesClient, err := storageClient.FileSharesClient(ctx, *account) @@ -130,13 +129,13 @@ func resourceArmStorageShareFileCreate(d *schema.ResourceData, meta interface{}) client, err := storageClient.FileShareFilesClient(ctx, *account) if err != nil { - return fmt.Errorf("Error building File Share Directories Client: %s", err) + return fmt.Errorf("building File Share Directories Client: %s", err) } existing, err := client.GetProperties(ctx, storageShareID.AccountName, storageShareID.Name, path, fileName) if err != nil { if 
!utils.ResponseWasNotFound(existing.Response) { - return fmt.Errorf("Error checking for presence of existing File %q (File Share %q / Storage Account %q / Resource Group %q): %s", fileName, storageShareID.Name, storageShareID.AccountName, account.ResourceGroup, err) + return fmt.Errorf("checking for presence of existing File %q (File Share %q / Storage Account %q / Resource Group %q): %s", fileName, storageShareID.Name, storageShareID.AccountName, account.ResourceGroup, err) } } @@ -202,7 +201,7 @@ func resourceArmStorageShareFileUpdate(d *schema.ResourceData, meta interface{}) return fmt.Errorf("retrieving Account %q for File %q (Share %q): %s", id.AccountName, id.FileName, id.ShareName, err) } if account == nil { - return fmt.Errorf("Unable to locate Storage Account %q!", id.AccountName) + return fmt.Errorf("unable to locate Storage Account %q!", id.AccountName) } fileSharesClient, err := storageClient.FileSharesClient(ctx, *account) @@ -226,7 +225,7 @@ func resourceArmStorageShareFileUpdate(d *schema.ResourceData, meta interface{}) existing, err := client.GetProperties(ctx, id.AccountName, id.ShareName, id.DirectoryName, id.FileName) if err != nil { if !utils.ResponseWasNotFound(existing.Response) { - return fmt.Errorf("Error checking for presence of existing File %q (File Share %q / Storage Account %q / Resource Group %q): %s", id.FileName, id.ShareName, id.AccountName, account.ResourceGroup, err) + return fmt.Errorf("checking for presence of existing File %q (File Share %q / Storage Account %q / Resource Group %q): %s", id.FileName, id.ShareName, id.AccountName, account.ResourceGroup, err) } } @@ -243,7 +242,7 @@ func resourceArmStorageShareFileUpdate(d *schema.ResourceData, meta interface{}) } if _, err := client.SetProperties(ctx, id.AccountName, id.ShareName, id.DirectoryName, id.FileName, input); err != nil { - return fmt.Errorf("Error creating File %q (File Share %q / Account %q): %+v", id.FileName, id.ShareName, id.AccountName, err) + return fmt.Errorf("creating File %q (File Share %q / Account %q): %+v", id.FileName, id.ShareName, id.AccountName, err) } } @@ -262,7 +261,7 @@ func resourceArmStorageShareFileRead(d *schema.ResourceData, meta interface{}) e account, err := storageClient.FindAccount(ctx, id.AccountName) if err != nil { - return fmt.Errorf("Error retrieving Account %q for File %q (Share %q): %s", id.AccountName, id.FileName, id.ShareName, err) + return fmt.Errorf("retrieving Account %q for File %q (Share %q): %s", id.AccountName, id.FileName, id.ShareName, err) } if account == nil { log.Printf("[WARN] Unable to determine Storage Account for Storage Share File %q (Share %s, Account %s) - assuming removed & removing from state", id.FileName, id.ShareName, id.AccountName) @@ -287,12 +286,12 @@ func resourceArmStorageShareFileRead(d *schema.ResourceData, meta interface{}) e client, err := storageClient.FileShareFilesClient(ctx, *account) if err != nil { - return fmt.Errorf("Error building File Share Client for Storage Account %q (Resource Group %q): %s", id.AccountName, account.ResourceGroup, err) + return fmt.Errorf("building File Share Client for Storage Account %q (Resource Group %q): %s", id.AccountName, account.ResourceGroup, err) } props, err := client.GetProperties(ctx, id.AccountName, id.ShareName, id.DirectoryName, id.FileName) if err != nil { - return fmt.Errorf("Error retrieving Storage Share %q (File Share %q / Account %q / Resource Group %q): %s", id.DirectoryName, id.ShareName, id.AccountName, account.ResourceGroup, err) + return fmt.Errorf("retrieving 
Storage Share %q (File Share %q / Account %q / Resource Group %q): %s", id.DirectoryName, id.ShareName, id.AccountName, account.ResourceGroup, err) } d.Set("name", id.FileName) @@ -300,7 +299,7 @@ func resourceArmStorageShareFileRead(d *schema.ResourceData, meta interface{}) e d.Set("storage_share_id", parse.NewStorageShareDataPlaneId(id.AccountName, storageClient.Environment.StorageEndpointSuffix, id.ShareName).ID("")) if err := d.Set("metadata", FlattenMetaData(props.MetaData)); err != nil { - return fmt.Errorf("Error setting `metadata`: %s", err) + return fmt.Errorf("setting `metadata`: %s", err) } d.Set("content_type", props.ContentType) d.Set("content_encoding", props.ContentEncoding) @@ -322,19 +321,19 @@ func resourceArmStorageShareFileDelete(d *schema.ResourceData, meta interface{}) account, err := storageClient.FindAccount(ctx, id.AccountName) if err != nil { - return fmt.Errorf("Error retrieving Account %q for File %q (Share %q): %s", id.AccountName, id.FileName, id.ShareName, err) + return fmt.Errorf("retrieving Account %q for File %q (Share %q): %s", id.AccountName, id.FileName, id.ShareName, err) } if account == nil { - return fmt.Errorf("Unable to locate Storage Account %q!", id.AccountName) + return fmt.Errorf("unable to locate Storage Account %q", id.AccountName) } client, err := storageClient.FileShareFilesClient(ctx, *account) if err != nil { - return fmt.Errorf("Error building File Share File Client for Storage Account %q (Resource Group %q): %s", id.AccountName, account.ResourceGroup, err) + return fmt.Errorf("building File Share File Client for Storage Account %q (Resource Group %q): %s", id.AccountName, account.ResourceGroup, err) } if _, err := client.Delete(ctx, id.AccountName, id.ShareName, id.DirectoryName, id.FileName); err != nil { - return fmt.Errorf("Error deleting Storage Share File %q (File Share %q / Account %q / Resource Group %q): %s", id.FileName, id.ShareName, id.AccountName, account.ResourceGroup, err) + return fmt.Errorf("deleting Storage Share File %q (File Share %q / Account %q / Resource Group %q): %s", id.FileName, id.ShareName, id.AccountName, account.ResourceGroup, err) } return nil From 911e7993e0a79604bab46b8c090f2ad4f027d703 Mon Sep 17 00:00:00 2001 From: Matthew Frahry Date: Tue, 1 Dec 2020 23:40:30 -0800 Subject: [PATCH 10/11] Lint --- .../resource_arm_storage_share_file_test.go | 18 +++++++++--------- .../docs/r/storage_share_file.html.markdown | 6 +++--- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/azurerm/internal/services/storage/tests/resource_arm_storage_share_file_test.go b/azurerm/internal/services/storage/tests/resource_arm_storage_share_file_test.go index ccd961cf00a0..083f79d56157 100644 --- a/azurerm/internal/services/storage/tests/resource_arm_storage_share_file_test.go +++ b/azurerm/internal/services/storage/tests/resource_arm_storage_share_file_test.go @@ -254,8 +254,8 @@ func testAccAzureRMStorageShareFile_basic(data acceptance.TestData) string { %s resource "azurerm_storage_share_file" "test" { - name = "dir" - storage_share_id = azurerm_storage_share.test.id + name = "dir" + storage_share_id = azurerm_storage_share.test.id metadata = { hello = "world" @@ -270,8 +270,8 @@ func testAccAzureRMStorageShareFile_requiresImport(data acceptance.TestData) str %s resource "azurerm_storage_share_file" "import" { - name = azurerm_storage_share_file.test.name - storage_share_id = azurerm_storage_share.test.id + name = azurerm_storage_share_file.test.name + storage_share_id = azurerm_storage_share.test.id } `, 
template) } @@ -282,10 +282,10 @@ func testAccAzureRMStorageShareFile_complete(data acceptance.TestData) string { %s resource "azurerm_storage_share_file" "test" { - name = "dir" - storage_share_id = azurerm_storage_share.test.id + name = "dir" + storage_share_id = azurerm_storage_share.test.id + - content_type = "test_content_type" content_encoding = "test_encoding" content_disposition = "test_content_disposition" @@ -303,8 +303,8 @@ func testAccAzureRMStorageShareFile_withFile(data acceptance.TestData, fileName %s resource "azurerm_storage_share_file" "test" { - name = "dir" - storage_share_id = azurerm_storage_share.test.id + name = "dir" + storage_share_id = azurerm_storage_share.test.id source = "%s" diff --git a/website/docs/r/storage_share_file.html.markdown b/website/docs/r/storage_share_file.html.markdown index 6c9de8313380..7368176e2476 100644 --- a/website/docs/r/storage_share_file.html.markdown +++ b/website/docs/r/storage_share_file.html.markdown @@ -33,9 +33,9 @@ resource "azurerm_storage_share" "example" { } resource "azurerm_storage_share_file" "example" { - name = "my-awesome-content.zip" - storage_share_id = azurerm_storage_share.example.id - source = "some-local-file.zip" + name = "my-awesome-content.zip" + storage_share_id = azurerm_storage_share.example.id + source = "some-local-file.zip" } ``` From 9e93fb8227be8b66256caf91a59c1668917bc2ec Mon Sep 17 00:00:00 2001 From: Matthew Frahry Date: Tue, 8 Dec 2020 14:47:28 -0800 Subject: [PATCH 11/11] use test shimm --- .../resource_arm_storage_share_file.go | 6 +- .../resource_arm_storage_share_file_test.go | 279 +++++++----------- 2 files changed, 103 insertions(+), 182 deletions(-) diff --git a/azurerm/internal/services/storage/resource_arm_storage_share_file.go b/azurerm/internal/services/storage/resource_arm_storage_share_file.go index 6fac14306618..da2a85a5ae36 100644 --- a/azurerm/internal/services/storage/resource_arm_storage_share_file.go +++ b/azurerm/internal/services/storage/resource_arm_storage_share_file.go @@ -6,15 +6,13 @@ import ( "os" "time" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/storage/parse" - - storageValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/storage/validate" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/storage/parse" + storageValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/storage/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" "github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files" diff --git a/azurerm/internal/services/storage/tests/resource_arm_storage_share_file_test.go b/azurerm/internal/services/storage/tests/resource_arm_storage_share_file_test.go index 083f79d56157..1c7ac6f05b6c 100644 --- a/azurerm/internal/services/storage/tests/resource_arm_storage_share_file_test.go +++ b/azurerm/internal/services/storage/tests/resource_arm_storage_share_file_test.go @@ -1,106 +1,94 @@ package tests import ( + "context" "fmt" 
"io/ioutil" - "net/http" "testing" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/storage/parse" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" + "github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files" ) +type StorageShareFileResource struct { +} + func TestAccAzureRMStorageShareFile_basic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_storage_share_file", "test") - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acceptance.PreCheck(t) }, - Providers: acceptance.SupportedProviders, - CheckDestroy: testCheckAzureRMStorageShareFileDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAzureRMStorageShareFile_basic(data), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMStorageShareFileExists(data.ResourceName), - ), - }, - data.ImportStep(), + r := StorageShareFileResource{} + + data.ResourceTest(t, r, []resource.TestStep{ + { + Config: r.basic(data), + Check: resource.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), }, + data.ImportStep(), }) } func TestAccAzureRMStorageShareFile_requiresImport(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_storage_share_file", "test") - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acceptance.PreCheck(t) }, - Providers: acceptance.SupportedProviders, - CheckDestroy: testCheckAzureRMStorageShareFileDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAzureRMStorageShareFile_basic(data), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMStorageShareFileExists(data.ResourceName), - ), - }, - data.RequiresImportErrorStep(testAccAzureRMStorageShareFile_requiresImport), + r := StorageShareFileResource{} + + data.ResourceTest(t, r, []resource.TestStep{ + { + Config: r.basic(data), + Check: resource.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), }, + data.RequiresImportErrorStep(r.requiresImport), }) } func TestAccAzureRMStorageShareFile_complete(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_storage_share_file", "test") - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acceptance.PreCheck(t) }, - Providers: acceptance.SupportedProviders, - CheckDestroy: testCheckAzureRMStorageShareFileDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAzureRMStorageShareFile_complete(data), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMStorageShareFileExists(data.ResourceName), - ), - }, - data.ImportStep(), + r := StorageShareFileResource{} + + data.ResourceTest(t, r, []resource.TestStep{ + { + Config: r.complete(data), + Check: resource.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), }, + data.ImportStep(), }) } func TestAccAzureRMStorageShareFile_update(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_storage_share_file", "test") - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acceptance.PreCheck(t) }, - Providers: acceptance.SupportedProviders, - CheckDestroy: testCheckAzureRMStorageShareFileDestroy, - Steps: []resource.TestStep{ - { - Config: 
testAccAzureRMStorageShareFile_basic(data), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMStorageShareFileExists(data.ResourceName), - ), - }, - data.ImportStep(), - { - Config: testAccAzureRMStorageShareFile_complete(data), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMStorageShareFileExists(data.ResourceName), - ), - }, - data.ImportStep(), - { - Config: testAccAzureRMStorageShareFile_basic(data), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMStorageShareFileExists(data.ResourceName), - ), - }, - data.ImportStep(), + r := StorageShareFileResource{} + + data.ResourceTest(t, r, []resource.TestStep{ + { + Config: r.basic(data), + Check: resource.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), }, + data.ImportStep(), + { + Config: r.complete(data), + Check: resource.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.basic(data), + Check: resource.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), }) } @@ -114,121 +102,56 @@ func TestAccAzureRMStorageShareFile_withFile(t *testing.T) { t.Fatalf("Error populating temp file: %s", err) } data := acceptance.BuildTestData(t, "azurerm_storage_share_file", "test") - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acceptance.PreCheck(t) }, - Providers: acceptance.SupportedProviders, - CheckDestroy: testCheckAzureRMStorageShareFileDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAzureRMStorageShareFile_withFile(data, sourceBlob.Name()), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMStorageShareFileExists(data.ResourceName), - ), - }, - data.ImportStep("source"), + r := StorageShareFileResource{} + + data.ResourceTest(t, r, []resource.TestStep{ + { + Config: r.withFile(data, sourceBlob.Name()), + Check: resource.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), }, + data.ImportStep("source"), }) } -func testCheckAzureRMStorageShareFileExists(resourceName string) resource.TestCheckFunc { - return func(s *terraform.State) error { - storageClient := acceptance.AzureProvider.Meta().(*clients.Client).Storage - ctx := acceptance.AzureProvider.Meta().(*clients.Client).StopContext - - // Ensure we have enough information in state to look up in API - rs, ok := s.RootModule().Resources[resourceName] - if !ok { - return fmt.Errorf("Not found: %s", resourceName) - } - - name := rs.Primary.Attributes["name"] - storageShareID, err := parse.StorageShareDataPlaneID(rs.Primary.Attributes["storage_share_id"]) - if err != nil { - return err - } - path := rs.Primary.Attributes["path"] - - account, err := storageClient.FindAccount(ctx, storageShareID.AccountName) - if err != nil { - return fmt.Errorf("Error retrieving Account %q for File %q (Share %q): %s", storageShareID.AccountName, name, storageShareID.Name, err) - } - if account == nil { - return fmt.Errorf("Unable to locate Storage Account %q!", storageShareID.AccountName) - } - - client, err := storageClient.FileShareFilesClient(ctx, *account) - if err != nil { - return fmt.Errorf("Error building FileShare File Client: %s", err) - } - - resp, err := client.GetProperties(ctx, storageShareID.AccountName, storageShareID.Name, path, name) - if err != nil { - return fmt.Errorf("Bad: Get on FileShareFilesClient: %+v", err) - } - - if resp.StatusCode == http.StatusNotFound { - return fmt.Errorf("Bad: File %q (File Share %q / Account %q / Resource Group %q) does not exist", 
name, storageShareID.Name, storageShareID.AccountName, account.ResourceGroup) - } - - return nil +func (StorageShareFileResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { + id, err := files.ParseResourceID(state.ID) + if err != nil { + return nil, err } -} - -func testCheckAzureRMStorageShareFileDestroy(s *terraform.State) error { - storageClient := acceptance.AzureProvider.Meta().(*clients.Client).Storage - ctx := acceptance.AzureProvider.Meta().(*clients.Client).StopContext - - for _, rs := range s.RootModule().Resources { - if rs.Type != "azurerm_storage_share_file" { - continue - } - - name := rs.Primary.Attributes["name"] - storageShareID, err := parse.StorageShareDataPlaneID(rs.Primary.Attributes["storage_share_id"]) - if err != nil { - return err - } - path := rs.Primary.Attributes["path"] - account, err := storageClient.FindAccount(ctx, storageShareID.AccountName) - if err != nil { - return fmt.Errorf("Error retrieving Account %q for File %q (Share %q): %s", storageShareID.AccountName, name, storageShareID.Name, err) - } - - // not found, the account's gone - if account == nil { - return nil - } - - if err != nil { - return fmt.Errorf("Error locating Resource Group for Storage Share File %q (Share %s, Account %s): %s", name, storageShareID.Name, storageShareID.AccountName, err) - } + account, err := clients.Storage.FindAccount(ctx, id.AccountName) + if err != nil { + return nil, fmt.Errorf("retrieving Account %q for File %q (Share %q): %s", id.AccountName, id.FileName, id.ShareName, err) + } + if account == nil { + return utils.Bool(false), nil + } - client, err := storageClient.FileShareFilesClient(ctx, *account) - if err != nil { - return fmt.Errorf("Error building FileShare File Client: %s", err) - } + client, err := clients.Storage.FileShareFilesClient(ctx, *account) + if err != nil { + return nil, fmt.Errorf("building File Share Files Client: %s", err) + } - resp, err := client.GetProperties(ctx, storageShareID.AccountName, storageShareID.Name, path, name) - if err != nil { - return nil + resp, err := client.GetProperties(ctx, id.AccountName, id.ShareName, id.DirectoryName, id.FileName) + if err != nil { + if !utils.ResponseWasNotFound(resp.Response) { + return nil, fmt.Errorf("checking for presence of existing File %q (File Share %q / Storage Account %q / Resource Group %q): %s", id.FileName, id.ShareName, id.AccountName, account.ResourceGroup, err) } - - return fmt.Errorf("File Share still exists:\n%#v", resp) } - return nil + return utils.Bool(true), nil } -func testAccAzureRMStorageShareFile_template(data acceptance.TestData) string { +func (StorageShareFileResource) template(data acceptance.TestData) string { return fmt.Sprintf(` provider "azurerm" { features {} } resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" + name = "acctestRG-storage-%d" location = "%s" } @@ -248,8 +171,7 @@ resource "azurerm_storage_share" "test" { `, data.RandomInteger, data.Locations.Primary, data.RandomString) } -func testAccAzureRMStorageShareFile_basic(data acceptance.TestData) string { - template := testAccAzureRMStorageShareFile_template(data) +func (r StorageShareFileResource) basic(data acceptance.TestData) string { return fmt.Sprintf(` %s @@ -261,23 +183,25 @@ resource "azurerm_storage_share_file" "test" { hello = "world" } } -`, template) +`, r.template(data)) } -func testAccAzureRMStorageShareFile_requiresImport(data acceptance.TestData) string { - template := testAccAzureRMStorageShareFile_basic(data) +func 
(r StorageShareFileResource) requiresImport(data acceptance.TestData) string { return fmt.Sprintf(` %s resource "azurerm_storage_share_file" "import" { name = azurerm_storage_share_file.test.name - storage_share_id = azurerm_storage_share.test.id + storage_share_id = azurerm_storage_share_file.test.storage_share_id + + metadata = { + hello = "world" + } } -`, template) +`, r.basic(data)) } -func testAccAzureRMStorageShareFile_complete(data acceptance.TestData) string { - template := testAccAzureRMStorageShareFile_template(data) +func (r StorageShareFileResource) complete(data acceptance.TestData) string { return fmt.Sprintf(` %s @@ -294,11 +218,10 @@ resource "azurerm_storage_share_file" "test" { hello = "world" } } -`, template) +`, r.template(data)) } -func testAccAzureRMStorageShareFile_withFile(data acceptance.TestData, fileName string) string { - template := testAccAzureRMStorageShareFile_template(data) +func (r StorageShareFileResource) withFile(data acceptance.TestData, fileName string) string { return fmt.Sprintf(` %s @@ -312,5 +235,5 @@ resource "azurerm_storage_share_file" "test" { hello = "world" } } -`, template, fileName) +`, r.template(data), fileName) }
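
For reference, here is a minimal sketch of a configuration using the resource as it ends up after this patch series, i.e. with the `storage_share_id` reference and the renamed `path` argument. It assumes the `azurerm_resource_group` and `azurerm_storage_account` resources from the documentation example above; the share, directory and file names and the local `source` path are illustrative only, and the directory referenced by `path` is assumed to already exist (created here via `azurerm_storage_share_directory`):

```hcl
resource "azurerm_storage_share" "example" {
  name                 = "sharename"
  storage_account_name = azurerm_storage_account.example.name
  quota                = 50
}

resource "azurerm_storage_share_directory" "example" {
  name                 = "dir"
  share_name           = azurerm_storage_share.example.name
  storage_account_name = azurerm_storage_account.example.name
}

resource "azurerm_storage_share_file" "example" {
  name             = "my-awesome-content.zip"
  storage_share_id = azurerm_storage_share.example.id
  path             = azurerm_storage_share_directory.example.name
  source           = "some-local-file.zip"

  metadata = {
    hello = "world"
  }
}
```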