diff --git a/azurerm/internal/services/storage/client/client.go b/azurerm/internal/services/storage/client/client.go
index d09aa21ee362..d53df8621fdd 100644
--- a/azurerm/internal/services/storage/client/client.go
+++ b/azurerm/internal/services/storage/client/client.go
@@ -16,6 +16,7 @@ import (
 	"github.com/tombuildsstuff/giovanni/storage/2019-12-12/datalakestore/filesystems"
 	"github.com/tombuildsstuff/giovanni/storage/2019-12-12/datalakestore/paths"
 	"github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/directories"
+	"github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files"
 	"github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/shares"
 	"github.com/tombuildsstuff/giovanni/storage/2019-12-12/queue/queues"
 	"github.com/tombuildsstuff/giovanni/storage/2019-12-12/table/entities"
@@ -174,6 +175,24 @@ func (client Client) FileShareDirectoriesClient(ctx context.Context, account accountDetails) (*directories.Client, error) {
 	return &directoriesClient, nil
 }
 
+func (client Client) FileShareFilesClient(ctx context.Context, account accountDetails) (*files.Client, error) {
+	// NOTE: Files do not support AzureAD Authentication
+
+	accountKey, err := account.AccountKey(ctx, client)
+	if err != nil {
+		return nil, fmt.Errorf("Error retrieving Account Key: %s", err)
+	}
+
+	storageAuth, err := autorest.NewSharedKeyAuthorizer(account.name, *accountKey, autorest.SharedKeyLite)
+	if err != nil {
+		return nil, fmt.Errorf("Error building Authorizer: %+v", err)
+	}
+
+	filesClient := files.NewWithEnvironment(client.Environment)
+	filesClient.Client.Authorizer = storageAuth
+	return &filesClient, nil
+}
+
 func (client Client) FileSharesClient(ctx context.Context, account accountDetails) (shim.StorageShareWrapper, error) {
 	// NOTE: Files do not support AzureAD Authentication
 
diff --git a/azurerm/internal/services/storage/registration.go b/azurerm/internal/services/storage/registration.go
index 5835522e08f2..d8943ee904be 100644
--- a/azurerm/internal/services/storage/registration.go
+++ b/azurerm/internal/services/storage/registration.go
@@ -46,6 +46,7 @@ func (r Registration) SupportedResources() map[string]*schema.Resource {
 		"azurerm_storage_management_policy": resourceArmStorageManagementPolicy(),
 		"azurerm_storage_queue":              resourceArmStorageQueue(),
 		"azurerm_storage_share":              resourceArmStorageShare(),
+		"azurerm_storage_share_file":         resourceArmStorageShareFile(),
 		"azurerm_storage_share_directory":    resourceArmStorageShareDirectory(),
 		"azurerm_storage_table":              resourceArmStorageTable(),
 		"azurerm_storage_table_entity":       resourceArmStorageTableEntity(),
diff --git a/azurerm/internal/services/storage/resource_arm_storage_share_file.go b/azurerm/internal/services/storage/resource_arm_storage_share_file.go
new file mode 100644
index 000000000000..da2a85a5ae36
--- /dev/null
+++ b/azurerm/internal/services/storage/resource_arm_storage_share_file.go
@@ -0,0 +1,338 @@
+package storage
+
+import (
+	"fmt"
+	"log"
+	"os"
+	"time"
+
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/validation"
+	"github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf"
+	"github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate"
+	"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients"
+	"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/storage/parse"
+	storageValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/storage/validate"
+	"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts"
+	"github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils"
+	"github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files"
+)
+
+func resourceArmStorageShareFile() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceArmStorageShareFileCreate,
+		Read:   resourceArmStorageShareFileRead,
+		Update: resourceArmStorageShareFileUpdate,
+		Delete: resourceArmStorageShareFileDelete,
+		Importer: &schema.ResourceImporter{
+			State: schema.ImportStatePassthrough,
+		},
+
+		Timeouts: &schema.ResourceTimeout{
+			Create: schema.DefaultTimeout(30 * time.Minute),
+			Read:   schema.DefaultTimeout(5 * time.Minute),
+			Update: schema.DefaultTimeout(30 * time.Minute),
+			Delete: schema.DefaultTimeout(30 * time.Minute),
+		},
+
+		Schema: map[string]*schema.Schema{
+			"name": {
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+			"storage_share_id": {
+				Type:         schema.TypeString,
+				Required:     true,
+				ForceNew:     true,
+				ValidateFunc: storageValidate.StorageShareID,
+			},
+			"path": {
+				Type:         schema.TypeString,
+				ForceNew:     true,
+				Optional:     true,
+				Default:      "",
+				ValidateFunc: validate.StorageShareDirectoryName,
+			},
+
+			"content_type": {
+				Type:     schema.TypeString,
+				Optional: true,
+				Default:  "application/octet-stream",
+			},
+
+			"content_encoding": {
+				Type:         schema.TypeString,
+				Optional:     true,
+				ValidateFunc: validation.StringIsNotEmpty,
+			},
+
+			"content_md5": {
+				Type:         schema.TypeString,
+				Optional:     true,
+				ValidateFunc: validation.StringIsNotEmpty,
+			},
+
+			"content_disposition": {
+				Type:         schema.TypeString,
+				Optional:     true,
+				ValidateFunc: validation.StringIsNotEmpty,
+			},
+
+			"source": {
+				Type:         schema.TypeString,
+				Optional:     true,
+				ValidateFunc: validation.StringIsNotEmpty,
+				ForceNew:     true,
+			},
+
+			"metadata": MetaDataSchema(),
+		},
+	}
+}
+
+func resourceArmStorageShareFileCreate(d *schema.ResourceData, meta interface{}) error {
+	ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d)
+	defer cancel()
+	storageClient := meta.(*clients.Client).Storage
+
+	storageShareID, err := parse.StorageShareDataPlaneID(d.Get("storage_share_id").(string))
+	if err != nil {
+		return err
+	}
+
+	fileName := d.Get("name").(string)
+	path := d.Get("path").(string)
+
+	account, err := storageClient.FindAccount(ctx, storageShareID.AccountName)
+	if err != nil {
+		return fmt.Errorf("retrieving Account %q for File %q (Share %q): %s", storageShareID.AccountName, fileName, storageShareID.Name, err)
+	}
+	if account == nil {
+		return fmt.Errorf("unable to locate Storage Account %q!", storageShareID.AccountName)
+	}
+
+	fileSharesClient, err := storageClient.FileSharesClient(ctx, *account)
+	if err != nil {
+		return fmt.Errorf("building File Shares Client: %s", err)
+	}
+
+	share, err := fileSharesClient.Get(ctx, account.ResourceGroup, storageShareID.AccountName, storageShareID.Name)
+	if err != nil {
+		return fmt.Errorf("retrieving Share %q for File %q: %s", storageShareID.Name, fileName, err)
+	}
+	if share == nil {
+		return fmt.Errorf("unable to locate Storage Share %q", storageShareID.Name)
+	}
+
+	client, err := storageClient.FileShareFilesClient(ctx, *account)
+	if err != nil {
+		return fmt.Errorf("building File Share Files Client: %s", err)
+	}
+
+	existing, err := client.GetProperties(ctx, storageShareID.AccountName, storageShareID.Name, path, fileName)
+	if err != nil {
+		if !utils.ResponseWasNotFound(existing.Response) {
+			return fmt.Errorf("checking for presence of
existing File %q (File Share %q / Storage Account %q / Resource Group %q): %s", fileName, storageShareID.Name, storageShareID.AccountName, account.ResourceGroup, err)
+		}
+	}
+
+	if !utils.ResponseWasNotFound(existing.Response) {
+		id := client.GetResourceID(storageShareID.AccountName, storageShareID.Name, path, fileName)
+		return tf.ImportAsExistsError("azurerm_storage_share_file", id)
+	}
+
+	input := files.CreateInput{
+		MetaData:           ExpandMetaData(d.Get("metadata").(map[string]interface{})),
+		ContentType:        utils.String(d.Get("content_type").(string)),
+		ContentEncoding:    utils.String(d.Get("content_encoding").(string)),
+		ContentDisposition: utils.String(d.Get("content_disposition").(string)),
+	}
+
+	if v, ok := d.GetOk("content_md5"); ok {
+		input.ContentMD5 = utils.String(v.(string))
+	}
+
+	var file *os.File
+	if v, ok := d.GetOk("source"); ok {
+		file, err = os.Open(v.(string))
+		if err != nil {
+			return fmt.Errorf("opening file: %s", err)
+		}
+
+		info, err := file.Stat()
+		if err != nil {
+			return fmt.Errorf("'stat'-ing File %q (File Share %q / Account %q): %+v", fileName, storageShareID.Name, storageShareID.AccountName, err)
+		}
+
+		input.ContentLength = info.Size()
+	}
+
+	if _, err := client.Create(ctx, storageShareID.AccountName, storageShareID.Name, path, fileName, input); err != nil {
+		return fmt.Errorf("creating File %q (File Share %q / Account %q): %+v", fileName, storageShareID.Name, storageShareID.AccountName, err)
+	}
+
+	if file != nil {
+		if err := client.PutFile(ctx, storageShareID.AccountName, storageShareID.Name, path, fileName, file, 4); err != nil {
+			return fmt.Errorf("uploading File %q (File Share %q / Account %q): %+v", fileName, storageShareID.Name, storageShareID.AccountName, err)
+		}
+	}
+
+	resourceID := client.GetResourceID(storageShareID.AccountName, storageShareID.Name, path, fileName)
+	d.SetId(resourceID)
+
+	return resourceArmStorageShareFileRead(d, meta)
+}
+
+func resourceArmStorageShareFileUpdate(d *schema.ResourceData, meta interface{}) error {
+	ctx, cancel := timeouts.ForUpdate(meta.(*clients.Client).StopContext, d)
+	defer cancel()
+	storageClient := meta.(*clients.Client).Storage
+
+	id, err := files.ParseResourceID(d.Id())
+	if err != nil {
+		return err
+	}
+
+	account, err := storageClient.FindAccount(ctx, id.AccountName)
+	if err != nil {
+		return fmt.Errorf("retrieving Account %q for File %q (Share %q): %s", id.AccountName, id.FileName, id.ShareName, err)
+	}
+	if account == nil {
+		return fmt.Errorf("unable to locate Storage Account %q!", id.AccountName)
+	}
+
+	fileSharesClient, err := storageClient.FileSharesClient(ctx, *account)
+	if err != nil {
+		return fmt.Errorf("building File Shares Client: %s", err)
+	}
+
+	share, err := fileSharesClient.Get(ctx, account.ResourceGroup, id.AccountName, id.ShareName)
+	if err != nil {
+		return fmt.Errorf("retrieving Share %q for File %q: %s", id.ShareName, id.FileName, err)
+	}
+	if share == nil {
+		return fmt.Errorf("unable to locate Storage Share %q", id.ShareName)
+	}
+
+	client, err := storageClient.FileShareFilesClient(ctx, *account)
+	if err != nil {
+		return fmt.Errorf("building File Share Files Client: %s", err)
+	}
+
+	existing, err := client.GetProperties(ctx, id.AccountName, id.ShareName, id.DirectoryName, id.FileName)
+	if err != nil {
+		if !utils.ResponseWasNotFound(existing.Response) {
+			return fmt.Errorf("checking for presence of existing File %q (File Share %q / Storage Account %q / Resource Group %q): %s", id.FileName, id.ShareName, id.AccountName,
account.ResourceGroup, err)
+		}
+	}
+
+	if d.HasChange("content_type") || d.HasChange("content_encoding") || d.HasChange("content_disposition") || d.HasChange("content_md5") {
+		input := files.SetPropertiesInput{
+			ContentType:        utils.String(d.Get("content_type").(string)),
+			ContentEncoding:    utils.String(d.Get("content_encoding").(string)),
+			ContentDisposition: utils.String(d.Get("content_disposition").(string)),
+			MetaData:           ExpandMetaData(d.Get("metadata").(map[string]interface{})),
+		}
+
+		if v, ok := d.GetOk("content_md5"); ok {
+			input.ContentMD5 = utils.String(v.(string))
+		}
+
+		if _, err := client.SetProperties(ctx, id.AccountName, id.ShareName, id.DirectoryName, id.FileName, input); err != nil {
+			return fmt.Errorf("updating File %q (File Share %q / Account %q): %+v", id.FileName, id.ShareName, id.AccountName, err)
+		}
+	}
+
+	return resourceArmStorageShareFileRead(d, meta)
+}
+
+func resourceArmStorageShareFileRead(d *schema.ResourceData, meta interface{}) error {
+	ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d)
+	defer cancel()
+	storageClient := meta.(*clients.Client).Storage
+
+	id, err := files.ParseResourceID(d.Id())
+	if err != nil {
+		return err
+	}
+
+	account, err := storageClient.FindAccount(ctx, id.AccountName)
+	if err != nil {
+		return fmt.Errorf("retrieving Account %q for File %q (Share %q): %s", id.AccountName, id.FileName, id.ShareName, err)
+	}
+	if account == nil {
+		log.Printf("[WARN] Unable to determine Storage Account for Storage Share File %q (Share %s, Account %s) - assuming removed & removing from state", id.FileName, id.ShareName, id.AccountName)
+		d.SetId("")
+		return nil
+	}
+
+	fileSharesClient, err := storageClient.FileSharesClient(ctx, *account)
+	if err != nil {
+		return fmt.Errorf("building File Shares Client: %s", err)
+	}
+
+	share, err := fileSharesClient.Get(ctx, account.ResourceGroup, id.AccountName, id.ShareName)
+	if err != nil {
+		return fmt.Errorf("retrieving Share %q for File %q: %s", id.ShareName, id.FileName, err)
+	}
+	if share == nil {
+		log.Printf("[WARN] Unable to determine Storage Share for Storage Share File %q (Share %s, Account %s) - assuming removed & removing from state", id.FileName, id.ShareName, id.AccountName)
+		d.SetId("")
+		return nil
+	}
+
+	client, err := storageClient.FileShareFilesClient(ctx, *account)
+	if err != nil {
+		return fmt.Errorf("building File Share Files Client for Storage Account %q (Resource Group %q): %s", id.AccountName, account.ResourceGroup, err)
+	}
+
+	props, err := client.GetProperties(ctx, id.AccountName, id.ShareName, id.DirectoryName, id.FileName)
+	if err != nil {
+		return fmt.Errorf("retrieving Storage Share File %q (File Share %q / Account %q / Resource Group %q): %s", id.FileName, id.ShareName, id.AccountName, account.ResourceGroup, err)
+	}
+
+	d.Set("name", id.FileName)
+	d.Set("path", id.DirectoryName)
+	d.Set("storage_share_id", parse.NewStorageShareDataPlaneId(id.AccountName, storageClient.Environment.StorageEndpointSuffix, id.ShareName).ID(""))
+
+	if err := d.Set("metadata", FlattenMetaData(props.MetaData)); err != nil {
+		return fmt.Errorf("setting `metadata`: %s", err)
+	}
+	d.Set("content_type", props.ContentType)
+	d.Set("content_encoding", props.ContentEncoding)
+	d.Set("content_md5", props.ContentMD5)
+	d.Set("content_disposition", props.ContentDisposition)
+
+	return nil
+}
+
+func resourceArmStorageShareFileDelete(d *schema.ResourceData, meta interface{}) error {
+	ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d)
+	defer
cancel() + storageClient := meta.(*clients.Client).Storage + + id, err := files.ParseResourceID(d.Id()) + if err != nil { + return err + } + + account, err := storageClient.FindAccount(ctx, id.AccountName) + if err != nil { + return fmt.Errorf("retrieving Account %q for File %q (Share %q): %s", id.AccountName, id.FileName, id.ShareName, err) + } + if account == nil { + return fmt.Errorf("unable to locate Storage Account %q", id.AccountName) + } + + client, err := storageClient.FileShareFilesClient(ctx, *account) + if err != nil { + return fmt.Errorf("building File Share File Client for Storage Account %q (Resource Group %q): %s", id.AccountName, account.ResourceGroup, err) + } + + if _, err := client.Delete(ctx, id.AccountName, id.ShareName, id.DirectoryName, id.FileName); err != nil { + return fmt.Errorf("deleting Storage Share File %q (File Share %q / Account %q / Resource Group %q): %s", id.FileName, id.ShareName, id.AccountName, account.ResourceGroup, err) + } + + return nil +} diff --git a/azurerm/internal/services/storage/tests/resource_arm_storage_share_file_test.go b/azurerm/internal/services/storage/tests/resource_arm_storage_share_file_test.go new file mode 100644 index 000000000000..1c7ac6f05b6c --- /dev/null +++ b/azurerm/internal/services/storage/tests/resource_arm_storage_share_file_test.go @@ -0,0 +1,239 @@ +package tests + +import ( + "context" + "fmt" + "io/ioutil" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" + "github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files" +) + +type StorageShareFileResource struct { +} + +func TestAccAzureRMStorageShareFile_basic(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_storage_share_file", "test") + r := StorageShareFileResource{} + + data.ResourceTest(t, r, []resource.TestStep{ + { + Config: r.basic(data), + Check: resource.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccAzureRMStorageShareFile_requiresImport(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_storage_share_file", "test") + r := StorageShareFileResource{} + + data.ResourceTest(t, r, []resource.TestStep{ + { + Config: r.basic(data), + Check: resource.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.RequiresImportErrorStep(r.requiresImport), + }) +} + +func TestAccAzureRMStorageShareFile_complete(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_storage_share_file", "test") + r := StorageShareFileResource{} + + data.ResourceTest(t, r, []resource.TestStep{ + { + Config: r.complete(data), + Check: resource.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccAzureRMStorageShareFile_update(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_storage_share_file", "test") + r := StorageShareFileResource{} + + data.ResourceTest(t, r, []resource.TestStep{ + { + Config: r.basic(data), + Check: resource.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + 
{ + Config: r.complete(data), + Check: resource.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.basic(data), + Check: resource.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccAzureRMStorageShareFile_withFile(t *testing.T) { + sourceBlob, err := ioutil.TempFile("", "") + if err != nil { + t.Fatalf("Failed to create local source blob file") + } + + if err := testAccAzureRMStorageBlob_populateTempFile(sourceBlob); err != nil { + t.Fatalf("Error populating temp file: %s", err) + } + data := acceptance.BuildTestData(t, "azurerm_storage_share_file", "test") + r := StorageShareFileResource{} + + data.ResourceTest(t, r, []resource.TestStep{ + { + Config: r.withFile(data, sourceBlob.Name()), + Check: resource.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep("source"), + }) +} + +func (StorageShareFileResource) Exists(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) (*bool, error) { + id, err := files.ParseResourceID(state.ID) + if err != nil { + return nil, err + } + + account, err := clients.Storage.FindAccount(ctx, id.AccountName) + if err != nil { + return nil, fmt.Errorf("retrieving Account %q for File %q (Share %q): %s", id.AccountName, id.FileName, id.ShareName, err) + } + if account == nil { + return utils.Bool(false), nil + } + + client, err := clients.Storage.FileShareFilesClient(ctx, *account) + if err != nil { + return nil, fmt.Errorf("building File Share Files Client: %s", err) + } + + resp, err := client.GetProperties(ctx, id.AccountName, id.ShareName, id.DirectoryName, id.FileName) + if err != nil { + if !utils.ResponseWasNotFound(resp.Response) { + return nil, fmt.Errorf("checking for presence of existing File %q (File Share %q / Storage Account %q / Resource Group %q): %s", id.FileName, id.ShareName, id.AccountName, account.ResourceGroup, err) + } + } + + return utils.Bool(true), nil +} + +func (StorageShareFileResource) template(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-storage-%d" + location = "%s" +} + +resource "azurerm_storage_account" "test" { + name = "acctestsa%s" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + account_tier = "Standard" + account_replication_type = "LRS" +} + +resource "azurerm_storage_share" "test" { + name = "fileshare" + storage_account_name = azurerm_storage_account.test.name + quota = 50 +} +`, data.RandomInteger, data.Locations.Primary, data.RandomString) +} + +func (r StorageShareFileResource) basic(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_storage_share_file" "test" { + name = "dir" + storage_share_id = azurerm_storage_share.test.id + + metadata = { + hello = "world" + } +} +`, r.template(data)) +} + +func (r StorageShareFileResource) requiresImport(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_storage_share_file" "import" { + name = azurerm_storage_share_file.test.name + storage_share_id = azurerm_storage_share_file.test.storage_share_id + + metadata = { + hello = "world" + } +} +`, r.basic(data)) +} + +func (r StorageShareFileResource) complete(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_storage_share_file" "test" { + name = "dir" 
+ storage_share_id = azurerm_storage_share.test.id + + + content_type = "test_content_type" + content_encoding = "test_encoding" + content_disposition = "test_content_disposition" + + metadata = { + hello = "world" + } +} +`, r.template(data)) +} + +func (r StorageShareFileResource) withFile(data acceptance.TestData, fileName string) string { + return fmt.Sprintf(` +%s + +resource "azurerm_storage_share_file" "test" { + name = "dir" + storage_share_id = azurerm_storage_share.test.id + + source = "%s" + + metadata = { + hello = "world" + } +} +`, r.template(data), fileName) +} diff --git a/azurerm/internal/services/storage/validate/storage_share_name.go b/azurerm/internal/services/storage/validate/storage_share_name.go index 820ed95f95ea..30aec2543137 100644 --- a/azurerm/internal/services/storage/validate/storage_share_name.go +++ b/azurerm/internal/services/storage/validate/storage_share_name.go @@ -3,6 +3,8 @@ package validate import ( "fmt" "regexp" + + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/storage/parse" ) func StorageShareName(v interface{}, k string) (warnings []string, errors []error) { @@ -28,3 +30,18 @@ func StorageShareName(v interface{}, k string) (warnings []string, errors []erro } return warnings, errors } + +func StorageShareID(i interface{}, k string) (warnings []string, errors []error) { + v, ok := i.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected type of %q to be string", k)) + return + } + + if _, err := parse.StorageShareDataPlaneID(v); err != nil { + errors = append(errors, fmt.Errorf("Can not parse %q as a resource id: %v", k, err)) + return + } + + return warnings, errors +} diff --git a/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/README.md b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/README.md new file mode 100644 index 000000000000..e9db27b5a862 --- /dev/null +++ b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/README.md @@ -0,0 +1,47 @@ +## File Storage Files SDK for API version 2019-12-12 + +This package allows you to interact with the Files File Storage API + +### Supported Authorizers + +* Azure Active Directory (for the Resource Endpoint `https://storage.azure.com`) +* SharedKeyLite (Blob, File & Queue) + +### Limitations + +* At this time the headers `x-ms-file-permission` and `x-ms-file-attributes` are hard-coded (to `inherit` and `None`, respectively). + +### Example Usage + +```go +package main + +import ( + "context" + "fmt" + "time" + + "github.com/Azure/go-autorest/autorest" + "github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files" +) + +func Example() error { + accountName := "storageaccount1" + storageAccountKey := "ABC123...." 
+ shareName := "myshare" + directoryName := "myfiles" + fileName := "example.txt" + + storageAuth := autorest.NewSharedKeyLiteAuthorizer(accountName, storageAccountKey) + filesClient := files.New() + filesClient.Client.Authorizer = storageAuth + + ctx := context.TODO() + input := files.CreateInput{} + if _, err := filesClient.Create(ctx, accountName, shareName, directoryName, fileName, input); err != nil { + return fmt.Errorf("Error creating File: %s", err) + } + + return nil +} +``` \ No newline at end of file diff --git a/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/api.go b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/api.go new file mode 100644 index 000000000000..96a0491e70da --- /dev/null +++ b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/api.go @@ -0,0 +1,28 @@ +package files + +import ( + "context" + "os" + "time" + + "github.com/Azure/go-autorest/autorest" +) + +type StorageFile interface { + PutByteRange(ctx context.Context, accountName, shareName, path, fileName string, input PutByteRangeInput) (result autorest.Response, err error) + GetByteRange(ctx context.Context, accountName, shareName, path, fileName string, input GetByteRangeInput) (result GetByteRangeResult, err error) + ClearByteRange(ctx context.Context, accountName, shareName, path, fileName string, input ClearByteRangeInput) (result autorest.Response, err error) + SetProperties(ctx context.Context, accountName, shareName, path, fileName string, input SetPropertiesInput) (result autorest.Response, err error) + PutFile(ctx context.Context, accountName, shareName, path, fileName string, file *os.File, parallelism int) error + Copy(ctx context.Context, accountName, shareName, path, fileName string, input CopyInput) (result CopyResult, err error) + SetMetaData(ctx context.Context, accountName, shareName, path, fileName string, metaData map[string]string) (result autorest.Response, err error) + GetMetaData(ctx context.Context, accountName, shareName, path, fileName string) (result GetMetaDataResult, err error) + AbortCopy(ctx context.Context, accountName, shareName, path, fileName, copyID string) (result autorest.Response, err error) + GetFile(ctx context.Context, accountName, shareName, path, fileName string, parallelism int) (result autorest.Response, outputBytes []byte, err error) + GetResourceID(accountName, shareName, directoryName, filePath string) string + ListRanges(ctx context.Context, accountName, shareName, path, fileName string) (result ListRangesResult, err error) + GetProperties(ctx context.Context, accountName, shareName, path, fileName string) (result GetResult, err error) + Delete(ctx context.Context, accountName, shareName, path, fileName string) (result autorest.Response, err error) + Create(ctx context.Context, accountName, shareName, path, fileName string, input CreateInput) (result autorest.Response, err error) + CopyAndWait(ctx context.Context, accountName, shareName, path, fileName string, input CopyInput, pollDuration time.Duration) (result CopyResult, err error) +} diff --git a/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/client.go b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/client.go new file mode 100644 index 000000000000..ecca81586b5a --- /dev/null +++ b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/client.go @@ -0,0 +1,25 @@ +package files + +import ( + "github.com/Azure/go-autorest/autorest" + 
"github.com/Azure/go-autorest/autorest/azure" +) + +// Client is the base client for File Storage Shares. +type Client struct { + autorest.Client + BaseURI string +} + +// New creates an instance of the Client client. +func New() Client { + return NewWithEnvironment(azure.PublicCloud) +} + +// NewWithEnvironment creates an instance of the Client client. +func NewWithEnvironment(environment azure.Environment) Client { + return Client{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + BaseURI: environment.StorageEndpointSuffix, + } +} diff --git a/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/copy.go b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/copy.go new file mode 100644 index 000000000000..31768b3d52b7 --- /dev/null +++ b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/copy.go @@ -0,0 +1,132 @@ +package files + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/tombuildsstuff/giovanni/storage/internal/endpoints" + "github.com/tombuildsstuff/giovanni/storage/internal/metadata" +) + +type CopyInput struct { + // Specifies the URL of the source file or blob, up to 2 KB in length. + // + // To copy a file to another file within the same storage account, you may use Shared Key to authenticate + // the source file. If you are copying a file from another storage account, or if you are copying a blob from + // the same storage account or another storage account, then you must authenticate the source file or blob using a + // shared access signature. If the source is a public blob, no authentication is required to perform the copy + // operation. A file in a share snapshot can also be specified as a copy source. + CopySource string + + MetaData map[string]string +} + +type CopyResult struct { + autorest.Response + + // The CopyID, which can be passed to AbortCopy to abort the copy. + CopyID string + + // Either `success` or `pending` + CopySuccess string +} + +// Copy copies a blob or file to a destination file within the storage account asynchronously. 
+func (client Client) Copy(ctx context.Context, accountName, shareName, path, fileName string, input CopyInput) (result CopyResult, err error) { + if accountName == "" { + return result, validation.NewError("files.Client", "Copy", "`accountName` cannot be an empty string.") + } + if shareName == "" { + return result, validation.NewError("files.Client", "Copy", "`shareName` cannot be an empty string.") + } + if strings.ToLower(shareName) != shareName { + return result, validation.NewError("files.Client", "Copy", "`shareName` must be a lower-cased string.") + } + if fileName == "" { + return result, validation.NewError("files.Client", "Copy", "`fileName` cannot be an empty string.") + } + if input.CopySource == "" { + return result, validation.NewError("files.Client", "Copy", "`input.CopySource` cannot be an empty string.") + } + if err := metadata.Validate(input.MetaData); err != nil { + return result, validation.NewError("files.Client", "Copy", fmt.Sprintf("`input.MetaData` is not valid: %s.", err)) + } + + req, err := client.CopyPreparer(ctx, accountName, shareName, path, fileName, input) + if err != nil { + err = autorest.NewErrorWithError(err, "files.Client", "Copy", nil, "Failure preparing request") + return + } + + resp, err := client.CopySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "files.Client", "Copy", resp, "Failure sending request") + return + } + + result, err = client.CopyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "files.Client", "Copy", resp, "Failure responding to request") + return + } + + return +} + +// CopyPreparer prepares the Copy request. +func (client Client) CopyPreparer(ctx context.Context, accountName, shareName, path, fileName string, input CopyInput) (*http.Request, error) { + if path != "" { + path = fmt.Sprintf("%s/", path) + } + pathParameters := map[string]interface{}{ + "shareName": autorest.Encode("path", shareName), + "directory": autorest.Encode("path", path), + "fileName": autorest.Encode("path", fileName), + } + + headers := map[string]interface{}{ + "x-ms-version": APIVersion, + "x-ms-copy-source": input.CopySource, + } + + headers = metadata.SetIntoHeaders(headers, input.MetaData) + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/xml; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(endpoints.GetFileEndpoint(client.BaseURI, accountName)), + autorest.WithPathParameters("/{shareName}/{directory}{fileName}", pathParameters), + autorest.WithHeaders(headers)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CopySender sends the Copy request. The method will close the +// http.Response Body if it receives an error. +func (client Client) CopySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// CopyResponder handles the response to the Copy request. The method always +// closes the http.Response Body. 
+func (client Client) CopyResponder(resp *http.Response) (result CopyResult, err error) { + if resp != nil && resp.Header != nil { + result.CopyID = resp.Header.Get("x-ms-copy-id") + result.CopySuccess = resp.Header.Get("x-ms-copy-status") + } + + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusAccepted), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + + return +} diff --git a/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/copy_abort.go b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/copy_abort.go new file mode 100644 index 000000000000..2f0913185888 --- /dev/null +++ b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/copy_abort.go @@ -0,0 +1,104 @@ +package files + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/tombuildsstuff/giovanni/storage/internal/endpoints" +) + +// AbortCopy aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata +func (client Client) AbortCopy(ctx context.Context, accountName, shareName, path, fileName, copyID string) (result autorest.Response, err error) { + if accountName == "" { + return result, validation.NewError("files.Client", "AbortCopy", "`accountName` cannot be an empty string.") + } + if shareName == "" { + return result, validation.NewError("files.Client", "AbortCopy", "`shareName` cannot be an empty string.") + } + if strings.ToLower(shareName) != shareName { + return result, validation.NewError("files.Client", "AbortCopy", "`shareName` must be a lower-cased string.") + } + if fileName == "" { + return result, validation.NewError("files.Client", "AbortCopy", "`fileName` cannot be an empty string.") + } + if copyID == "" { + return result, validation.NewError("files.Client", "AbortCopy", "`copyID` cannot be an empty string.") + } + + req, err := client.AbortCopyPreparer(ctx, accountName, shareName, path, fileName, copyID) + if err != nil { + err = autorest.NewErrorWithError(err, "files.Client", "AbortCopy", nil, "Failure preparing request") + return + } + + resp, err := client.AbortCopySender(req) + if err != nil { + result = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "files.Client", "AbortCopy", resp, "Failure sending request") + return + } + + result, err = client.AbortCopyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "files.Client", "AbortCopy", resp, "Failure responding to request") + return + } + + return +} + +// AbortCopyPreparer prepares the AbortCopy request. 
+func (client Client) AbortCopyPreparer(ctx context.Context, accountName, shareName, path, fileName, copyID string) (*http.Request, error) { + if path != "" { + path = fmt.Sprintf("%s/", path) + } + pathParameters := map[string]interface{}{ + "shareName": autorest.Encode("path", shareName), + "directory": autorest.Encode("path", path), + "fileName": autorest.Encode("path", fileName), + } + + queryParameters := map[string]interface{}{ + "comp": autorest.Encode("query", "copy"), + "copyid": autorest.Encode("query", copyID), + } + + headers := map[string]interface{}{ + "x-ms-version": APIVersion, + "x-ms-copy-action": "abort", + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/xml; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(endpoints.GetFileEndpoint(client.BaseURI, accountName)), + autorest.WithPathParameters("/{shareName}/{directory}{fileName}", pathParameters), + autorest.WithQueryParameters(queryParameters), + autorest.WithHeaders(headers)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// AbortCopySender sends the AbortCopy request. The method will close the +// http.Response Body if it receives an error. +func (client Client) AbortCopySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// AbortCopyResponder handles the response to the AbortCopy request. The method always +// closes the http.Response Body. +func (client Client) AbortCopyResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusNoContent), + autorest.ByClosing()) + result = autorest.Response{Response: resp} + + return +} diff --git a/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/copy_wait.go b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/copy_wait.go new file mode 100644 index 000000000000..e6a646b1017b --- /dev/null +++ b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/copy_wait.go @@ -0,0 +1,55 @@ +package files + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/Azure/go-autorest/autorest" +) + +type CopyAndWaitResult struct { + autorest.Response + + CopyID string +} + +const DefaultCopyPollDuration = 15 * time.Second + +// CopyAndWait is a convenience method which doesn't exist in the API, which copies the file and then waits for the copy to complete +func (client Client) CopyAndWait(ctx context.Context, accountName, shareName, path, fileName string, input CopyInput, pollDuration time.Duration) (result CopyResult, err error) { + copy, e := client.Copy(ctx, accountName, shareName, path, fileName, input) + if err != nil { + result.Response = copy.Response + err = fmt.Errorf("Error copying: %s", e) + return + } + + result.CopyID = copy.CopyID + + // since the API doesn't return a LRO, this is a hack which also polls every 10s, but should be sufficient + for true { + props, e := client.GetProperties(ctx, accountName, shareName, path, fileName) + if e != nil { + result.Response = copy.Response + err = fmt.Errorf("Error waiting for copy: %s", e) + return + } + + switch strings.ToLower(props.CopyStatus) { + case "pending": + time.Sleep(pollDuration) + continue + + case "success": + return + + default: + err = fmt.Errorf("Unexpected CopyState %q", e) + return + } + } + + return +} diff --git 
a/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/create.go b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/create.go new file mode 100644 index 000000000000..d2b4ff358120 --- /dev/null +++ b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/create.go @@ -0,0 +1,169 @@ +package files + +import ( + "context" + "fmt" + "net/http" + "strings" + "time" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/tombuildsstuff/giovanni/storage/internal/endpoints" + "github.com/tombuildsstuff/giovanni/storage/internal/metadata" +) + +type CreateInput struct { + // This header specifies the maximum size for the file, up to 1 TiB. + ContentLength int64 + + // The MIME content type of the file + // If not specified, the default type is application/octet-stream. + ContentType *string + + // Specifies which content encodings have been applied to the file. + // This value is returned to the client when the Get File operation is performed + // on the file resource and can be used to decode file content. + ContentEncoding *string + + // Specifies the natural languages used by this resource. + ContentLanguage *string + + // The File service stores this value but does not use or modify it. + CacheControl *string + + // Sets the file's MD5 hash. + ContentMD5 *string + + // Sets the file’s Content-Disposition header. + ContentDisposition *string + + // The time at which this file was created at - if omitted, this'll be set to "now" + // This maps to the `x-ms-file-creation-time` field. + CreatedAt *time.Time + + // The time at which this file was last modified - if omitted, this'll be set to "now" + // This maps to the `x-ms-file-last-write-time` field. + LastModified *time.Time + + // MetaData is a mapping of key value pairs which should be assigned to this file + MetaData map[string]string +} + +// Create creates a new file or replaces a file. 
+func (client Client) Create(ctx context.Context, accountName, shareName, path, fileName string, input CreateInput) (result autorest.Response, err error) { + if accountName == "" { + return result, validation.NewError("files.Client", "Create", "`accountName` cannot be an empty string.") + } + if shareName == "" { + return result, validation.NewError("files.Client", "Create", "`shareName` cannot be an empty string.") + } + if strings.ToLower(shareName) != shareName { + return result, validation.NewError("files.Client", "Create", "`shareName` must be a lower-cased string.") + } + if fileName == "" { + return result, validation.NewError("files.Client", "Create", "`fileName` cannot be an empty string.") + } + if err := metadata.Validate(input.MetaData); err != nil { + return result, validation.NewError("files.Client", "Create", "`input.MetaData` cannot be an empty string.") + } + + req, err := client.CreatePreparer(ctx, accountName, shareName, path, fileName, input) + if err != nil { + err = autorest.NewErrorWithError(err, "files.Client", "Create", nil, "Failure preparing request") + return + } + + resp, err := client.CreateSender(req) + if err != nil { + result = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "files.Client", "Create", resp, "Failure sending request") + return + } + + result, err = client.CreateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "files.Client", "Create", resp, "Failure responding to request") + return + } + + return +} + +// CreatePreparer prepares the Create request. +func (client Client) CreatePreparer(ctx context.Context, accountName, shareName, path, fileName string, input CreateInput) (*http.Request, error) { + if path != "" { + path = fmt.Sprintf("%s/", path) + } + pathParameters := map[string]interface{}{ + "shareName": autorest.Encode("path", shareName), + "directory": autorest.Encode("path", path), + "fileName": autorest.Encode("path", fileName), + } + + var coalesceDate = func(input *time.Time, defaultVal string) string { + if input == nil { + return defaultVal + } + + return input.Format(time.RFC1123) + } + + headers := map[string]interface{}{ + "x-ms-version": APIVersion, + "x-ms-content-length": input.ContentLength, + "x-ms-type": "file", + + "x-ms-file-permission": "inherit", // TODO: expose this in future + "x-ms-file-attributes": "None", // TODO: expose this in future + "x-ms-file-creation-time": coalesceDate(input.CreatedAt, "now"), + "x-ms-file-last-write-time": coalesceDate(input.LastModified, "now"), + } + + if input.ContentDisposition != nil { + headers["x-ms-content-disposition"] = *input.ContentDisposition + } + + if input.ContentEncoding != nil { + headers["x-ms-content-encoding"] = *input.ContentEncoding + } + + if input.ContentMD5 != nil { + headers["x-ms-content-md5"] = *input.ContentMD5 + } + + if input.ContentType != nil { + headers["x-ms-content-type"] = *input.ContentType + } + + headers = metadata.SetIntoHeaders(headers, input.MetaData) + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/xml; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(endpoints.GetFileEndpoint(client.BaseURI, accountName)), + autorest.WithPathParameters("/{shareName}/{directory}{fileName}", pathParameters), + autorest.WithHeaders(headers)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateSender sends the Create request. The method will close the +// http.Response Body if it receives an error. 
+func (client Client) CreateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// CreateResponder handles the response to the Create request. The method always +// closes the http.Response Body. +func (client Client) CreateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusCreated), + autorest.ByClosing()) + result = autorest.Response{Response: resp} + + return +} diff --git a/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/delete.go b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/delete.go new file mode 100644 index 000000000000..5debd767d1fd --- /dev/null +++ b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/delete.go @@ -0,0 +1,94 @@ +package files + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/tombuildsstuff/giovanni/storage/internal/endpoints" +) + +// Delete immediately deletes the file from the File Share. +func (client Client) Delete(ctx context.Context, accountName, shareName, path, fileName string) (result autorest.Response, err error) { + if accountName == "" { + return result, validation.NewError("files.Client", "Delete", "`accountName` cannot be an empty string.") + } + if shareName == "" { + return result, validation.NewError("files.Client", "Delete", "`shareName` cannot be an empty string.") + } + if strings.ToLower(shareName) != shareName { + return result, validation.NewError("files.Client", "Delete", "`shareName` must be a lower-cased string.") + } + if fileName == "" { + return result, validation.NewError("files.Client", "Delete", "`fileName` cannot be an empty string.") + } + + req, err := client.DeletePreparer(ctx, accountName, shareName, path, fileName) + if err != nil { + err = autorest.NewErrorWithError(err, "files.Client", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "files.Client", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "files.Client", "Delete", resp, "Failure responding to request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client Client) DeletePreparer(ctx context.Context, accountName, shareName, path, fileName string) (*http.Request, error) { + if path != "" { + path = fmt.Sprintf("%s/", path) + } + pathParameters := map[string]interface{}{ + "shareName": autorest.Encode("path", shareName), + "directory": autorest.Encode("path", path), + "fileName": autorest.Encode("path", fileName), + } + + headers := map[string]interface{}{ + "x-ms-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/xml; charset=utf-8"), + autorest.AsDelete(), + autorest.WithBaseURL(endpoints.GetFileEndpoint(client.BaseURI, accountName)), + autorest.WithPathParameters("/{shareName}/{directory}{fileName}", pathParameters), + autorest.WithHeaders(headers)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. 
The method will close the +// http.Response Body if it receives an error. +func (client Client) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client Client) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusAccepted), + autorest.ByClosing()) + result = autorest.Response{Response: resp} + + return +} diff --git a/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/metadata_get.go b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/metadata_get.go new file mode 100644 index 000000000000..fd62f90aec8f --- /dev/null +++ b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/metadata_get.go @@ -0,0 +1,111 @@ +package files + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/tombuildsstuff/giovanni/storage/internal/endpoints" + "github.com/tombuildsstuff/giovanni/storage/internal/metadata" +) + +type GetMetaDataResult struct { + autorest.Response + + MetaData map[string]string +} + +// GetMetaData returns the MetaData for the specified File. +func (client Client) GetMetaData(ctx context.Context, accountName, shareName, path, fileName string) (result GetMetaDataResult, err error) { + if accountName == "" { + return result, validation.NewError("files.Client", "GetMetaData", "`accountName` cannot be an empty string.") + } + if shareName == "" { + return result, validation.NewError("files.Client", "GetMetaData", "`shareName` cannot be an empty string.") + } + if strings.ToLower(shareName) != shareName { + return result, validation.NewError("files.Client", "GetMetaData", "`shareName` must be a lower-cased string.") + } + if fileName == "" { + return result, validation.NewError("files.Client", "GetMetaData", "`fileName` cannot be an empty string.") + } + + req, err := client.GetMetaDataPreparer(ctx, accountName, shareName, path, fileName) + if err != nil { + err = autorest.NewErrorWithError(err, "files.Client", "GetMetaData", nil, "Failure preparing request") + return + } + + resp, err := client.GetMetaDataSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "files.Client", "GetMetaData", resp, "Failure sending request") + return + } + + result, err = client.GetMetaDataResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "files.Client", "GetMetaData", resp, "Failure responding to request") + return + } + + return +} + +// GetMetaDataPreparer prepares the GetMetaData request. 
+func (client Client) GetMetaDataPreparer(ctx context.Context, accountName, shareName, path, fileName string) (*http.Request, error) { + if path != "" { + path = fmt.Sprintf("%s/", path) + } + pathParameters := map[string]interface{}{ + "shareName": autorest.Encode("path", shareName), + "directory": autorest.Encode("path", path), + "fileName": autorest.Encode("path", fileName), + } + + queryParameters := map[string]interface{}{ + "comp": autorest.Encode("query", "metadata"), + } + + headers := map[string]interface{}{ + "x-ms-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/xml; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(endpoints.GetFileEndpoint(client.BaseURI, accountName)), + autorest.WithPathParameters("/{shareName}/{directory}{fileName}", pathParameters), + autorest.WithQueryParameters(queryParameters), + autorest.WithHeaders(headers)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetMetaDataSender sends the GetMetaData request. The method will close the +// http.Response Body if it receives an error. +func (client Client) GetMetaDataSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// GetMetaDataResponder handles the response to the GetMetaData request. The method always +// closes the http.Response Body. +func (client Client) GetMetaDataResponder(resp *http.Response) (result GetMetaDataResult, err error) { + if resp != nil && resp.Header != nil { + result.MetaData = metadata.ParseFromHeaders(resp.Header) + } + + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + //metadata.ByParsingFromHeaders(&result.MetaData), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + + return +} diff --git a/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/metadata_set.go b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/metadata_set.go new file mode 100644 index 000000000000..41e3ffcb8ff9 --- /dev/null +++ b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/metadata_set.go @@ -0,0 +1,105 @@ +package files + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/tombuildsstuff/giovanni/storage/internal/endpoints" + "github.com/tombuildsstuff/giovanni/storage/internal/metadata" +) + +// SetMetaData updates the specified File to have the specified MetaData. 
+func (client Client) SetMetaData(ctx context.Context, accountName, shareName, path, fileName string, metaData map[string]string) (result autorest.Response, err error) { + if accountName == "" { + return result, validation.NewError("files.Client", "SetMetaData", "`accountName` cannot be an empty string.") + } + if shareName == "" { + return result, validation.NewError("files.Client", "SetMetaData", "`shareName` cannot be an empty string.") + } + if strings.ToLower(shareName) != shareName { + return result, validation.NewError("files.Client", "SetMetaData", "`shareName` must be a lower-cased string.") + } + if fileName == "" { + return result, validation.NewError("files.Client", "SetMetaData", "`fileName` cannot be an empty string.") + } + if err := metadata.Validate(metaData); err != nil { + return result, validation.NewError("files.Client", "SetMetaData", fmt.Sprintf("`metaData` is not valid: %s.", err)) + } + + req, err := client.SetMetaDataPreparer(ctx, accountName, shareName, path, fileName, metaData) + if err != nil { + err = autorest.NewErrorWithError(err, "files.Client", "SetMetaData", nil, "Failure preparing request") + return + } + + resp, err := client.SetMetaDataSender(req) + if err != nil { + result = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "files.Client", "SetMetaData", resp, "Failure sending request") + return + } + + result, err = client.SetMetaDataResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "files.Client", "SetMetaData", resp, "Failure responding to request") + return + } + + return +} + +// SetMetaDataPreparer prepares the SetMetaData request. +func (client Client) SetMetaDataPreparer(ctx context.Context, accountName, shareName, path, fileName string, metaData map[string]string) (*http.Request, error) { + if path != "" { + path = fmt.Sprintf("%s/", path) + } + pathParameters := map[string]interface{}{ + "shareName": autorest.Encode("path", shareName), + "directory": autorest.Encode("path", path), + "fileName": autorest.Encode("path", fileName), + } + + queryParameters := map[string]interface{}{ + "comp": autorest.Encode("query", "metadata"), + } + + headers := map[string]interface{}{ + "x-ms-version": APIVersion, + } + + headers = metadata.SetIntoHeaders(headers, metaData) + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/xml; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(endpoints.GetFileEndpoint(client.BaseURI, accountName)), + autorest.WithPathParameters("/{shareName}/{directory}{fileName}", pathParameters), + autorest.WithQueryParameters(queryParameters), + autorest.WithHeaders(headers)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// SetMetaDataSender sends the SetMetaData request. The method will close the +// http.Response Body if it receives an error. +func (client Client) SetMetaDataSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// SetMetaDataResponder handles the response to the SetMetaData request. The method always +// closes the http.Response Body. 
+func (client Client) SetMetaDataResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result = autorest.Response{Response: resp} + + return +} diff --git a/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/properties_get.go b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/properties_get.go new file mode 100644 index 000000000000..c6a0c399d2d7 --- /dev/null +++ b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/properties_get.go @@ -0,0 +1,144 @@ +package files + +import ( + "context" + "fmt" + "net/http" + "strconv" + "strings" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/tombuildsstuff/giovanni/storage/internal/endpoints" + "github.com/tombuildsstuff/giovanni/storage/internal/metadata" +) + +type GetResult struct { + autorest.Response + + CacheControl string + ContentDisposition string + ContentEncoding string + ContentLanguage string + ContentLength *int64 + ContentMD5 string + ContentType string + CopyID string + CopyStatus string + CopySource string + CopyProgress string + CopyStatusDescription string + CopyCompletionTime string + Encrypted bool + + MetaData map[string]string +} + +// GetProperties returns the Properties for the specified file +func (client Client) GetProperties(ctx context.Context, accountName, shareName, path, fileName string) (result GetResult, err error) { + if accountName == "" { + return result, validation.NewError("files.Client", "GetProperties", "`accountName` cannot be an empty string.") + } + if shareName == "" { + return result, validation.NewError("files.Client", "GetProperties", "`shareName` cannot be an empty string.") + } + if strings.ToLower(shareName) != shareName { + return result, validation.NewError("files.Client", "GetProperties", "`shareName` must be a lower-cased string.") + } + if fileName == "" { + return result, validation.NewError("files.Client", "GetProperties", "`fileName` cannot be an empty string.") + } + + req, err := client.GetPropertiesPreparer(ctx, accountName, shareName, path, fileName) + if err != nil { + err = autorest.NewErrorWithError(err, "files.Client", "GetProperties", nil, "Failure preparing request") + return + } + + resp, err := client.GetPropertiesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "files.Client", "GetProperties", resp, "Failure sending request") + return + } + + result, err = client.GetPropertiesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "files.Client", "GetProperties", resp, "Failure responding to request") + return + } + + return +} + +// GetPropertiesPreparer prepares the GetProperties request. 
+func (client Client) GetPropertiesPreparer(ctx context.Context, accountName, shareName, path, fileName string) (*http.Request, error) { + if path != "" { + path = fmt.Sprintf("%s/", path) + } + pathParameters := map[string]interface{}{ + "shareName": autorest.Encode("path", shareName), + "directory": autorest.Encode("path", path), + "fileName": autorest.Encode("path", fileName), + } + + headers := map[string]interface{}{ + "x-ms-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/xml; charset=utf-8"), + autorest.AsHead(), + autorest.WithBaseURL(endpoints.GetFileEndpoint(client.BaseURI, accountName)), + autorest.WithPathParameters("/{shareName}/{directory}{fileName}", pathParameters), + autorest.WithHeaders(headers)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetPropertiesSender sends the GetProperties request. The method will close the +// http.Response Body if it receives an error. +func (client Client) GetPropertiesSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// GetPropertiesResponder handles the response to the GetProperties request. The method always +// closes the http.Response Body. +func (client Client) GetPropertiesResponder(resp *http.Response) (result GetResult, err error) { + if resp != nil && resp.Header != nil { + result.CacheControl = resp.Header.Get("Cache-Control") + result.ContentDisposition = resp.Header.Get("Content-Disposition") + result.ContentEncoding = resp.Header.Get("Content-Encoding") + result.ContentLanguage = resp.Header.Get("Content-Language") + result.ContentMD5 = resp.Header.Get("x-ms-content-md5") + result.ContentType = resp.Header.Get("Content-Type") + result.CopyID = resp.Header.Get("x-ms-copy-id") + result.CopyProgress = resp.Header.Get("x-ms-copy-progress") + result.CopySource = resp.Header.Get("x-ms-copy-source") + result.CopyStatus = resp.Header.Get("x-ms-copy-status") + result.CopyStatusDescription = resp.Header.Get("x-ms-copy-status-description") + result.CopyCompletionTime = resp.Header.Get("x-ms-copy-completion-time") + result.Encrypted = strings.EqualFold(resp.Header.Get("x-ms-server-encrypted"), "true") + result.MetaData = metadata.ParseFromHeaders(resp.Header) + + contentLengthRaw := resp.Header.Get("Content-Length") + if contentLengthRaw != "" { + contentLength, err := strconv.Atoi(contentLengthRaw) + if err != nil { + return result, fmt.Errorf("Error parsing %q for Content-Length as an integer: %s", contentLengthRaw, err) + } + contentLengthI64 := int64(contentLength) + result.ContentLength = &contentLengthI64 + } + } + + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + + return +} diff --git a/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/properties_set.go b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/properties_set.go new file mode 100644 index 000000000000..521b1bba7be7 --- /dev/null +++ b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/properties_set.go @@ -0,0 +1,186 @@ +package files + +import ( + "context" + "fmt" + "net/http" + "strings" + "time" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + 
"github.com/tombuildsstuff/giovanni/storage/internal/endpoints" + "github.com/tombuildsstuff/giovanni/storage/internal/metadata" +) + +type SetPropertiesInput struct { + // Resizes a file to the specified size. + // If the specified byte value is less than the current size of the file, + // then all ranges above the specified byte value are cleared. + ContentLength int64 + + // Modifies the cache control string for the file. + // If this property is not specified on the request, then the property will be cleared for the file. + // Subsequent calls to Get File Properties will not return this property, + // unless it is explicitly set on the file again. + ContentControl *string + + // Sets the file’s Content-Disposition header. + // If this property is not specified on the request, then the property will be cleared for the file. + // Subsequent calls to Get File Properties will not return this property, + // unless it is explicitly set on the file again. + ContentDisposition *string + + // Sets the file's content encoding. + // If this property is not specified on the request, then the property will be cleared for the file. + // Subsequent calls to Get File Properties will not return this property, + // unless it is explicitly set on the file again. + ContentEncoding *string + + // Sets the file's content language. + // If this property is not specified on the request, then the property will be cleared for the file. + // Subsequent calls to Get File Properties will not return this property, + // unless it is explicitly set on the file again. + ContentLanguage *string + + // Sets the file's MD5 hash. + // If this property is not specified on the request, then the property will be cleared for the file. + // Subsequent calls to Get File Properties will not return this property, + // unless it is explicitly set on the file again. + ContentMD5 *string + + // Sets the file's content type. + // If this property is not specified on the request, then the property will be cleared for the file. + // Subsequent calls to Get File Properties will not return this property, + // unless it is explicitly set on the file again. + ContentType *string + + // The time at which this file was created at - if omitted, this'll be set to "now" + // This maps to the `x-ms-file-creation-time` field. + CreatedAt *time.Time + + // The time at which this file was last modified - if omitted, this'll be set to "now" + // This maps to the `x-ms-file-last-write-time` field. 
+ LastModified *time.Time + + // MetaData is a mapping of key value pairs which should be assigned to this file + MetaData map[string]string +} + +// SetProperties sets the specified properties on the specified File +func (client Client) SetProperties(ctx context.Context, accountName, shareName, path, fileName string, input SetPropertiesInput) (result autorest.Response, err error) { + if accountName == "" { + return result, validation.NewError("files.Client", "SetProperties", "`accountName` cannot be an empty string.") + } + if shareName == "" { + return result, validation.NewError("files.Client", "SetProperties", "`shareName` cannot be an empty string.") + } + if strings.ToLower(shareName) != shareName { + return result, validation.NewError("files.Client", "SetProperties", "`shareName` must be a lower-cased string.") + } + if fileName == "" { + return result, validation.NewError("files.Client", "SetProperties", "`fileName` cannot be an empty string.") + } + + req, err := client.SetPropertiesPreparer(ctx, accountName, shareName, path, fileName, input) + if err != nil { + err = autorest.NewErrorWithError(err, "files.Client", "SetProperties", nil, "Failure preparing request") + return + } + + resp, err := client.SetPropertiesSender(req) + if err != nil { + result = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "files.Client", "SetProperties", resp, "Failure sending request") + return + } + + result, err = client.SetPropertiesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "files.Client", "SetProperties", resp, "Failure responding to request") + return + } + + return +} + +// SetPropertiesPreparer prepares the SetProperties request. +func (client Client) SetPropertiesPreparer(ctx context.Context, accountName, shareName, path, fileName string, input SetPropertiesInput) (*http.Request, error) { + if path != "" { + path = fmt.Sprintf("%s/", path) + } + pathParameters := map[string]interface{}{ + "shareName": autorest.Encode("path", shareName), + "directory": autorest.Encode("path", path), + "fileName": autorest.Encode("path", fileName), + } + + var coalesceDate = func(input *time.Time, defaultVal string) string { + if input == nil { + return defaultVal + } + + return input.Format(time.RFC1123) + } + + headers := map[string]interface{}{ + "x-ms-version": APIVersion, + "x-ms-type": "file", + + "x-ms-content-length": input.ContentLength, + "x-ms-file-permission": "inherit", // TODO: expose this in future + "x-ms-file-attributes": "None", // TODO: expose this in future + "x-ms-file-creation-time": coalesceDate(input.CreatedAt, "now"), + "x-ms-file-last-write-time": coalesceDate(input.LastModified, "now"), + } + + if input.ContentControl != nil { + headers["x-ms-cache-control"] = *input.ContentControl + } + if input.ContentDisposition != nil { + headers["x-ms-content-disposition"] = *input.ContentDisposition + } + if input.ContentEncoding != nil { + headers["x-ms-content-encoding"] = *input.ContentEncoding + } + if input.ContentLanguage != nil { + headers["x-ms-content-language"] = *input.ContentLanguage + } + if input.ContentMD5 != nil { + headers["x-ms-content-md5"] = *input.ContentMD5 + } + if input.ContentType != nil { + headers["x-ms-content-type"] = *input.ContentType + } + + headers = metadata.SetIntoHeaders(headers, input.MetaData) + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/xml; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(endpoints.GetFileEndpoint(client.BaseURI, accountName)), + 
autorest.WithPathParameters("/{shareName}/{directory}{fileName}", pathParameters), + autorest.WithHeaders(headers)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// SetPropertiesSender sends the SetProperties request. The method will close the +// http.Response Body if it receives an error. +func (client Client) SetPropertiesSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// SetPropertiesResponder handles the response to the SetProperties request. The method always +// closes the http.Response Body. +func (client Client) SetPropertiesResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusCreated), + autorest.ByClosing()) + result = autorest.Response{Response: resp} + + return +} diff --git a/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/range_clear.go b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/range_clear.go new file mode 100644 index 000000000000..5d8145fae422 --- /dev/null +++ b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/range_clear.go @@ -0,0 +1,112 @@ +package files + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/tombuildsstuff/giovanni/storage/internal/endpoints" +) + +type ClearByteRangeInput struct { + StartBytes int64 + EndBytes int64 +} + +// ClearByteRange clears the specified Byte Range from within the specified File +func (client Client) ClearByteRange(ctx context.Context, accountName, shareName, path, fileName string, input ClearByteRangeInput) (result autorest.Response, err error) { + if accountName == "" { + return result, validation.NewError("files.Client", "ClearByteRange", "`accountName` cannot be an empty string.") + } + if shareName == "" { + return result, validation.NewError("files.Client", "ClearByteRange", "`shareName` cannot be an empty string.") + } + if strings.ToLower(shareName) != shareName { + return result, validation.NewError("files.Client", "ClearByteRange", "`shareName` must be a lower-cased string.") + } + if fileName == "" { + return result, validation.NewError("files.Client", "ClearByteRange", "`fileName` cannot be an empty string.") + } + if input.StartBytes < 0 { + return result, validation.NewError("files.Client", "ClearByteRange", "`input.StartBytes` must be greater or equal to 0.") + } + if input.EndBytes <= 0 { + return result, validation.NewError("files.Client", "ClearByteRange", "`input.EndBytes` must be greater than 0.") + } + + req, err := client.ClearByteRangePreparer(ctx, accountName, shareName, path, fileName, input) + if err != nil { + err = autorest.NewErrorWithError(err, "files.Client", "ClearByteRange", nil, "Failure preparing request") + return + } + + resp, err := client.ClearByteRangeSender(req) + if err != nil { + result = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "files.Client", "ClearByteRange", resp, "Failure sending request") + return + } + + result, err = client.ClearByteRangeResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "files.Client", "ClearByteRange", resp, "Failure responding to request") + return + } + + return +} + +// ClearByteRangePreparer prepares the ClearByteRange 
request. +func (client Client) ClearByteRangePreparer(ctx context.Context, accountName, shareName, path, fileName string, input ClearByteRangeInput) (*http.Request, error) { + if path != "" { + path = fmt.Sprintf("%s/", path) + } + pathParameters := map[string]interface{}{ + "shareName": autorest.Encode("path", shareName), + "directory": autorest.Encode("path", path), + "fileName": autorest.Encode("path", fileName), + } + + queryParameters := map[string]interface{}{ + "comp": autorest.Encode("query", "range"), + } + + headers := map[string]interface{}{ + "x-ms-version": APIVersion, + "x-ms-write": "clear", + "x-ms-range": fmt.Sprintf("bytes=%d-%d", input.StartBytes, input.EndBytes), + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/xml; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(endpoints.GetFileEndpoint(client.BaseURI, accountName)), + autorest.WithPathParameters("/{shareName}/{directory}{fileName}", pathParameters), + autorest.WithHeaders(headers), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ClearByteRangeSender sends the ClearByteRange request. The method will close the +// http.Response Body if it receives an error. +func (client Client) ClearByteRangeSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ClearByteRangeResponder handles the response to the ClearByteRange request. The method always +// closes the http.Response Body. +func (client Client) ClearByteRangeResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusCreated), + autorest.ByClosing()) + result = autorest.Response{Response: resp} + + return +} diff --git a/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/range_get.go b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/range_get.go new file mode 100644 index 000000000000..733d3f525105 --- /dev/null +++ b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/range_get.go @@ -0,0 +1,121 @@ +package files + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/tombuildsstuff/giovanni/storage/internal/endpoints" +) + +type GetByteRangeInput struct { + StartBytes int64 + EndBytes int64 +} + +type GetByteRangeResult struct { + autorest.Response + + Contents []byte +} + +// GetByteRange returns the specified Byte Range from the specified File. 
+func (client Client) GetByteRange(ctx context.Context, accountName, shareName, path, fileName string, input GetByteRangeInput) (result GetByteRangeResult, err error) { + if accountName == "" { + return result, validation.NewError("files.Client", "GetByteRange", "`accountName` cannot be an empty string.") + } + if shareName == "" { + return result, validation.NewError("files.Client", "GetByteRange", "`shareName` cannot be an empty string.") + } + if strings.ToLower(shareName) != shareName { + return result, validation.NewError("files.Client", "GetByteRange", "`shareName` must be a lower-cased string.") + } + if fileName == "" { + return result, validation.NewError("files.Client", "GetByteRange", "`fileName` cannot be an empty string.") + } + if input.StartBytes < 0 { + return result, validation.NewError("files.Client", "GetByteRange", "`input.StartBytes` must be greater or equal to 0.") + } + if input.EndBytes <= 0 { + return result, validation.NewError("files.Client", "GetByteRange", "`input.EndBytes` must be greater than 0.") + } + expectedBytes := input.EndBytes - input.StartBytes + if expectedBytes < (4 * 1024) { + return result, validation.NewError("files.Client", "GetByteRange", "Requested Byte Range must be at least 4KB.") + } + if expectedBytes > (4 * 1024 * 1024) { + return result, validation.NewError("files.Client", "GetByteRange", "Requested Byte Range must be at most 4MB.") + } + + req, err := client.GetByteRangePreparer(ctx, accountName, shareName, path, fileName, input) + if err != nil { + err = autorest.NewErrorWithError(err, "files.Client", "GetByteRange", nil, "Failure preparing request") + return + } + + resp, err := client.GetByteRangeSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "files.Client", "GetByteRange", resp, "Failure sending request") + return + } + + result, err = client.GetByteRangeResponder(resp, expectedBytes) + if err != nil { + err = autorest.NewErrorWithError(err, "files.Client", "GetByteRange", resp, "Failure responding to request") + return + } + + return +} + +// GetByteRangePreparer prepares the GetByteRange request. +func (client Client) GetByteRangePreparer(ctx context.Context, accountName, shareName, path, fileName string, input GetByteRangeInput) (*http.Request, error) { + if path != "" { + path = fmt.Sprintf("%s/", path) + } + pathParameters := map[string]interface{}{ + "shareName": autorest.Encode("path", shareName), + "directory": autorest.Encode("path", path), + "fileName": autorest.Encode("path", fileName), + } + + headers := map[string]interface{}{ + "x-ms-version": APIVersion, + "x-ms-range": fmt.Sprintf("bytes=%d-%d", input.StartBytes, input.EndBytes-1), + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(endpoints.GetFileEndpoint(client.BaseURI, accountName)), + autorest.WithPathParameters("/{shareName}/{directory}{fileName}", pathParameters), + autorest.WithHeaders(headers)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetByteRangeSender sends the GetByteRange request. The method will close the +// http.Response Body if it receives an error. +func (client Client) GetByteRangeSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// GetByteRangeResponder handles the response to the GetByteRange request. The method always +// closes the http.Response Body. 
+func (client Client) GetByteRangeResponder(resp *http.Response, length int64) (result GetByteRangeResult, err error) { + result.Contents = make([]byte, length) + + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusPartialContent), + autorest.ByUnmarshallingBytes(&result.Contents), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + + return +} diff --git a/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/range_get_file.go b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/range_get_file.go new file mode 100644 index 000000000000..9e5be17f85fc --- /dev/null +++ b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/range_get_file.go @@ -0,0 +1,128 @@ +package files + +import ( + "context" + "fmt" + "log" + "math" + "runtime" + "sync" + + "github.com/Azure/go-autorest/autorest" +) + +// GetFile is a helper method to download a file by chunking it automatically +func (client Client) GetFile(ctx context.Context, accountName, shareName, path, fileName string, parallelism int) (result autorest.Response, outputBytes []byte, err error) { + + // first look up the file and check out how many bytes it is + file, e := client.GetProperties(ctx, accountName, shareName, path, fileName) + if err != nil { + result = file.Response + err = e + return + } + + if file.ContentLength == nil { + err = fmt.Errorf("Content-Length was nil!") + return + } + + length := int64(*file.ContentLength) + chunkSize := int64(4 * 1024 * 1024) // 4MB + + if chunkSize > length { + chunkSize = length + } + + // then split that up into chunks and retrieve it retrieve it into the 'results' set + chunks := int(math.Ceil(float64(length) / float64(chunkSize))) + workerCount := parallelism * runtime.NumCPU() + if workerCount > chunks { + workerCount = chunks + } + + var waitGroup sync.WaitGroup + waitGroup.Add(workerCount) + + results := make([]*downloadFileChunkResult, chunks) + errors := make(chan error, chunkSize) + + for i := 0; i < chunks; i++ { + go func(i int) { + log.Printf("[DEBUG] Downloading Chunk %d of %d", i+1, chunks) + + dfci := downloadFileChunkInput{ + thisChunk: i, + chunkSize: chunkSize, + fileSize: length, + } + + result, err := client.downloadFileChunk(ctx, accountName, shareName, path, fileName, dfci) + if err != nil { + errors <- err + waitGroup.Done() + return + } + + // if there's no error, we should have bytes, so this is safe + results[i] = result + + waitGroup.Done() + }(i) + } + waitGroup.Wait() + + // TODO: we should switch to hashicorp/multi-error here + if len(errors) > 0 { + err = fmt.Errorf("Error downloading file: %s", <-errors) + return + } + + // then finally put it all together, in order and return it + output := make([]byte, length) + for _, v := range results { + copy(output[v.startBytes:v.endBytes], v.bytes) + } + + outputBytes = output + return +} + +type downloadFileChunkInput struct { + thisChunk int + chunkSize int64 + fileSize int64 +} + +type downloadFileChunkResult struct { + startBytes int64 + endBytes int64 + bytes []byte +} + +func (client Client) downloadFileChunk(ctx context.Context, accountName, shareName, path, fileName string, input downloadFileChunkInput) (*downloadFileChunkResult, error) { + startBytes := input.chunkSize * int64(input.thisChunk) + endBytes := startBytes + input.chunkSize + + // the last chunk may exceed the size of the file + remaining := input.fileSize - startBytes + if input.chunkSize > remaining { + 
endBytes = startBytes + remaining + } + + getInput := GetByteRangeInput{ + StartBytes: startBytes, + EndBytes: endBytes, + } + result, err := client.GetByteRange(ctx, accountName, shareName, path, fileName, getInput) + if err != nil { + return nil, fmt.Errorf("Error putting bytes: %s", err) + } + + output := downloadFileChunkResult{ + startBytes: startBytes, + endBytes: endBytes, + bytes: result.Contents, + } + return &output, nil +} diff --git a/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/range_put.go b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/range_put.go new file mode 100644 index 000000000000..208becc34bd1 --- /dev/null +++ b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/range_put.go @@ -0,0 +1,130 @@ +package files + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/tombuildsstuff/giovanni/storage/internal/endpoints" +) + +type PutByteRangeInput struct { + StartBytes int64 + EndBytes int64 + + // Content is the File Contents for the specified range + // which can be at most 4MB + Content []byte +} + +// PutByteRange puts the specified Byte Range in the specified File. +func (client Client) PutByteRange(ctx context.Context, accountName, shareName, path, fileName string, input PutByteRangeInput) (result autorest.Response, err error) { + if accountName == "" { + return result, validation.NewError("files.Client", "PutByteRange", "`accountName` cannot be an empty string.") + } + if shareName == "" { + return result, validation.NewError("files.Client", "PutByteRange", "`shareName` cannot be an empty string.") + } + if strings.ToLower(shareName) != shareName { + return result, validation.NewError("files.Client", "PutByteRange", "`shareName` must be a lower-cased string.") + } + if fileName == "" { + return result, validation.NewError("files.Client", "PutByteRange", "`fileName` cannot be an empty string.") + } + if input.StartBytes < 0 { + return result, validation.NewError("files.Client", "PutByteRange", "`input.StartBytes` must be greater or equal to 0.") + } + if input.EndBytes <= 0 { + return result, validation.NewError("files.Client", "PutByteRange", "`input.EndBytes` must be greater than 0.") + } + + expectedBytes := input.EndBytes - input.StartBytes + actualBytes := len(input.Content) + if expectedBytes != int64(actualBytes) { + return result, validation.NewError("files.Client", "PutByteRange", fmt.Sprintf("The specified byte-range (%d) didn't match the content size (%d).", expectedBytes, actualBytes)) + } + if expectedBytes < (4 * 1024) { + return result, validation.NewError("files.Client", "PutByteRange", "Specified Byte Range must be at least 4KB.") + } + + if expectedBytes > (4 * 1024 * 1024) { + return result, validation.NewError("files.Client", "PutByteRange", "Specified Byte Range must be at most 4MB.") + } + + req, err := client.PutByteRangePreparer(ctx, accountName, shareName, path, fileName, input) + if err != nil { + err = autorest.NewErrorWithError(err, "files.Client", "PutByteRange", nil, "Failure preparing request") + return + } + + resp, err := client.PutByteRangeSender(req) + if err != nil { + result = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "files.Client", "PutByteRange", resp, "Failure sending request") + return + } + + result, err = client.PutByteRangeResponder(resp) + if err != nil { + 
err = autorest.NewErrorWithError(err, "files.Client", "PutByteRange", resp, "Failure responding to request") + return + } + + return +} + +// PutByteRangePreparer prepares the PutByteRange request. +func (client Client) PutByteRangePreparer(ctx context.Context, accountName, shareName, path, fileName string, input PutByteRangeInput) (*http.Request, error) { + if path != "" { + path = fmt.Sprintf("%s/", path) + } + pathParameters := map[string]interface{}{ + "shareName": autorest.Encode("path", shareName), + "directory": autorest.Encode("path", path), + "fileName": autorest.Encode("path", fileName), + } + + queryParameters := map[string]interface{}{ + "comp": autorest.Encode("query", "range"), + } + + headers := map[string]interface{}{ + "x-ms-version": APIVersion, + "x-ms-write": "update", + "x-ms-range": fmt.Sprintf("bytes=%d-%d", input.StartBytes, input.EndBytes-1), + "Content-Length": int(len(input.Content)), + } + + preparer := autorest.CreatePreparer( + autorest.AsPut(), + autorest.WithBaseURL(endpoints.GetFileEndpoint(client.BaseURI, accountName)), + autorest.WithPathParameters("/{shareName}/{directory}{fileName}", pathParameters), + autorest.WithHeaders(headers), + autorest.WithQueryParameters(queryParameters), + autorest.WithBytes(&input.Content)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// PutByteRangeSender sends the PutByteRange request. The method will close the +// http.Response Body if it receives an error. +func (client Client) PutByteRangeSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// PutByteRangeResponder handles the response to the PutByteRange request. The method always +// closes the http.Response Body. 
+func (client Client) PutByteRangeResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusCreated), + autorest.ByClosing()) + result = autorest.Response{Response: resp} + + return +} diff --git a/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/range_put_file.go b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/range_put_file.go new file mode 100644 index 000000000000..a39cd377cee8 --- /dev/null +++ b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/range_put_file.go @@ -0,0 +1,107 @@ +package files + +import ( + "context" + "fmt" + "io" + "log" + "math" + "os" + "runtime" + "sync" + + "github.com/Azure/go-autorest/autorest" +) + +// PutFile is a helper method which takes a file, and automatically chunks it up, rather than having to do this yourself +func (client Client) PutFile(ctx context.Context, accountName, shareName, path, fileName string, file *os.File, parallelism int) error { + fileInfo, err := file.Stat() + if err != nil { + return fmt.Errorf("Error loading file info: %s", err) + } + + fileSize := fileInfo.Size() + chunkSize := 4 * 1024 * 1024 // 4MB + if chunkSize > int(fileSize) { + chunkSize = int(fileSize) + } + chunks := int(math.Ceil(float64(fileSize) / float64(chunkSize*1.0))) + + workerCount := parallelism * runtime.NumCPU() + if workerCount > chunks { + workerCount = chunks + } + + var waitGroup sync.WaitGroup + waitGroup.Add(workerCount) + errors := make(chan error, chunkSize) + + for i := 0; i < chunks; i++ { + go func(i int) { + log.Printf("[DEBUG] Chunk %d of %d", i+1, chunks) + + uci := uploadChunkInput{ + thisChunk: i, + chunkSize: chunkSize, + fileSize: fileSize, + } + + _, err := client.uploadChunk(ctx, accountName, shareName, path, fileName, uci, file) + if err != nil { + errors <- err + waitGroup.Done() + return + } + + waitGroup.Done() + return + }(i) + } + waitGroup.Wait() + + // TODO: we should switch to hashicorp/multi-error here + if len(errors) > 0 { + return fmt.Errorf("Error uploading file: %s", <-errors) + } + + return nil +} + +type uploadChunkInput struct { + thisChunk int + chunkSize int + fileSize int64 +} + +func (client Client) uploadChunk(ctx context.Context, accountName, shareName, path, fileName string, input uploadChunkInput, file *os.File) (result autorest.Response, err error) { + startBytes := int64(input.chunkSize * input.thisChunk) + endBytes := startBytes + int64(input.chunkSize) + + // the last size may exceed the size of the file + remaining := input.fileSize - startBytes + if int64(input.chunkSize) > remaining { + endBytes = startBytes + remaining + } + + bytesToRead := int(endBytes) - int(startBytes) + bytes := make([]byte, bytesToRead) + + _, err = file.ReadAt(bytes, startBytes) + if err != nil { + if err != io.EOF { + return result, fmt.Errorf("Error reading bytes: %s", err) + } + } + + putBytesInput := PutByteRangeInput{ + StartBytes: startBytes, + EndBytes: endBytes, + Content: bytes, + } + result, err = client.PutByteRange(ctx, accountName, shareName, path, fileName, putBytesInput) + if err != nil { + return result, fmt.Errorf("Error putting bytes: %s", err) + } + + return +} diff --git a/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/ranges_list.go b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/ranges_list.go new file mode 100644 index 000000000000..ea309f97ddb2 --- /dev/null +++ 
b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/ranges_list.go @@ -0,0 +1,114 @@ +package files + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/tombuildsstuff/giovanni/storage/internal/endpoints" +) + +type ListRangesResult struct { + autorest.Response + + Ranges []Range `xml:"Range"` +} + +type Range struct { + Start string `xml:"Start"` + End string `xml:"End"` +} + +// ListRanges returns the list of valid ranges for the specified File. +func (client Client) ListRanges(ctx context.Context, accountName, shareName, path, fileName string) (result ListRangesResult, err error) { + if accountName == "" { + return result, validation.NewError("files.Client", "ListRanges", "`accountName` cannot be an empty string.") + } + if shareName == "" { + return result, validation.NewError("files.Client", "ListRanges", "`shareName` cannot be an empty string.") + } + if strings.ToLower(shareName) != shareName { + return result, validation.NewError("files.Client", "ListRanges", "`shareName` must be a lower-cased string.") + } + if path == "" { + return result, validation.NewError("files.Client", "ListRanges", "`path` cannot be an empty string.") + } + if fileName == "" { + return result, validation.NewError("files.Client", "ListRanges", "`fileName` cannot be an empty string.") + } + + req, err := client.ListRangesPreparer(ctx, accountName, shareName, path, fileName) + if err != nil { + err = autorest.NewErrorWithError(err, "files.Client", "ListRanges", nil, "Failure preparing request") + return + } + + resp, err := client.ListRangesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "files.Client", "ListRanges", resp, "Failure sending request") + return + } + + result, err = client.ListRangesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "files.Client", "ListRanges", resp, "Failure responding to request") + return + } + + return +} + +// ListRangesPreparer prepares the ListRanges request. +func (client Client) ListRangesPreparer(ctx context.Context, accountName, shareName, path, fileName string) (*http.Request, error) { + if path != "" { + path = fmt.Sprintf("%s/", path) + } + pathParameters := map[string]interface{}{ + "shareName": autorest.Encode("path", shareName), + "directory": autorest.Encode("path", path), + "fileName": autorest.Encode("path", fileName), + } + + queryParameters := map[string]interface{}{ + "comp": autorest.Encode("query", "rangelist"), + } + + headers := map[string]interface{}{ + "x-ms-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/xml; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(endpoints.GetFileEndpoint(client.BaseURI, accountName)), + autorest.WithPathParameters("/{shareName}/{directory}{fileName}", pathParameters), + autorest.WithHeaders(headers), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListRangesSender sends the ListRanges request. The method will close the +// http.Response Body if it receives an error. 
+func (client Client) ListRangesSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListRangesResponder handles the response to the ListRanges request. The method always +// closes the http.Response Body. +func (client Client) ListRangesResponder(resp *http.Response) (result ListRangesResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingXML(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + + return +} diff --git a/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/resource_id.go b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/resource_id.go new file mode 100644 index 000000000000..f18e702e81d9 --- /dev/null +++ b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/resource_id.go @@ -0,0 +1,64 @@ +package files + +import ( + "fmt" + "net/url" + "strings" + + "github.com/tombuildsstuff/giovanni/storage/internal/endpoints" +) + +// GetResourceID returns the Resource ID for the given File +// This can be useful when, for example, you're using this as a unique identifier +func (client Client) GetResourceID(accountName, shareName, directoryName, filePath string) string { + domain := endpoints.GetFileEndpoint(client.BaseURI, accountName) + return fmt.Sprintf("%s/%s/%s/%s", domain, shareName, directoryName, filePath) +} + +type ResourceID struct { + AccountName string + DirectoryName string + FileName string + ShareName string +} + +// ParseResourceID parses the specified Resource ID and returns an object +// which can be used to interact with Files within a Storage Share. 
+func ParseResourceID(id string) (*ResourceID, error) { + // example: https://account1.file.core.chinacloudapi.cn/share1/directory1/file1.txt + // example: https://account1.file.core.chinacloudapi.cn/share1/directory1/directory2/file1.txt + + if id == "" { + return nil, fmt.Errorf("`id` was empty") + } + + uri, err := url.Parse(id) + if err != nil { + return nil, fmt.Errorf("Error parsing ID as a URL: %s", err) + } + + accountName, err := endpoints.GetAccountNameFromEndpoint(uri.Host) + if err != nil { + return nil, fmt.Errorf("Error parsing Account Name: %s", err) + } + + path := strings.TrimPrefix(uri.Path, "/") + segments := strings.Split(path, "/") + if len(segments) == 0 { + return nil, fmt.Errorf("Expected the path to contain segments but got none") + } + + shareName := segments[0] + fileName := segments[len(segments)-1] + + directoryName := strings.TrimPrefix(path, shareName) + directoryName = strings.TrimPrefix(directoryName, "/") + directoryName = strings.TrimSuffix(directoryName, fileName) + directoryName = strings.TrimSuffix(directoryName, "/") + return &ResourceID{ + AccountName: *accountName, + ShareName: shareName, + DirectoryName: directoryName, + FileName: fileName, + }, nil +} diff --git a/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/version.go b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/version.go new file mode 100644 index 000000000000..90529be1118f --- /dev/null +++ b/vendor/github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files/version.go @@ -0,0 +1,14 @@ +package files + +import ( + "fmt" + + "github.com/tombuildsstuff/giovanni/version" +) + +// APIVersion is the version of the API used for all Storage API Operations +const APIVersion = "2019-12-12" + +func UserAgent() string { + return fmt.Sprintf("tombuildsstuff/giovanni/%s storage/%s", version.Number, APIVersion) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index d248f898035b..87f2817392e3 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -376,6 +376,7 @@ github.com/tombuildsstuff/giovanni/storage/2019-12-12/blob/containers github.com/tombuildsstuff/giovanni/storage/2019-12-12/datalakestore/filesystems github.com/tombuildsstuff/giovanni/storage/2019-12-12/datalakestore/paths github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/directories +github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/files github.com/tombuildsstuff/giovanni/storage/2019-12-12/file/shares github.com/tombuildsstuff/giovanni/storage/2019-12-12/queue/queues github.com/tombuildsstuff/giovanni/storage/2019-12-12/table/entities diff --git a/website/azurerm.erb b/website/azurerm.erb index 5250d564dbb5..7e5c8c80cac3 100644 --- a/website/azurerm.erb +++ b/website/azurerm.erb @@ -2916,6 +2916,10 @@ azurerm_storage_share_directory +
  • + azurerm_storage_share_file +
  • azurerm_storage_sync
diff --git a/website/docs/r/storage_share_file.html.markdown b/website/docs/r/storage_share_file.html.markdown
new file mode 100644
index 000000000000..7368176e2476
--- /dev/null
+++ b/website/docs/r/storage_share_file.html.markdown
@@ -0,0 +1,85 @@
+---
+subcategory: "Storage"
+layout: "azurerm"
+page_title: "Azure Resource Manager: azurerm_storage_share_file"
+description: |-
+  Manages a File within an Azure Storage File Share.
+---
+
+# azurerm_storage_share_file
+
+Manages a File within an Azure Storage File Share.
+
+## Example Usage
+
+```hcl
+resource "azurerm_resource_group" "example" {
+  name     = "example-resources"
+  location = "West Europe"
+}
+
+resource "azurerm_storage_account" "example" {
+  name                     = "azureteststorage"
+  resource_group_name      = azurerm_resource_group.example.name
+  location                 = azurerm_resource_group.example.location
+  account_tier             = "Standard"
+  account_replication_type = "LRS"
+}
+
+resource "azurerm_storage_share" "example" {
+  name                 = "sharename"
+  storage_account_name = azurerm_storage_account.example.name
+  quota                = 50
+}
+
+resource "azurerm_storage_share_file" "example" {
+  name             = "my-awesome-content.zip"
+  storage_share_id = azurerm_storage_share.example.id
+  source           = "some-local-file.zip"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) The name (or path) of the File that should be created within this File Share. Changing this forces a new resource to be created.
+
+* `storage_share_id` - (Required) The Storage Share ID in which this file will be placed. Changing this forces a new resource to be created.
+
+* `path` - (Optional) The storage share directory that you would like the file placed into. Changing this forces a new resource to be created.
+
+* `source` - (Optional) An absolute path to a file on the local system. Changing this forces a new resource to be created.
+
+* `content_type` - (Optional) The content type of the share file. Defaults to `application/octet-stream`.
+
+* `content_md5` - (Optional) The MD5 sum of the file contents. Changing this forces a new resource to be created.
+
+* `content_encoding` - (Optional) Specifies which content encodings have been applied to the file.
+
+* `content_disposition` - (Optional) Sets the file’s Content-Disposition header.
+
+* `metadata` - (Optional) A mapping of metadata to assign to this file.
+
+## Attributes Reference
+
+The following attributes are exported in addition to the arguments listed above:
+
+* `id` - The ID of the file within the File Share.
+
+## Timeouts
+
+The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/docs/configuration/resources.html#timeouts) for certain actions:
+
+* `create` - (Defaults to 30 minutes) Used when creating the Storage Share File.
+* `update` - (Defaults to 30 minutes) Used when updating the Storage Share File.
+* `read` - (Defaults to 5 minutes) Used when retrieving the Storage Share File.
+* `delete` - (Defaults to 30 minutes) Used when deleting the Storage Share File.
+
+## Import
+
+Files within an Azure Storage File Share can be imported using the `resource id`, e.g.
+
+```shell
+terraform import azurerm_storage_share_file.example https://account1.file.core.windows.net/share1/file1
+```
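A minimal additional sketch of the same resource used together with `azurerm_storage_share_directory`, showing the `path`, `content_type` and `metadata` arguments. It assumes the account and share resources from the example above; the `assets` directory name, the `./files/config.json` source path and the metadata values are illustrative only, and the directory is assumed to exist before the file is created (here it is managed explicitly).

```hcl
# Directory inside the share that the file will be placed into.
resource "azurerm_storage_share_directory" "example" {
  name                 = "assets"
  share_name           = azurerm_storage_share.example.name
  storage_account_name = azurerm_storage_account.example.name
}

resource "azurerm_storage_share_file" "config" {
  name             = "config.json"
  storage_share_id = azurerm_storage_share.example.id

  # Place the file inside the directory created above rather than at the share root.
  path   = azurerm_storage_share_directory.example.name
  source = "./files/config.json"

  # Optional content headers and metadata stored alongside the file.
  content_type = "application/json"

  metadata = {
    environment = "example"
  }
}
```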