diff --git a/eng/config.json b/eng/config.json index f4db5206b977..74f46a216e79 100644 --- a/eng/config.json +++ b/eng/config.json @@ -28,6 +28,10 @@ "Name": "azqueue", "CoverageGoal": 0.60 }, + { + "Name": "azfile", + "CoverageGoal": 0.75 + }, { "Name": "aztemplate", "CoverageGoal": 0.50 diff --git a/sdk/storage/azfile/CHANGELOG.md b/sdk/storage/azfile/CHANGELOG.md new file mode 100644 index 000000000000..04f97b45434f --- /dev/null +++ b/sdk/storage/azfile/CHANGELOG.md @@ -0,0 +1,7 @@ +# Release History + +## 0.1.0 (Unreleased) + +### Features Added + +* This is the initial preview release of the `azfile` library diff --git a/sdk/storage/azfile/LICENSE.txt b/sdk/storage/azfile/LICENSE.txt new file mode 100644 index 000000000000..d1ca00f20a89 --- /dev/null +++ b/sdk/storage/azfile/LICENSE.txt @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. All rights reserved. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE \ No newline at end of file diff --git a/sdk/storage/azfile/README.md b/sdk/storage/azfile/README.md new file mode 100644 index 000000000000..013c2d022248 --- /dev/null +++ b/sdk/storage/azfile/README.md @@ -0,0 +1,266 @@ +# Azure File Storage SDK for Go + +> Service Version: 2020-10-02 + +Azure File Shares offers fully managed file shares in the cloud that are accessible via the industry standard +[Server Message Block (SMB) protocol](https://docs.microsoft.com/windows/desktop/FileIO/microsoft-smb-protocol-and-cifs-protocol-overview). +Azure file shares can be mounted concurrently by cloud or on-premises deployments of Windows, Linux, and macOS. +Additionally, Azure file shares can be cached on Windows Servers with Azure File Sync for fast access near where the data is being used. + +[Source code][source] | [API reference documentation][docs] | [REST API documentation][rest_docs] | [Product documentation][product_docs] + +## Getting started + +### Install the package + +Install the Azure File Storage SDK for Go with [go get][goget]: + +```Powershell +go get github.com/Azure/azure-sdk-for-go/sdk/storage/azfile +``` + +### Prerequisites + +A supported [Go][godevdl] version (the Azure SDK supports the two most recent Go releases). + +You need an [Azure subscription][azure_sub] and a +[Storage Account][storage_account_docs] to use this package. + +To create a new Storage Account, you can use the [Azure Portal][storage_account_create_portal], +[Azure PowerShell][storage_account_create_ps], or the [Azure CLI][storage_account_create_cli]. 
+Here's an example using the Azure CLI: + +```Powershell +az storage account create --name MyStorageAccount --resource-group MyResourceGroup --location westus --sku Standard_LRS +``` + +### Authenticate the client + +The Azure File Storage SDK for Go allows you to interact with four types of resources: the storage +account itself, file shares, directories, and files. Interaction with these resources starts with an instance of a +client. To create a client object, you will need the storage account's file service URL and a +credential that allows you to access the storage account: + +```go +// create a credential for authenticating using shared key +cred, err := service.NewSharedKeyCredential("", "") +// TODO: handle err + +// create service.Client for the specified storage account that uses the above credential +client, err := service.NewClientWithSharedKeyCredential("https://.file.core.windows.net/", cred, nil) +// TODO: handle err +``` + +## Key concepts + +Azure file shares can be used to: + +- Completely replace or supplement traditional on-premises file servers or NAS devices. +- "Lift and shift" applications to the cloud that expect a file share to store file application or user data. +- Simplify new cloud development projects with shared application settings, diagnostic shares, and Dev/Test/Debug tool file shares. + +### Goroutine safety +We guarantee that all client instance methods are goroutine-safe and independent of each other ([guideline](https://azure.github.io/azure-sdk/golang_introduction.html#thread-safety)). This ensures that the recommendation of reusing client instances is always safe, even across goroutines. 
+ +### Additional concepts + +[Client options](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy#ClientOptions) | +[Accessing the response](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime#WithCaptureResponse) | +[Handling failures](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore#ResponseError) | +[Logging](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore/log) + + +## Examples + +### Create a share and upload a file + +```go +const ( +shareName = "sample-share" +dirName = "sample-dir" +fileName = "sample-file" +) + +// Get a connection string to our Azure Storage account. You can +// obtain your connection string from the Azure Portal (click +// Access Keys under Settings in the Portal Storage account blade) +// or using the Azure CLI with: +// +// az storage account show-connection-string --name --resource-group +// +// And you can provide the connection string to your application +// using an environment variable. 
+connectionString := "" + +// Path to the local file to upload +localFilePath := "" + +// Get reference to a share and create it +shareClient, err := share.NewClientFromConnectionString(connectionString, shareName, nil) +// TODO: handle error +_, err = shareClient.Create(context.TODO(), nil) +// TODO: handle error + +// Get reference to a directory and create it +dirClient := shareClient.NewDirectoryClient(dirName) +_, err = dirClient.Create(context.TODO(), nil) +// TODO: handle error + +// open the file for reading +file, err := os.OpenFile(localFilePath, os.O_RDONLY, 0) +// TODO: handle error +defer file.Close() + +// get the size of file +fInfo, err := file.Stat() +// TODO: handle error +fSize := fInfo.Size() + +// create the file +fClient := dirClient.NewFileClient(fileName) +_, err = fClient.Create(context.TODO(), fSize, nil) +// TODO: handle error + +// upload the file +err = fClient.UploadFile(context.TODO(), file, nil) +// TODO: handle error +``` + +### Download a file + +```go +const ( +shareName = "sample-share" +dirName = "sample-dir" +fileName = "sample-file" +) + +connectionString := "" + +// Path to save the downloaded file +localFilePath := "" + +// Get reference to the share +shareClient, err := share.NewClientFromConnectionString(connectionString, shareName, nil) +// TODO: handle error + +// Get reference to the directory +dirClient := shareClient.NewDirectoryClient(dirName) + +// Get reference to the file +fClient := dirClient.NewFileClient(fileName) + +// create or open a local file where we can download the Azure File +file, err := os.Create(localFilePath) +// TODO: handle error +defer file.Close() + +// Download the file +_, err = fClient.DownloadFile(context.TODO(), file, nil) +// TODO: handle error +``` + +### Traverse a share + +```go +const shareName = "sample-share" + +connectionString := "" + +// Get reference to the share +shareClient, err := share.NewClientFromConnectionString(connectionString, shareName, nil) +// TODO: handle error 
+ +// Track the remaining directories to walk, starting from the root +var dirs []*directory.Client +dirs = append(dirs, shareClient.NewRootDirectoryClient()) +for len(dirs) > 0 { + dirClient := dirs[0] + dirs = dirs[1:] + + // Get all the next directory's files and subdirectories + pager := dirClient.NewListFilesAndDirectoriesPager(nil) + for pager.More() { + resp, err := pager.NextPage(context.TODO()) + // TODO: handle error + + for _, d := range resp.Segment.Directories { + fmt.Println(*d.Name) + // Keep walking down directories + dirs = append(dirs, dirClient.NewSubdirectoryClient(*d.Name)) + } + + for _, f := range resp.Segment.Files { + fmt.Println(*f.Name) + } + } +} +``` + +## Troubleshooting + +All File service operations will return an +[*azcore.ResponseError][azcore_response_error] on failure with a +populated `ErrorCode` field. Many of these errors are recoverable. +The [fileerror][file_error] package provides the possible Storage error codes +along with various helper facilities for error handling. + +```go +const ( + connectionString = "" + shareName = "sample-share" +) + +// create a client with the provided connection string +client, err := service.NewClientFromConnectionString(connectionString, nil) +// TODO: handle error + +// try to delete the share, avoiding any potential race conditions with an in-progress or completed deletion +_, err = client.DeleteShare(context.TODO(), shareName, nil) + +if fileerror.HasCode(err, fileerror.ShareBeingDeleted, fileerror.ShareNotFound) { + // ignore any errors if the share is being deleted or already has been deleted +} else if err != nil { + // TODO: some other error +} +``` + +## Next steps + +Get started with our [File samples][samples]. They contain complete examples of the above snippets and more. + +## Contributing + +See the [Storage CONTRIBUTING.md][storage_contrib] for details on building, +testing, and contributing to this library. + +This project welcomes contributions and suggestions. 
Most contributions require +you to agree to a Contributor License Agreement (CLA) declaring that you have +the right to, and actually do, grant us the rights to use your contribution. For +details, visit [cla.microsoft.com][cla]. + +This project has adopted the [Microsoft Open Source Code of Conduct][coc]. +For more information see the [Code of Conduct FAQ][coc_faq] +or contact [opencode@microsoft.com][coc_contact] with any +additional questions or comments. + + +[source]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage +[docs]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/storage/azfile +[rest_docs]: https://docs.microsoft.com/rest/api/storageservices/file-service-rest-api +[product_docs]: https://docs.microsoft.com/azure/storage/files/storage-files-introduction +[godevdl]: https://go.dev/dl/ +[goget]: https://pkg.go.dev/cmd/go#hdr-Add_dependencies_to_current_module_and_install_them +[storage_account_docs]: https://docs.microsoft.com/azure/storage/common/storage-account-overview +[storage_account_create_ps]: https://docs.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-powershell +[storage_account_create_cli]: https://docs.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-cli +[storage_account_create_portal]: https://docs.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-portal +[azure_sub]: https://azure.microsoft.com/free/ +[azcore_response_error]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore#ResponseError +[file_error]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage +[samples]: https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/storage +[storage_contrib]: https://github.com/Azure/azure-sdk-for-go/blob/main/CONTRIBUTING.md +[cla]: https://cla.microsoft.com +[coc]: https://opensource.microsoft.com/codeofconduct/ +[coc_faq]: https://opensource.microsoft.com/codeofconduct/faq/ +[coc_contact]: 
mailto:opencode@microsoft.com diff --git a/sdk/storage/azfile/assets.json b/sdk/storage/azfile/assets.json new file mode 100644 index 000000000000..47d08f9c3faa --- /dev/null +++ b/sdk/storage/azfile/assets.json @@ -0,0 +1,6 @@ +{ + "AssetsRepo": "Azure/azure-sdk-assets", + "AssetsRepoPrefixPath": "go", + "TagPrefix": "go/storage/azfile", + "Tag": "go/storage/azfile_f1e8c5b99b" +} diff --git a/sdk/storage/azfile/ci.yml b/sdk/storage/azfile/ci.yml new file mode 100644 index 000000000000..4978a37820df --- /dev/null +++ b/sdk/storage/azfile/ci.yml @@ -0,0 +1,33 @@ +trigger: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/storage/azfile + +pr: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/storage/azfile + + +stages: + - template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml + parameters: + ServiceDirectory: 'storage/azfile' + RunLiveTests: true + EnvVars: + AZURE_CLIENT_ID: $(AZFILE_CLIENT_ID) + AZURE_TENANT_ID: $(AZFILE_TENANT_ID) + AZURE_CLIENT_SECRET: $(AZFILE_CLIENT_SECRET) + AZURE_SUBSCRIPTION_ID: $(AZFILE_SUBSCRIPTION_ID) diff --git a/sdk/storage/azfile/directory/client.go b/sdk/storage/azfile/directory/client.go new file mode 100644 index 000000000000..6ea9713d0f20 --- /dev/null +++ b/sdk/storage/azfile/directory/client.go @@ -0,0 +1,205 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package directory + +import ( + "context" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/base" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/shared" + "net/http" + "net/url" + "strings" +) + +// ClientOptions contains the optional parameters when creating a Client. +type ClientOptions base.ClientOptions + +// Client represents a URL to the Azure Storage directory allowing you to manipulate its directories and files. +type Client base.Client[generated.DirectoryClient] + +// NewClientWithNoCredential creates an instance of Client with the specified values. +// This is used to anonymously access a directory or with a shared access signature (SAS) token. +// - directoryURL - the URL of the directory e.g. https://.file.core.windows.net/share/directory? +// - options - client options; pass nil to accept the default values +func NewClientWithNoCredential(directoryURL string, options *ClientOptions) (*Client, error) { + conOptions := shared.GetClientOptions(options) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*Client)(base.NewDirectoryClient(directoryURL, pl, nil)), nil +} + +// NewClientWithSharedKeyCredential creates an instance of Client with the specified values. +// - directoryURL - the URL of the directory e.g. 
https://.file.core.windows.net/share/directory +// - cred - a SharedKeyCredential created with the matching directory's storage account and access key +// - options - client options; pass nil to accept the default values +func NewClientWithSharedKeyCredential(directoryURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) { + authPolicy := exported.NewSharedKeyCredPolicy(cred) + conOptions := shared.GetClientOptions(options) + conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*Client)(base.NewDirectoryClient(directoryURL, pl, cred)), nil +} + +// NewClientFromConnectionString creates an instance of Client with the specified values. +// - connectionString - a connection string for the desired storage account +// - shareName - the name of the share within the storage account +// - directoryPath - the path of the directory within the share +// - options - client options; pass nil to accept the default values +func NewClientFromConnectionString(connectionString string, shareName string, directoryPath string, options *ClientOptions) (*Client, error) { + parsed, err := shared.ParseConnectionString(connectionString) + if err != nil { + return nil, err + } + + directoryPath = strings.ReplaceAll(directoryPath, "\\", "/") + parsed.ServiceURL = runtime.JoinPaths(parsed.ServiceURL, shareName, directoryPath) + + if parsed.AccountKey != "" && parsed.AccountName != "" { + credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey) + if err != nil { + return nil, err + } + return NewClientWithSharedKeyCredential(parsed.ServiceURL, credential, options) + } + + return NewClientWithNoCredential(parsed.ServiceURL, options) +} + +func (d *Client) generated() *generated.DirectoryClient { + return base.InnerClient((*base.Client[generated.DirectoryClient])(d)) +} + +func 
(d *Client) sharedKey() *SharedKeyCredential { + return base.SharedKey((*base.Client[generated.DirectoryClient])(d)) +} + +// URL returns the URL endpoint used by the Client object. +func (d *Client) URL() string { + return d.generated().Endpoint() +} + +// NewSubdirectoryClient creates a new Client object by concatenating subDirectoryName to the end of this Client's URL. +// The new subdirectory Client uses the same request policy pipeline as the parent directory Client. +func (d *Client) NewSubdirectoryClient(subDirectoryName string) *Client { + subDirectoryName = url.PathEscape(subDirectoryName) + subDirectoryURL := runtime.JoinPaths(d.URL(), subDirectoryName) + return (*Client)(base.NewDirectoryClient(subDirectoryURL, d.generated().Pipeline(), d.sharedKey())) +} + +// NewFileClient creates a new file.Client object by concatenating fileName to the end of this Client's URL. +// The new file.Client uses the same request policy pipeline as the Client. +func (d *Client) NewFileClient(fileName string) *file.Client { + fileName = url.PathEscape(fileName) + fileURL := runtime.JoinPaths(d.URL(), fileName) + return (*file.Client)(base.NewFileClient(fileURL, d.generated().Pipeline(), d.sharedKey())) +} + +// Create operation creates a new directory under the specified share or parent directory. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/create-directory. +func (d *Client) Create(ctx context.Context, options *CreateOptions) (CreateResponse, error) { + fileAttributes, fileCreationTime, fileLastWriteTime, opts := options.format() + resp, err := d.generated().Create(ctx, fileAttributes, fileCreationTime, fileLastWriteTime, opts) + return resp, err +} + +// Delete operation removes the specified empty directory. Note that the directory must be empty before it can be deleted. +// Deleting directories that aren't empty returns error 409 (Directory Not Empty). 
+// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/delete-directory. +func (d *Client) Delete(ctx context.Context, options *DeleteOptions) (DeleteResponse, error) { + opts := options.format() + resp, err := d.generated().Delete(ctx, opts) + return resp, err +} + +// GetProperties operation returns all system properties for the specified directory, and it can also be used to check the existence of a directory. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-directory-properties. +func (d *Client) GetProperties(ctx context.Context, options *GetPropertiesOptions) (GetPropertiesResponse, error) { + opts := options.format() + resp, err := d.generated().GetProperties(ctx, opts) + return resp, err +} + +// SetProperties operation sets system properties for the specified directory. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/set-directory-properties. +func (d *Client) SetProperties(ctx context.Context, options *SetPropertiesOptions) (SetPropertiesResponse, error) { + fileAttributes, fileCreationTime, fileLastWriteTime, opts := options.format() + resp, err := d.generated().SetProperties(ctx, fileAttributes, fileCreationTime, fileLastWriteTime, opts) + return resp, err +} + +// SetMetadata operation sets user-defined metadata for the specified directory. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/set-directory-metadata. +func (d *Client) SetMetadata(ctx context.Context, options *SetMetadataOptions) (SetMetadataResponse, error) { + opts := options.format() + resp, err := d.generated().SetMetadata(ctx, opts) + return resp, err +} + +// ForceCloseHandles operation closes a handle or handles opened on a directory. +// - handleID - Specifies the handle ID to be closed. Use an asterisk (*) as a wildcard string to specify all handles. 
+// +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/force-close-handles. +func (d *Client) ForceCloseHandles(ctx context.Context, handleID string, options *ForceCloseHandlesOptions) (ForceCloseHandlesResponse, error) { + opts := options.format() + resp, err := d.generated().ForceCloseHandles(ctx, handleID, opts) + return resp, err +} + +// ListHandles operation returns a list of open handles on a directory. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/list-handles. +func (d *Client) ListHandles(ctx context.Context, options *ListHandlesOptions) (ListHandlesResponse, error) { + opts := options.format() + resp, err := d.generated().ListHandles(ctx, opts) + return resp, err +} + +// NewListFilesAndDirectoriesPager operation returns a pager for the files and directories starting from the specified Marker. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/list-directories-and-files. 
+func (d *Client) NewListFilesAndDirectoriesPager(options *ListFilesAndDirectoriesOptions) *runtime.Pager[ListFilesAndDirectoriesResponse] { + listOptions := generated.DirectoryClientListFilesAndDirectoriesSegmentOptions{} + if options != nil { + listOptions.Include = options.Include.format() + listOptions.IncludeExtendedInfo = options.IncludeExtendedInfo + listOptions.Marker = options.Marker + listOptions.Maxresults = options.MaxResults + listOptions.Prefix = options.Prefix + listOptions.Sharesnapshot = options.ShareSnapshot + } + + return runtime.NewPager(runtime.PagingHandler[ListFilesAndDirectoriesResponse]{ + More: func(page ListFilesAndDirectoriesResponse) bool { + return page.NextMarker != nil && len(*page.NextMarker) > 0 + }, + Fetcher: func(ctx context.Context, page *ListFilesAndDirectoriesResponse) (ListFilesAndDirectoriesResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = d.generated().ListFilesAndDirectoriesSegmentCreateRequest(ctx, &listOptions) + } else { + listOptions.Marker = page.NextMarker + req, err = d.generated().ListFilesAndDirectoriesSegmentCreateRequest(ctx, &listOptions) + } + if err != nil { + return ListFilesAndDirectoriesResponse{}, err + } + resp, err := d.generated().Pipeline().Do(req) + if err != nil { + return ListFilesAndDirectoriesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ListFilesAndDirectoriesResponse{}, runtime.NewResponseError(resp) + } + return d.generated().ListFilesAndDirectoriesSegmentHandleResponse(resp) + }, + }) +} diff --git a/sdk/storage/azfile/directory/client_test.go b/sdk/storage/azfile/directory/client_test.go new file mode 100644 index 000000000000..4e91138e9aad --- /dev/null +++ b/sdk/storage/azfile/directory/client_test.go @@ -0,0 +1,1117 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See License.txt in the project root for license information. + +package directory_test + +import ( + "context" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/recording" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/directory" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/fileerror" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/testcommon" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/share" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "testing" + "time" +) + +func Test(t *testing.T) { + recordMode := recording.GetRecordMode() + t.Logf("Running directory Tests in %s mode\n", recordMode) + if recordMode == recording.LiveMode { + suite.Run(t, &DirectoryRecordedTestsSuite{}) + suite.Run(t, &DirectoryUnrecordedTestsSuite{}) + } else if recordMode == recording.PlaybackMode { + suite.Run(t, &DirectoryRecordedTestsSuite{}) + } else if recordMode == recording.RecordingMode { + suite.Run(t, &DirectoryRecordedTestsSuite{}) + } +} + +func (d *DirectoryRecordedTestsSuite) BeforeTest(suite string, test string) { + testcommon.BeforeTest(d.T(), suite, test) +} + +func (d *DirectoryRecordedTestsSuite) AfterTest(suite string, test string) { + testcommon.AfterTest(d.T(), suite, test) +} + +func (d *DirectoryUnrecordedTestsSuite) BeforeTest(suite string, test string) { + +} + +func (d *DirectoryUnrecordedTestsSuite) AfterTest(suite string, test string) { + +} + +type DirectoryRecordedTestsSuite struct { + suite.Suite +} + +type DirectoryUnrecordedTestsSuite struct { + suite.Suite +} + +func (d *DirectoryRecordedTestsSuite) TestDirNewDirectoryClient() { + _require := require.New(d.T()) + testName := d.T().Name() + + accountName, _ := testcommon.GetGenericAccountInfo(testcommon.TestAccountDefault) + _require.Greater(len(accountName), 0) + + svcClient, err := 
testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := svcClient.NewShareClient(shareName) + + dirName := testcommon.GenerateDirectoryName(testName) + dirClient := shareClient.NewDirectoryClient(dirName) + + subDirName := "inner" + dirName + subDirClient := dirClient.NewSubdirectoryClient(subDirName) + + correctURL := "https://" + accountName + ".file.core.windows.net/" + shareName + "/" + dirName + "/" + subDirName + _require.Equal(subDirClient.URL(), correctURL) +} + +func (d *DirectoryRecordedTestsSuite) TestDirCreateFileURL() { + _require := require.New(d.T()) + testName := d.T().Name() + + accountName, _ := testcommon.GetGenericAccountInfo(testcommon.TestAccountDefault) + _require.Greater(len(accountName), 0) + + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := svcClient.NewShareClient(shareName) + + dirName := testcommon.GenerateDirectoryName(testName) + dirClient := shareClient.NewDirectoryClient(dirName) + + fileName := testcommon.GenerateFileName(testName) + fileClient := dirClient.NewFileClient(fileName) + + correctURL := "https://" + accountName + ".file.core.windows.net/" + shareName + "/" + dirName + "/" + fileName + _require.Equal(fileClient.URL(), correctURL) +} + +func (d *DirectoryRecordedTestsSuite) TestDirectoryCreateUsingSharedKey() { + _require := require.New(d.T()) + testName := d.T().Name() + + cred, err := testcommon.GetGenericSharedKeyCredential(testcommon.TestAccountDefault) + _require.NoError(err) + + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer 
testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirName := testcommon.GenerateDirectoryName(testName) + dirURL := "https://" + cred.AccountName() + ".file.core.windows.net/" + shareName + "/" + dirName + + options := &directory.ClientOptions{} + testcommon.SetClientOptions(d.T(), &options.ClientOptions) + dirClient, err := directory.NewClientWithSharedKeyCredential(dirURL, cred, options) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), nil) + _require.NoError(err) + _require.NotNil(resp.ETag) + _require.NotNil(resp.RequestID) + _require.Equal(resp.LastModified.IsZero(), false) + _require.Equal(resp.FileCreationTime.IsZero(), false) + _require.Equal(resp.FileLastWriteTime.IsZero(), false) + _require.Equal(resp.FileChangeTime.IsZero(), false) +} + +func (d *DirectoryRecordedTestsSuite) TestDirectoryCreateUsingConnectionString() { + _require := require.New(d.T()) + testName := d.T().Name() + + connString, err := testcommon.GetGenericConnectionString(testcommon.TestAccountDefault) + _require.NoError(err) + + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirName := testcommon.GenerateDirectoryName(testName) + options := &directory.ClientOptions{} + testcommon.SetClientOptions(d.T(), &options.ClientOptions) + dirClient, err := directory.NewClientFromConnectionString(*connString, shareName, dirName, options) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), nil) + _require.NoError(err) + _require.NotNil(resp.ETag) + _require.NotNil(resp.RequestID) + _require.Equal(resp.LastModified.IsZero(), false) + _require.Equal(resp.FileCreationTime.IsZero(), false) + 
_require.Equal(resp.FileLastWriteTime.IsZero(), false) + _require.Equal(resp.FileChangeTime.IsZero(), false) + + innerDirName1 := "innerdir1" + dirPath := dirName + "/" + innerDirName1 + dirClient1, err := directory.NewClientFromConnectionString(*connString, shareName, dirPath, options) + _require.NoError(err) + + resp, err = dirClient1.Create(context.Background(), nil) + _require.NoError(err) + _require.NotNil(resp.RequestID) + _require.Equal(resp.LastModified.IsZero(), false) + _require.Equal(resp.FileCreationTime.IsZero(), false) + + innerDirName2 := "innerdir2" + // using '\' as path separator between directories + dirPath = dirName + "\\" + innerDirName1 + "\\" + innerDirName2 + dirClient2, err := directory.NewClientFromConnectionString(*connString, shareName, dirPath, options) + _require.NoError(err) + + resp, err = dirClient2.Create(context.Background(), nil) + _require.NoError(err) + _require.NotNil(resp.RequestID) + _require.Equal(resp.LastModified.IsZero(), false) + _require.Equal(resp.FileCreationTime.IsZero(), false) +} + +func (d *DirectoryRecordedTestsSuite) TestDirectoryCreateNegativeMultiLevel() { + _require := require.New(d.T()) + testName := d.T().Name() + + connString, err := testcommon.GetGenericConnectionString(testcommon.TestAccountDefault) + _require.NoError(err) + + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirName := testcommon.GenerateDirectoryName(testName) + // dirPath where parent dir does not exist + dirPath := "a/b/c/d/" + dirName + options := &directory.ClientOptions{} + testcommon.SetClientOptions(d.T(), &options.ClientOptions) + dirClient, err := directory.NewClientFromConnectionString(*connString, shareName, dirPath, options) + 
_require.NoError(err) + + resp, err := dirClient.Create(context.Background(), nil) + _require.Error(err) + _require.Nil(resp.RequestID) + testcommon.ValidateFileErrorCode(_require, err, fileerror.ParentNotFound) +} + +func (d *DirectoryRecordedTestsSuite) TestDirCreateDeleteDefault() { + _require := require.New(d.T()) + testName := d.T().Name() + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirName := testcommon.GenerateDirectoryName(testName) + dirClient := shareClient.NewDirectoryClient(dirName) + _require.NoError(err) + + cResp, err := dirClient.Create(context.Background(), nil) + _require.NoError(err) + _require.NotNil(cResp.RequestID) + _require.NotNil(cResp.ETag) + _require.Equal(cResp.Date.IsZero(), false) + _require.Equal(cResp.LastModified.IsZero(), false) + _require.Equal(cResp.FileCreationTime.IsZero(), false) + _require.Equal(cResp.FileLastWriteTime.IsZero(), false) + _require.Equal(cResp.FileChangeTime.IsZero(), false) + + gResp, err := dirClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.NotNil(gResp.RequestID) + _require.NotNil(gResp.ETag) + _require.Equal(gResp.Date.IsZero(), false) + _require.Equal(gResp.LastModified.IsZero(), false) + _require.Equal(gResp.FileCreationTime.IsZero(), false) + _require.Equal(gResp.FileLastWriteTime.IsZero(), false) + _require.Equal(gResp.FileChangeTime.IsZero(), false) +} + +func (d *DirectoryRecordedTestsSuite) TestDirSetPropertiesDefault() { + _require := require.New(d.T()) + testName := d.T().Name() + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := 
testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirName := testcommon.GenerateDirectoryName(testName) + dirClient := testcommon.GetDirectoryClient(dirName, shareClient) + + cResp, err := dirClient.Create(context.Background(), nil) + _require.NoError(err) + _require.NotNil(cResp.FilePermissionKey) + + // Set the custom permissions + sResp, err := dirClient.SetProperties(context.Background(), nil) + _require.NoError(err) + _require.NotNil(sResp.FileCreationTime) + _require.NotNil(sResp.FileLastWriteTime) + _require.NotNil(sResp.FilePermissionKey) + _require.Equal(*sResp.FilePermissionKey, *cResp.FilePermissionKey) + + gResp, err := dirClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.NotNil(gResp.FileCreationTime) + _require.NotNil(gResp.FileLastWriteTime) + _require.NotNil(gResp.FilePermissionKey) + _require.Equal(*gResp.FilePermissionKey, *sResp.FilePermissionKey) + _require.Equal(*gResp.FileCreationTime, *sResp.FileCreationTime) + _require.Equal(*gResp.FileLastWriteTime, *sResp.FileLastWriteTime) + _require.Equal(*gResp.FileAttributes, *sResp.FileAttributes) +} + +func (d *DirectoryRecordedTestsSuite) TestDirSetPropertiesNonDefault() { + _require := require.New(d.T()) + testName := d.T().Name() + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirName := testcommon.GenerateDirectoryName(testName) + dirClient := testcommon.GetDirectoryClient(dirName, shareClient) + + cResp, err := dirClient.Create(context.Background(), nil) + _require.NoError(err) + _require.NotNil(cResp.FilePermissionKey) + + currTime, err := 
time.Parse(time.UnixDate, "Fri Mar 31 21:00:00 GMT 2023") + _require.NoError(err) + creationTime := currTime.Add(5 * time.Minute).Round(time.Microsecond) + lastWriteTime := currTime.Add(10 * time.Minute).Round(time.Millisecond) + + // Set the custom permissions + sResp, err := dirClient.SetProperties(context.Background(), &directory.SetPropertiesOptions{ + FileSMBProperties: &file.SMBProperties{ + Attributes: &file.NTFSFileAttributes{ + ReadOnly: true, + System: true, + }, + CreationTime: &creationTime, + LastWriteTime: &lastWriteTime, + }, + FilePermissions: &file.Permissions{ + Permission: &testcommon.SampleSDDL, + }, + }) + _require.NoError(err) + _require.NotNil(sResp.FileCreationTime) + _require.NotNil(sResp.FileLastWriteTime) + _require.NotNil(sResp.FilePermissionKey) + _require.NotEqual(*sResp.FilePermissionKey, *cResp.FilePermissionKey) + _require.Equal(*sResp.FileCreationTime, creationTime.UTC()) + _require.Equal(*sResp.FileLastWriteTime, lastWriteTime.UTC()) + + gResp, err := dirClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.NotNil(gResp.FileCreationTime) + _require.NotNil(gResp.FileLastWriteTime) + _require.NotNil(gResp.FilePermissionKey) + _require.Equal(*gResp.FilePermissionKey, *sResp.FilePermissionKey) + _require.Equal(*gResp.FileCreationTime, *sResp.FileCreationTime) + _require.Equal(*gResp.FileLastWriteTime, *sResp.FileLastWriteTime) + _require.Equal(*gResp.FileAttributes, *sResp.FileAttributes) +} + +func (d *DirectoryUnrecordedTestsSuite) TestDirCreateDeleteNonDefault() { + _require := require.New(d.T()) + testName := d.T().Name() + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirName := 
testcommon.GenerateDirectoryName(testName) + dirClient := testcommon.GetDirectoryClient(dirName, shareClient) + + md := map[string]*string{ + "Foo": to.Ptr("FooValuE"), + "Bar": to.Ptr("bArvaLue"), + } + + cResp, err := dirClient.Create(context.Background(), &directory.CreateOptions{ + Metadata: md, + FileSMBProperties: &file.SMBProperties{ + Attributes: &file.NTFSFileAttributes{None: true}, + CreationTime: to.Ptr(time.Now().Add(5 * time.Minute)), + LastWriteTime: to.Ptr(time.Now().Add(10 * time.Minute)), + }, + FilePermissions: &file.Permissions{ + Permission: &testcommon.SampleSDDL, + }, + }) + _require.NoError(err) + _require.NotNil(cResp.FilePermissionKey) + _require.Equal(cResp.Date.IsZero(), false) + _require.NotNil(cResp.ETag) + _require.Equal(cResp.LastModified.IsZero(), false) + _require.NotNil(cResp.RequestID) + + gResp, err := dirClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp.FilePermissionKey, *cResp.FilePermissionKey) + _require.EqualValues(gResp.Metadata, md) + + // Creating again will result in 409 and ResourceAlreadyExists. 
+ _, err = dirClient.Create(context.Background(), &directory.CreateOptions{Metadata: md}) + _require.Error(err) + testcommon.ValidateFileErrorCode(_require, err, fileerror.ResourceAlreadyExists) + + dResp, err := dirClient.Delete(context.Background(), nil) + _require.NoError(err) + _require.Equal(dResp.Date.IsZero(), false) + _require.NotNil(dResp.RequestID) + _require.NotNil(dResp.Version) + + _, err = dirClient.GetProperties(context.Background(), nil) + _require.Error(err) + testcommon.ValidateFileErrorCode(_require, err, fileerror.ResourceNotFound) +} + +func (d *DirectoryRecordedTestsSuite) TestDirCreateNegativePermissions() { + _require := require.New(d.T()) + testName := d.T().Name() + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirName := testcommon.GenerateDirectoryName(testName) + dirClient := testcommon.GetDirectoryClient(dirName, shareClient) + subDirClient := dirClient.NewSubdirectoryClient("subdir" + dirName) + + cResp, err := dirClient.Create(context.Background(), nil) + _require.NoError(err) + _require.NotNil(cResp.FilePermissionKey) + + // having both Permission and PermissionKey set returns error + _, err = subDirClient.Create(context.Background(), &directory.CreateOptions{ + FileSMBProperties: &file.SMBProperties{ + Attributes: &file.NTFSFileAttributes{None: true}, + }, + FilePermissions: &file.Permissions{ + Permission: &testcommon.SampleSDDL, + PermissionKey: cResp.FilePermissionKey, + }, + }) + _require.Error(err) +} + +func (d *DirectoryRecordedTestsSuite) TestDirCreateNegativeAttributes() { + _require := require.New(d.T()) + testName := d.T().Name() + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + 
_require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirClient := testcommon.GetDirectoryClient(testcommon.GenerateDirectoryName(testName), shareClient) + + // None attribute must be used alone. + _, err = dirClient.Create(context.Background(), &directory.CreateOptions{ + FileSMBProperties: &file.SMBProperties{ + Attributes: &file.NTFSFileAttributes{None: true, ReadOnly: true}, + }, + }) + _require.Error(err) + testcommon.ValidateFileErrorCode(_require, err, fileerror.InvalidHeaderValue) +} + +func (d *DirectoryRecordedTestsSuite) TestDirCreateDeleteNegativeMultiLevelDir() { + _require := require.New(d.T()) + testName := d.T().Name() + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + parentDirName := "parent" + testcommon.GenerateDirectoryName(testName) + parentDirClient := shareClient.NewDirectoryClient(parentDirName) + + subDirName := "subdir" + testcommon.GenerateDirectoryName(testName) + subDirClient := parentDirClient.NewSubdirectoryClient(subDirName) + + // Directory create with subDirClient + _, err = subDirClient.Create(context.Background(), nil) + _require.Error(err) + testcommon.ValidateFileErrorCode(_require, err, fileerror.ParentNotFound) + + _, err = parentDirClient.Create(context.Background(), nil) + _require.NoError(err) + + _, err = subDirClient.Create(context.Background(), nil) + _require.NoError(err) + + _, err = subDirClient.GetProperties(context.Background(), nil) + _require.NoError(err) + + // Delete level by level + // Delete Non-empty directory should 
fail + _, err = parentDirClient.Delete(context.Background(), nil) + _require.Error(err) + testcommon.ValidateFileErrorCode(_require, err, fileerror.DirectoryNotEmpty) + + _, err = subDirClient.Delete(context.Background(), nil) + _require.NoError(err) + + _, err = parentDirClient.Delete(context.Background(), nil) + _require.NoError(err) +} + +func (d *DirectoryRecordedTestsSuite) TestDirCreateEndWithSlash() { + _require := require.New(d.T()) + testName := d.T().Name() + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirName := testcommon.GenerateDirectoryName(testName) + "/" + dirClient := testcommon.GetDirectoryClient(dirName, shareClient) + + cResp, err := dirClient.Create(context.Background(), nil) + _require.NoError(err) + _require.Equal(cResp.Date.IsZero(), false) + _require.NotNil(cResp.ETag) + _require.Equal(cResp.LastModified.IsZero(), false) + _require.NotNil(cResp.RequestID) + _require.NotNil(cResp.Version) + + _, err = dirClient.GetProperties(context.Background(), nil) + _require.NoError(err) +} + +func (d *DirectoryRecordedTestsSuite) TestDirGetSetMetadataDefault() { + _require := require.New(d.T()) + testName := d.T().Name() + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirName := testcommon.GenerateDirectoryName(testName) + dirClient := testcommon.CreateNewDirectory(context.Background(), _require, dirName, shareClient) + defer 
testcommon.DeleteDirectory(context.Background(), _require, dirClient) + + sResp, err := dirClient.SetMetadata(context.Background(), nil) + _require.NoError(err) + _require.Equal(sResp.Date.IsZero(), false) + _require.NotNil(sResp.ETag) + _require.NotNil(sResp.RequestID) + _require.NotNil(sResp.Version) + _require.NotNil(sResp.IsServerEncrypted) + + gResp, err := dirClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(gResp.Date.IsZero(), false) + _require.NotNil(gResp.ETag) + _require.Equal(gResp.LastModified.IsZero(), false) + _require.NotNil(gResp.RequestID) + _require.NotNil(gResp.Version) + _require.NotNil(gResp.IsServerEncrypted) + _require.Len(gResp.Metadata, 0) +} + +func (d *DirectoryRecordedTestsSuite) TestDirGetSetMetadataNonDefault() { + _require := require.New(d.T()) + testName := d.T().Name() + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirName := testcommon.GenerateDirectoryName(testName) + dirClient := testcommon.CreateNewDirectory(context.Background(), _require, dirName, shareClient) + defer testcommon.DeleteDirectory(context.Background(), _require, dirClient) + + md := map[string]*string{ + "Foo": to.Ptr("FooValuE"), + "Bar": to.Ptr("bArvaLue"), + } + + sResp, err := dirClient.SetMetadata(context.Background(), &directory.SetMetadataOptions{ + Metadata: md, + }) + _require.NoError(err) + _require.Equal(sResp.Date.IsZero(), false) + _require.NotNil(sResp.ETag) + _require.NotNil(sResp.RequestID) + _require.NotNil(sResp.Version) + _require.NotNil(sResp.IsServerEncrypted) + + gResp, err := dirClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(gResp.Date.IsZero(), false) + 
_require.NotNil(gResp.ETag) + _require.Equal(gResp.LastModified.IsZero(), false) + _require.NotNil(gResp.RequestID) + _require.NotNil(gResp.Version) + _require.NotNil(gResp.IsServerEncrypted) + _require.EqualValues(gResp.Metadata, md) +} + +func (d *DirectoryRecordedTestsSuite) TestDirSetMetadataNegative() { + _require := require.New(d.T()) + testName := d.T().Name() + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirName := testcommon.GenerateDirectoryName(testName) + dirClient := testcommon.CreateNewDirectory(context.Background(), _require, dirName, shareClient) + defer testcommon.DeleteDirectory(context.Background(), _require, dirClient) + + md := map[string]*string{ + "!@#$%^&*()": to.Ptr("!@#$%^&*()"), + } + + _, err = dirClient.SetMetadata(context.Background(), &directory.SetMetadataOptions{ + Metadata: md, + }) + _require.Error(err) +} + +func (d *DirectoryRecordedTestsSuite) TestDirGetPropertiesNegative() { + _require := require.New(d.T()) + testName := d.T().Name() + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirName := testcommon.GenerateDirectoryName(testName) + dirClient := testcommon.GetDirectoryClient(dirName, shareClient) + + _, err = dirClient.GetProperties(context.Background(), nil) + _require.Error(err) + testcommon.ValidateFileErrorCode(_require, err, fileerror.ResourceNotFound) +} + +func (d *DirectoryRecordedTestsSuite) 
TestDirGetPropertiesWithBaseDirectory() { + _require := require.New(d.T()) + testName := d.T().Name() + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirClient := shareClient.NewRootDirectoryClient() + + gResp, err := dirClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.NotNil(gResp.ETag) + _require.Equal(gResp.LastModified.IsZero(), false) + _require.NotNil(gResp.RequestID) + _require.NotNil(gResp.Version) + _require.Equal(gResp.Date.IsZero(), false) + _require.Equal(gResp.FileCreationTime.IsZero(), false) + _require.Equal(gResp.FileLastWriteTime.IsZero(), false) + _require.Equal(gResp.FileChangeTime.IsZero(), false) + _require.NotNil(gResp.IsServerEncrypted) +} + +func (d *DirectoryRecordedTestsSuite) TestDirGetSetMetadataMergeAndReplace() { + _require := require.New(d.T()) + testName := d.T().Name() + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirName := testcommon.GenerateDirectoryName(testName) + dirClient := testcommon.CreateNewDirectory(context.Background(), _require, dirName, shareClient) + defer testcommon.DeleteDirectory(context.Background(), _require, dirClient) + + md := map[string]*string{ + "Color": to.Ptr("RED"), + } + + sResp, err := dirClient.SetMetadata(context.Background(), &directory.SetMetadataOptions{ + Metadata: md, + }) + _require.NoError(err) + _require.NotNil(sResp.RequestID) + 
_require.NotNil(sResp.IsServerEncrypted) + + gResp, err := dirClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(gResp.Date.IsZero(), false) + _require.NotNil(gResp.ETag) + _require.Equal(gResp.LastModified.IsZero(), false) + _require.NotNil(gResp.RequestID) + _require.NotNil(gResp.Version) + _require.NotNil(gResp.IsServerEncrypted) + _require.EqualValues(gResp.Metadata, md) + + md2 := map[string]*string{ + "Color": to.Ptr("WHITE"), + } + + sResp, err = dirClient.SetMetadata(context.Background(), &directory.SetMetadataOptions{ + Metadata: md2, + }) + _require.NoError(err) + _require.NotNil(sResp.RequestID) + _require.NotNil(sResp.IsServerEncrypted) + + gResp, err = dirClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(gResp.Date.IsZero(), false) + _require.NotNil(gResp.ETag) + _require.Equal(gResp.LastModified.IsZero(), false) + _require.NotNil(gResp.RequestID) + _require.NotNil(gResp.Version) + _require.NotNil(gResp.IsServerEncrypted) + _require.EqualValues(gResp.Metadata, md2) +} + +func (d *DirectoryRecordedTestsSuite) TestDirListFilesAndDirsDefault() { + _require := require.New(d.T()) + testName := d.T().Name() + + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirName := testcommon.GenerateDirectoryName(testName) + fileName := testcommon.GenerateFileName(testName) + + for i := 0; i < 10; i++ { + _ = testcommon.CreateNewDirectory(context.Background(), _require, dirName+fmt.Sprintf("%v", i), shareClient) + } + + for i := 0; i < 5; i++ { + _ = testcommon.CreateNewFileFromShare(context.Background(), _require, fileName+fmt.Sprintf("%v", i), 2048, shareClient) + } + + dirCtr, fileCtr := 0, 0 + pager := 
shareClient.NewRootDirectoryClient().NewListFilesAndDirectoriesPager(nil) + for pager.More() { + resp, err := pager.NextPage(context.Background()) + _require.NoError(err) + dirCtr += len(resp.Segment.Directories) + fileCtr += len(resp.Segment.Files) + for _, dir := range resp.Segment.Directories { + _require.NotNil(dir.Name) + _require.NotNil(dir.ID) + _require.Nil(dir.Attributes) + _require.Nil(dir.PermissionKey) + _require.Nil(dir.Properties.ETag) + _require.Nil(dir.Properties.ChangeTime) + _require.Nil(dir.Properties.CreationTime) + _require.Nil(dir.Properties.ContentLength) + } + for _, f := range resp.Segment.Files { + _require.NotNil(f.Name) + _require.NotNil(f.ID) + _require.Nil(f.Attributes) + _require.Nil(f.PermissionKey) + _require.Nil(f.Properties.ETag) + _require.Nil(f.Properties.ChangeTime) + _require.Nil(f.Properties.CreationTime) + _require.NotNil(f.Properties.ContentLength) + _require.Equal(*f.Properties.ContentLength, int64(2048)) + } + } + _require.Equal(dirCtr, 10) + _require.Equal(fileCtr, 5) +} + +func (d *DirectoryRecordedTestsSuite) TestDirListFilesAndDirsInclude() { + _require := require.New(d.T()) + testName := d.T().Name() + + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirName := testcommon.GenerateDirectoryName(testName) + fileName := testcommon.GenerateFileName(testName) + + for i := 0; i < 10; i++ { + _ = testcommon.CreateNewDirectory(context.Background(), _require, dirName+fmt.Sprintf("%v", i), shareClient) + } + + for i := 0; i < 5; i++ { + _ = testcommon.CreateNewFileFromShare(context.Background(), _require, fileName+fmt.Sprintf("%v", i), 2048, shareClient) + } + + dirCtr, fileCtr := 0, 0 + pager := 
shareClient.NewRootDirectoryClient().NewListFilesAndDirectoriesPager(&directory.ListFilesAndDirectoriesOptions{ + Include: directory.ListFilesInclude{Timestamps: true, ETag: true, Attributes: true, PermissionKey: true}, + IncludeExtendedInfo: to.Ptr(true), + }) + for pager.More() { + resp, err := pager.NextPage(context.Background()) + _require.NoError(err) + dirCtr += len(resp.Segment.Directories) + fileCtr += len(resp.Segment.Files) + for _, dir := range resp.Segment.Directories { + _require.NotNil(dir.Name) + _require.NotNil(dir.ID) + _require.NotNil(dir.Attributes) + _require.NotNil(dir.PermissionKey) + _require.NotNil(dir.Properties.ETag) + _require.NotNil(dir.Properties.ChangeTime) + _require.NotNil(dir.Properties.CreationTime) + _require.Nil(dir.Properties.ContentLength) + } + for _, f := range resp.Segment.Files { + _require.NotNil(f.Name) + _require.NotNil(f.ID) + _require.NotNil(f.Attributes) + _require.NotNil(f.PermissionKey) + _require.NotNil(f.Properties.ETag) + _require.NotNil(f.Properties.ChangeTime) + _require.NotNil(f.Properties.CreationTime) + _require.NotNil(f.Properties.ContentLength) + _require.Equal(*f.Properties.ContentLength, int64(2048)) + } + } + _require.Equal(dirCtr, 10) + _require.Equal(fileCtr, 5) +} + +func (d *DirectoryRecordedTestsSuite) TestDirListFilesAndDirsMaxResultsAndMarker() { + _require := require.New(d.T()) + testName := d.T().Name() + + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirName := testcommon.GenerateDirectoryName(testName) + fileName := testcommon.GenerateFileName(testName) + + for i := 0; i < 10; i++ { + _ = testcommon.CreateNewDirectory(context.Background(), _require, dirName+fmt.Sprintf("%v", i), shareClient) + } + + for i := 0; i < 5; i++ 
{ + _ = testcommon.CreateNewFileFromShare(context.Background(), _require, fileName+fmt.Sprintf("%v", i), 2048, shareClient) + } + + dirCtr, fileCtr := 0, 0 + pager := shareClient.NewRootDirectoryClient().NewListFilesAndDirectoriesPager(&directory.ListFilesAndDirectoriesOptions{ + MaxResults: to.Ptr(int32(2)), + }) + resp, err := pager.NextPage(context.Background()) + _require.NoError(err) + dirCtr += len(resp.Segment.Directories) + fileCtr += len(resp.Segment.Files) + _require.Equal(dirCtr+fileCtr, 2) + + pager = shareClient.NewRootDirectoryClient().NewListFilesAndDirectoriesPager(&directory.ListFilesAndDirectoriesOptions{ + Marker: resp.NextMarker, + MaxResults: to.Ptr(int32(5)), + }) + for pager.More() { + resp, err := pager.NextPage(context.Background()) + _require.NoError(err) + dirCtr += len(resp.Segment.Directories) + fileCtr += len(resp.Segment.Files) + } + _require.Equal(dirCtr, 10) + _require.Equal(fileCtr, 5) +} + +func (d *DirectoryRecordedTestsSuite) TestDirListFilesAndDirsWithPrefix() { + _require := require.New(d.T()) + testName := d.T().Name() + + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirName := testcommon.GenerateDirectoryName(testName) + fileName := testcommon.GenerateFileName(testName) + + for i := 0; i < 10; i++ { + _ = testcommon.CreateNewDirectory(context.Background(), _require, fmt.Sprintf("%v", i)+dirName, shareClient) + } + + for i := 0; i < 5; i++ { + _ = testcommon.CreateNewFileFromShare(context.Background(), _require, fmt.Sprintf("%v", i)+fileName, 2048, shareClient) + } + + dirCtr, fileCtr := 0, 0 + pager := shareClient.NewRootDirectoryClient().NewListFilesAndDirectoriesPager(&directory.ListFilesAndDirectoriesOptions{ + Prefix: to.Ptr("1"), + }) + for 
pager.More() { + resp, err := pager.NextPage(context.Background()) + _require.NoError(err) + dirCtr += len(resp.Segment.Directories) + fileCtr += len(resp.Segment.Files) + if len(resp.Segment.Directories) > 0 { + _require.NotNil(resp.Segment.Directories[0].Name) + _require.Equal(*resp.Segment.Directories[0].Name, "1"+dirName) + } + if len(resp.Segment.Files) > 0 { + _require.NotNil(resp.Segment.Files[0].Name) + _require.Equal(*resp.Segment.Files[0].Name, "1"+fileName) + } + } + _require.Equal(dirCtr, 1) + _require.Equal(fileCtr, 1) +} + +func (d *DirectoryRecordedTestsSuite) TestDirListFilesAndDirsMaxResultsNegative() { + _require := require.New(d.T()) + testName := d.T().Name() + + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirName := testcommon.GenerateDirectoryName(testName) + fileName := testcommon.GenerateFileName(testName) + + for i := 0; i < 2; i++ { + _ = testcommon.CreateNewDirectory(context.Background(), _require, dirName+fmt.Sprintf("%v", i), shareClient) + } + + for i := 0; i < 2; i++ { + _ = testcommon.CreateNewFileFromShare(context.Background(), _require, fileName+fmt.Sprintf("%v", i), 2048, shareClient) + } + + pager := shareClient.NewRootDirectoryClient().NewListFilesAndDirectoriesPager(&directory.ListFilesAndDirectoriesOptions{ + MaxResults: to.Ptr(int32(-1)), + }) + _, err = pager.NextPage(context.Background()) + _require.Error(err) + testcommon.ValidateFileErrorCode(_require, err, fileerror.OutOfRangeQueryParameterValue) +} + +func (d *DirectoryRecordedTestsSuite) TestDirListFilesAndDirsSnapshot() { + _require := require.New(d.T()) + testName := d.T().Name() + + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) 
+ + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer func() { + _, err := shareClient.Delete(context.Background(), &share.DeleteOptions{DeleteSnapshots: to.Ptr(share.DeleteSnapshotsOptionTypeInclude)}) + _require.NoError(err) + }() + + dirName := testcommon.GenerateDirectoryName(testName) + fileName := testcommon.GenerateFileName(testName) + + for i := 0; i < 10; i++ { + _ = testcommon.CreateNewDirectory(context.Background(), _require, dirName+fmt.Sprintf("%v", i), shareClient) + } + + for i := 0; i < 5; i++ { + _ = testcommon.CreateNewFileFromShare(context.Background(), _require, fileName+fmt.Sprintf("%v", i), 2048, shareClient) + } + + snapResp, err := shareClient.CreateSnapshot(context.Background(), nil) + _require.NoError(err) + _require.NotNil(snapResp.Snapshot) + + _, err = shareClient.NewRootDirectoryClient().GetProperties(context.Background(), &directory.GetPropertiesOptions{ShareSnapshot: snapResp.Snapshot}) + _require.NoError(err) + + dirCtr, fileCtr := 0, 0 + pager := shareClient.NewRootDirectoryClient().NewListFilesAndDirectoriesPager(&directory.ListFilesAndDirectoriesOptions{ + ShareSnapshot: snapResp.Snapshot, + }) + for pager.More() { + resp, err := pager.NextPage(context.Background()) + _require.NoError(err) + dirCtr += len(resp.Segment.Directories) + fileCtr += len(resp.Segment.Files) + } + _require.Equal(dirCtr, 10) + _require.Equal(fileCtr, 5) +} + +func (d *DirectoryRecordedTestsSuite) TestDirListFilesAndDirsInsideDir() { + _require := require.New(d.T()) + testName := d.T().Name() + + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirName := testcommon.GenerateDirectoryName(testName) + fileName 
:= testcommon.GenerateFileName(testName) + + dirClient := testcommon.CreateNewDirectory(context.Background(), _require, dirName, shareClient) + + for i := 0; i < 5; i++ { + _, err = dirClient.NewSubdirectoryClient("subdir"+fmt.Sprintf("%v", i)).Create(context.Background(), nil) + _require.NoError(err) + } + + for i := 0; i < 5; i++ { + _, err = dirClient.NewFileClient(fileName+fmt.Sprintf("%v", i)).Create(context.Background(), 0, nil) + _require.NoError(err) + } + + dirCtr, fileCtr := 0, 0 + pager := dirClient.NewListFilesAndDirectoriesPager(&directory.ListFilesAndDirectoriesOptions{ + Include: directory.ListFilesInclude{Timestamps: true, ETag: true, Attributes: true, PermissionKey: true}, + }) + for pager.More() { + resp, err := pager.NextPage(context.Background()) + _require.NoError(err) + dirCtr += len(resp.Segment.Directories) + fileCtr += len(resp.Segment.Files) + for _, dir := range resp.Segment.Directories { + _require.NotNil(dir.Name) + _require.NotNil(dir.ID) + _require.NotNil(dir.Attributes) + _require.NotNil(dir.PermissionKey) + _require.NotNil(dir.Properties.ETag) + _require.NotNil(dir.Properties.ChangeTime) + _require.NotNil(dir.Properties.CreationTime) + _require.Nil(dir.Properties.ContentLength) + } + for _, f := range resp.Segment.Files { + _require.NotNil(f.Name) + _require.NotNil(f.ID) + _require.NotNil(f.Attributes) + _require.NotNil(f.PermissionKey) + _require.NotNil(f.Properties.ETag) + _require.NotNil(f.Properties.ChangeTime) + _require.NotNil(f.Properties.CreationTime) + _require.NotNil(f.Properties.ContentLength) + _require.Equal(*f.Properties.ContentLength, int64(0)) + } + } + _require.Equal(dirCtr, 5) + _require.Equal(fileCtr, 5) +} + +func (d *DirectoryRecordedTestsSuite) TestDirListHandlesDefault() { + _require := require.New(d.T()) + testName := d.T().Name() + + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := 
testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient)
	defer testcommon.DeleteShare(context.Background(), _require, shareClient)

	dirClient := testcommon.CreateNewDirectory(context.Background(), _require, testcommon.GenerateDirectoryName(testName), shareClient)

	// A freshly created directory has no open handles and an empty continuation marker.
	resp, err := dirClient.ListHandles(context.Background(), nil)
	_require.NoError(err)
	_require.Len(resp.Handles, 0)
	_require.NotNil(resp.NextMarker)
	_require.Equal(*resp.NextMarker, "")
}

// TestDirForceCloseHandlesDefault verifies that force-closing all handles ("*")
// on a directory with no open handles succeeds and reports zero closed/failed.
func (d *DirectoryRecordedTestsSuite) TestDirForceCloseHandlesDefault() {
	_require := require.New(d.T())
	testName := d.T().Name()

	svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil)
	_require.NoError(err)

	shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient)
	defer testcommon.DeleteShare(context.Background(), _require, shareClient)

	dirClient := testcommon.CreateNewDirectory(context.Background(), _require, testcommon.GenerateDirectoryName(testName), shareClient)

	resp, err := dirClient.ForceCloseHandles(context.Background(), "*", nil)
	_require.NoError(err)
	_require.EqualValues(*resp.NumberOfHandlesClosed, 0)
	_require.EqualValues(*resp.NumberOfHandlesFailedToClose, 0)
	_require.Nil(resp.Marker)
}

// TestDirectoryCreateNegativeWithoutSAS verifies that creating a directory with
// a credential-less client (no shared key, no SAS) is rejected by the service.
func (d *DirectoryRecordedTestsSuite) TestDirectoryCreateNegativeWithoutSAS() {
	_require := require.New(d.T())
	testName := d.T().Name()

	accountName, _ := testcommon.GetGenericAccountInfo(testcommon.TestAccountDefault)
	_require.Greater(len(accountName), 0)

	svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil)
	_require.NoError(err)

	shareName := testcommon.GenerateShareName(testName)
	shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient)
	defer testcommon.DeleteShare(context.Background(), _require, shareClient)

	dirName := testcommon.GenerateDirectoryName(testName)
	dirURL := "https://" + accountName + ".file.core.windows.net/" + shareName + "/" + dirName

	options := &directory.ClientOptions{}
	testcommon.SetClientOptions(d.T(), &options.ClientOptions)
	// FIX: pass the configured options; previously `nil` was passed, silently
	// discarding the recording transport installed by SetClientOptions above.
	dirClient, err := directory.NewClientWithNoCredential(dirURL, options)
	_require.NoError(err)

	// Without any credential or SAS token the Create call must fail.
	_, err = dirClient.Create(context.Background(), nil)
	_require.Error(err)
} diff --git a/sdk/storage/azfile/directory/constants.go b/sdk/storage/azfile/directory/constants.go new file mode 100644 index 000000000000..2b16931bbc56 --- /dev/null +++ b/sdk/storage/azfile/directory/constants.go @@ -0,0 +1,24 @@ +//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+
+package directory
+
+import "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/generated"
+
+// ListFilesIncludeType defines values for ListFilesIncludeType
+type ListFilesIncludeType = generated.ListFilesIncludeType
+
+const (
+	ListFilesIncludeTypeTimestamps    ListFilesIncludeType = generated.ListFilesIncludeTypeTimestamps
+	ListFilesIncludeTypeETag          ListFilesIncludeType = generated.ListFilesIncludeTypeEtag
+	ListFilesIncludeTypeAttributes    ListFilesIncludeType = generated.ListFilesIncludeTypeAttributes
+	ListFilesIncludeTypePermissionKey ListFilesIncludeType = generated.ListFilesIncludeTypePermissionKey
+)
+
+// PossibleListFilesIncludeTypeValues returns the possible values for the ListFilesIncludeType const type.
+func PossibleListFilesIncludeTypeValues() []ListFilesIncludeType { + return generated.PossibleListFilesIncludeTypeValues() +} diff --git a/sdk/storage/azfile/directory/examples_test.go b/sdk/storage/azfile/directory/examples_test.go new file mode 100644 index 000000000000..0d355ff82191 --- /dev/null +++ b/sdk/storage/azfile/directory/examples_test.go @@ -0,0 +1,193 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package directory_test + +import ( + "context" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/directory" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/testcommon" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/service" + "log" + "os" + "time" +) + +func handleError(err error) { + if err != nil { + log.Fatal(err.Error()) + } +} + +func Example_client_NewClient() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + + serviceURL := fmt.Sprintf("https://%s.file.core.windows.net/", accountName) + + cred, err := service.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + + client, err := service.NewClientWithSharedKeyCredential(serviceURL, cred, nil) + handleError(err) + + shareClient := client.NewShareClient("testShare") + + dirClient := shareClient.NewDirectoryClient("testDir") + fmt.Println(dirClient.URL()) + +} + +func Example_directory_NewClientFromConnectionString() { + // Your connection string can be obtained from the Azure Portal. 
+ connectionString, ok := os.LookupEnv("AZURE_STORAGE_CONNECTION_STRING") + if !ok { + log.Fatal("the environment variable 'AZURE_STORAGE_CONNECTION_STRING' could not be found") + } + shareName := "testShare" + dirName := "testDirectory" + dirClient, err := directory.NewClientFromConnectionString(connectionString, shareName, dirName, nil) + handleError(err) + fmt.Println(dirClient.URL()) +} + +func Example_directoryClient_Create() { + // Your connection string can be obtained from the Azure Portal. + connectionString, ok := os.LookupEnv("AZURE_STORAGE_CONNECTION_STRING") + if !ok { + log.Fatal("the environment variable 'AZURE_STORAGE_CONNECTION_STRING' could not be found") + } + shareName := "testShare" + dirName := "testDirectory" + dirClient, err := directory.NewClientFromConnectionString(connectionString, shareName, dirName, nil) + handleError(err) + _, err = dirClient.Create(context.Background(), nil) + handleError(err) + fmt.Println("Directory created") + + _, err = dirClient.Delete(context.Background(), nil) + handleError(err) + fmt.Println("Directory deleted") +} + +func Example_directoryClient_SetProperties() { + // Your connection string can be obtained from the Azure Portal. 
+ connectionString, ok := os.LookupEnv("AZURE_STORAGE_CONNECTION_STRING") + if !ok { + log.Fatal("the environment variable 'AZURE_STORAGE_CONNECTION_STRING' could not be found") + } + shareName := "testShare" + dirName := "testDirectory" + dirClient, err := directory.NewClientFromConnectionString(connectionString, shareName, dirName, nil) + handleError(err) + _, err = dirClient.Create(context.Background(), nil) + handleError(err) + fmt.Println("Directory created") + + creationTime := time.Now().Add(5 * time.Minute).Round(time.Microsecond) + lastWriteTime := time.Now().Add(10 * time.Minute).Round(time.Millisecond) + + // Set the custom permissions + _, err = dirClient.SetProperties(context.Background(), &directory.SetPropertiesOptions{ + FileSMBProperties: &file.SMBProperties{ + Attributes: &file.NTFSFileAttributes{ + ReadOnly: true, + System: true, + }, + CreationTime: &creationTime, + LastWriteTime: &lastWriteTime, + }, + FilePermissions: &file.Permissions{ + Permission: &testcommon.SampleSDDL, + }, + }) + handleError(err) + fmt.Println("Directory properties set") + + _, err = dirClient.GetProperties(context.Background(), nil) + handleError(err) + fmt.Println("Directory properties retrieved") + + _, err = dirClient.Delete(context.Background(), nil) + handleError(err) + fmt.Println("Directory deleted") +} + +func Example_directoryClient_ListFilesAndDirectoriesSegment() { + connectionString, ok := os.LookupEnv("AZURE_STORAGE_CONNECTION_STRING") + if !ok { + log.Fatal("the environment variable 'AZURE_STORAGE_CONNECTION_STRING' could not be found") + } + shareName := "testShare" + parentDirName := "testParentDirectory" + childDirName := "testChildDirectory" + parentDirClient, err := directory.NewClientFromConnectionString(connectionString, shareName, parentDirName, nil) + handleError(err) + _, err = parentDirClient.Create(context.Background(), nil) + handleError(err) + fmt.Println("Parent directory created") + + childDirClient := 
parentDirClient.NewSubdirectoryClient(childDirName) + _, err = childDirClient.Create(context.Background(), nil) + handleError(err) + fmt.Println("Child directory created") + + pager := parentDirClient.NewListFilesAndDirectoriesPager(nil) + for pager.More() { + resp, err := pager.NextPage(context.Background()) + handleError(err) // if err is not nil, break the loop. + for _, _dir := range resp.Segment.Directories { + fmt.Printf("%v", _dir) + } + } + + _, err = childDirClient.Delete(context.Background(), nil) + handleError(err) + fmt.Println("Child directory deleted") + + _, err = parentDirClient.Delete(context.Background(), nil) + handleError(err) + fmt.Println("Parent directory deleted") +} + +func Example_directoryClient_SetMetadata() { + // Your connection string can be obtained from the Azure Portal. + connectionString, ok := os.LookupEnv("AZURE_STORAGE_CONNECTION_STRING") + if !ok { + log.Fatal("the environment variable 'AZURE_STORAGE_CONNECTION_STRING' could not be found") + } + shareName := "testShare" + dirName := "testDirectory" + dirClient, err := directory.NewClientFromConnectionString(connectionString, shareName, dirName, nil) + handleError(err) + _, err = dirClient.Create(context.Background(), nil) + handleError(err) + fmt.Println("Directory created") + + md := map[string]*string{ + "Foo": to.Ptr("FooValuE"), + "Bar": to.Ptr("bArvaLue"), + } + + _, err = dirClient.SetMetadata(context.Background(), &directory.SetMetadataOptions{ + Metadata: md, + }) + handleError(err) + fmt.Println("Directory metadata set") + + _, err = dirClient.Delete(context.Background(), nil) + handleError(err) + fmt.Println("Directory deleted") +} diff --git a/sdk/storage/azfile/directory/models.go b/sdk/storage/azfile/directory/models.go new file mode 100644 index 000000000000..950cf1a91d63 --- /dev/null +++ b/sdk/storage/azfile/directory/models.go @@ -0,0 +1,255 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See License.txt in the project root for license information. + +package directory + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/shared" + "reflect" +) + +// SharedKeyCredential contains an account's name and its primary or secondary key. +type SharedKeyCredential = exported.SharedKeyCredential + +// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the +// storage account's name and either its primary or secondary key. +func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) { + return exported.NewSharedKeyCredential(accountName, accountKey) +} + +// --------------------------------------------------------------------------------------------------------------------- + +// CreateOptions contains the optional parameters for the Client.Create method. +type CreateOptions struct { + // The default value is 'Directory' for Attributes and 'now' for CreationTime and LastWriteTime fields in file.SMBProperties. + FileSMBProperties *file.SMBProperties + // The default value is 'inherit' for Permission field in file.Permissions. + FilePermissions *file.Permissions + // A name-value pair to associate with a file storage object. 
+ Metadata map[string]*string +} + +func (o *CreateOptions) format() (fileAttributes string, fileCreationTime string, fileLastWriteTime string, createOptions *generated.DirectoryClientCreateOptions) { + if o == nil { + return shared.FileAttributesDirectory, shared.DefaultCurrentTimeString, shared.DefaultCurrentTimeString, &generated.DirectoryClientCreateOptions{ + FilePermission: to.Ptr(shared.DefaultFilePermissionString), + } + } + + fileAttributes, fileCreationTime, fileLastWriteTime = o.FileSMBProperties.Format(true, shared.FileAttributesDirectory, shared.DefaultCurrentTimeString) + + permission, permissionKey := o.FilePermissions.Format(shared.DefaultFilePermissionString) + + createOptions = &generated.DirectoryClientCreateOptions{ + FilePermission: permission, + FilePermissionKey: permissionKey, + Metadata: o.Metadata, + } + + return +} + +// --------------------------------------------------------------------------------------------------------------------- + +// DeleteOptions contains the optional parameters for the Client.Delete method. +type DeleteOptions struct { + // placeholder for future options +} + +func (o *DeleteOptions) format() *generated.DirectoryClientDeleteOptions { + return nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetPropertiesOptions contains the optional parameters for the Client.GetProperties method. +type GetPropertiesOptions struct { + // ShareSnapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query for the directory properties. 
+ ShareSnapshot *string +} + +func (o *GetPropertiesOptions) format() *generated.DirectoryClientGetPropertiesOptions { + if o == nil { + return nil + } + + return &generated.DirectoryClientGetPropertiesOptions{ + Sharesnapshot: o.ShareSnapshot, + } +} + +// --------------------------------------------------------------------------------------------------------------------- + +// SetPropertiesOptions contains the optional parameters for the Client.SetProperties method. +type SetPropertiesOptions struct { + // The default value is 'preserve' for Attributes, CreationTime and LastWriteTime fields in file.SMBProperties. + FileSMBProperties *file.SMBProperties + // The default value is 'preserve' for Permission field in file.Permissions. + FilePermissions *file.Permissions +} + +func (o *SetPropertiesOptions) format() (fileAttributes string, fileCreationTime string, fileLastWriteTime string, setPropertiesOptions *generated.DirectoryClientSetPropertiesOptions) { + if o == nil { + return shared.DefaultPreserveString, shared.DefaultPreserveString, shared.DefaultPreserveString, &generated.DirectoryClientSetPropertiesOptions{ + FilePermission: to.Ptr(shared.DefaultPreserveString), + } + } + + fileAttributes, fileCreationTime, fileLastWriteTime = o.FileSMBProperties.Format(true, shared.DefaultPreserveString, shared.DefaultPreserveString) + + permission, permissionKey := o.FilePermissions.Format(shared.DefaultPreserveString) + + setPropertiesOptions = &generated.DirectoryClientSetPropertiesOptions{ + FilePermission: permission, + FilePermissionKey: permissionKey, + } + return +} + +// --------------------------------------------------------------------------------------------------------------------- + +// SetMetadataOptions contains the optional parameters for the Client.SetMetadata method. +type SetMetadataOptions struct { + // A name-value pair to associate with a file storage object. 
+ Metadata map[string]*string +} + +func (o *SetMetadataOptions) format() *generated.DirectoryClientSetMetadataOptions { + if o == nil { + return nil + } + + return &generated.DirectoryClientSetMetadataOptions{ + Metadata: o.Metadata, + } +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ListFilesAndDirectoriesOptions contains the optional parameters for the Client.NewListFilesAndDirectoriesPager method. +type ListFilesAndDirectoriesOptions struct { + // Include this parameter to specify one or more datasets to include in the response. + Include ListFilesInclude + // Include extended information. + IncludeExtendedInfo *bool + // A string value that identifies the portion of the list to be returned with the next list operation. The operation returns + // a marker value within the response body if the list returned was not complete. + // The marker value may then be used in a subsequent call to request the next set of list items. The marker value is opaque + // to the client. + Marker *string + // Specifies the maximum number of entries to return. If the request does not specify maxresults, or specifies a value greater + // than 5,000, the server will return up to 5,000 items. + MaxResults *int32 + // Filters the results to return only entries whose name begins with the specified prefix. + Prefix *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query for the list of files and directories. + ShareSnapshot *string +} + +// ListFilesInclude specifies one or more datasets to include in the response. 
+type ListFilesInclude struct { + Timestamps, ETag, Attributes, PermissionKey bool +} + +func (l ListFilesInclude) format() []generated.ListFilesIncludeType { + if reflect.ValueOf(l).IsZero() { + return nil + } + + var include []generated.ListFilesIncludeType + + if l.Timestamps { + include = append(include, ListFilesIncludeTypeTimestamps) + } + if l.ETag { + include = append(include, ListFilesIncludeTypeETag) + } + if l.Attributes { + include = append(include, ListFilesIncludeTypeAttributes) + } + if l.PermissionKey { + include = append(include, ListFilesIncludeTypePermissionKey) + } + + return include +} + +// FilesAndDirectoriesListSegment - Abstract for entries that can be listed from directory. +type FilesAndDirectoriesListSegment = generated.FilesAndDirectoriesListSegment + +// Directory - A listed directory item. +type Directory = generated.Directory + +// File - A listed file item. +type File = generated.File + +// FileProperty - File properties. +type FileProperty = generated.FileProperty + +// --------------------------------------------------------------------------------------------------------------------- + +// ListHandlesOptions contains the optional parameters for the Client.ListHandles method. +type ListHandlesOptions struct { + // A string value that identifies the portion of the list to be returned with the next list operation. The operation returns + // a marker value within the response body if the list returned was not complete. + // The marker value may then be used in a subsequent call to request the next set of list items. The marker value is opaque + // to the client. + Marker *string + // Specifies the maximum number of entries to return. If the request does not specify maxresults, or specifies a value greater + // than 5,000, the server will return up to 5,000 items. + MaxResults *int32 + // Specifies operation should apply to the directory specified in the URI, its files, its subdirectories and their files. 
+ Recursive *bool + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query. + ShareSnapshot *string +} + +func (o *ListHandlesOptions) format() *generated.DirectoryClientListHandlesOptions { + if o == nil { + return nil + } + + return &generated.DirectoryClientListHandlesOptions{ + Marker: o.Marker, + Maxresults: o.MaxResults, + Recursive: o.Recursive, + Sharesnapshot: o.ShareSnapshot, + } +} + +// Handle - A listed Azure Storage handle item. +type Handle = generated.Handle + +// --------------------------------------------------------------------------------------------------------------------- + +// ForceCloseHandlesOptions contains the optional parameters for the Client.ForceCloseHandles method. +type ForceCloseHandlesOptions struct { + // A string value that identifies the portion of the list to be returned with the next list operation. The operation returns + // a marker value within the response body if the list returned was not complete. + // The marker value may then be used in a subsequent call to request the next set of list items. The marker value is opaque + // to the client. + Marker *string + // Specifies operation should apply to the directory specified in the URI, its files, its subdirectories and their files. + Recursive *bool + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query. 
+ ShareSnapshot *string +} + +func (o *ForceCloseHandlesOptions) format() *generated.DirectoryClientForceCloseHandlesOptions { + if o == nil { + return nil + } + + return &generated.DirectoryClientForceCloseHandlesOptions{ + Marker: o.Marker, + Recursive: o.Recursive, + Sharesnapshot: o.ShareSnapshot, + } +} diff --git a/sdk/storage/azfile/directory/responses.go b/sdk/storage/azfile/directory/responses.go new file mode 100644 index 000000000000..28f2470b10ba --- /dev/null +++ b/sdk/storage/azfile/directory/responses.go @@ -0,0 +1,39 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package directory + +import "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/generated" + +// CreateResponse contains the response from method Client.Create. +type CreateResponse = generated.DirectoryClientCreateResponse + +// DeleteResponse contains the response from method Client.Delete. +type DeleteResponse = generated.DirectoryClientDeleteResponse + +// GetPropertiesResponse contains the response from method Client.GetProperties. +type GetPropertiesResponse = generated.DirectoryClientGetPropertiesResponse + +// SetPropertiesResponse contains the response from method Client.SetProperties. +type SetPropertiesResponse = generated.DirectoryClientSetPropertiesResponse + +// SetMetadataResponse contains the response from method Client.SetMetadata. +type SetMetadataResponse = generated.DirectoryClientSetMetadataResponse + +// ListFilesAndDirectoriesResponse contains the response from method Client.NewListFilesAndDirectoriesPager. +type ListFilesAndDirectoriesResponse = generated.DirectoryClientListFilesAndDirectoriesSegmentResponse + +// ListFilesAndDirectoriesSegmentResponse - An enumeration of directories and files. 
+type ListFilesAndDirectoriesSegmentResponse = generated.ListFilesAndDirectoriesSegmentResponse + +// ListHandlesResponse contains the response from method Client.ListHandles. +type ListHandlesResponse = generated.DirectoryClientListHandlesResponse + +// ListHandlesSegmentResponse - An enumeration of handles. +type ListHandlesSegmentResponse = generated.ListHandlesResponse + +// ForceCloseHandlesResponse contains the response from method Client.ForceCloseHandles. +type ForceCloseHandlesResponse = generated.DirectoryClientForceCloseHandlesResponse diff --git a/sdk/storage/azfile/doc.go b/sdk/storage/azfile/doc.go new file mode 100644 index 000000000000..51d645839165 --- /dev/null +++ b/sdk/storage/azfile/doc.go @@ -0,0 +1,229 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +/* +Package azfile provides access to Azure File Storage. +For more information please see https://learn.microsoft.com/rest/api/storageservices/file-service-rest-api + +The azfile package is capable of :- + - Creating, deleting, and querying shares in an account + - Creating, deleting, and querying directories in a share + - Creating, deleting, and querying files in a share or directory + - Creating Shared Access Signature for authentication + +Types of Resources + +The azfile package allows you to interact with four types of resources :- + +* Azure storage accounts. +* Shares within those storage accounts. +* Directories within those shares. +* Files within those shares or directories. + +The Azure File Storage (azfile) client library for Go allows you to interact with each of these components through the use of a dedicated client object. +To create a client object, you will need the account's file service endpoint URL and a credential that allows you to access the account. 
+ +Types of Credentials + +The clients support different forms of authentication. +The azfile library supports authorization via a shared key, Connection String, +or with a Shared Access Signature token. + +Using a Shared Key + +To use an account shared key (aka account key or access key), provide the key as a string. +This can be found in your storage account in the Azure Portal under the "Access Keys" section. + +Use the key as the credential parameter to authenticate the client: + + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + + serviceURL := fmt.Sprintf("https://%s.file.core.windows.net/", accountName) + + cred, err := service.NewSharedKeyCredential(accountName, accountKey) + handle(err) + + serviceClient, err := service.NewClientWithSharedKeyCredential(serviceURL, cred, nil) + handle(err) + + fmt.Println(serviceClient.URL()) + +Using a Connection String + +Depending on your use case and authorization method, you may prefer to initialize a client instance with a connection string instead of providing the account URL and credential separately. +To do this, pass the connection string to the service client's `NewClientFromConnectionString` method. +The connection string can be found in your storage account in the Azure Portal under the "Access Keys" section. + + connStr := "DefaultEndpointsProtocol=https;AccountName=;AccountKey=;EndpointSuffix=core.windows.net" + serviceClient, err := azfile.NewServiceClientFromConnectionString(connStr, nil) + handle(err) + +Using a Shared Access Signature (SAS) Token + +To use a shared access signature (SAS) token, provide the token at the end of your service URL. +You can generate a SAS token from the Azure Portal under Shared Access Signature or use the service.Client.GetSASURL() functions. 
+ + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + serviceURL := fmt.Sprintf("https://%s.file.core.windows.net/", accountName) + + cred, err := service.NewSharedKeyCredential(accountName, accountKey) + handle(err) + serviceClient, err := service.NewClientWithSharedKeyCredential(serviceURL, cred, nil) + handle(err) + fmt.Println(serviceClient.URL()) + + // Alternatively, you can create SAS on the fly + + resources := sas.AccountResourceTypes{Service: true} + permission := sas.AccountPermissions{Read: true} + start := time.Now() + expiry := start.AddDate(0, 0, 1) + serviceURLWithSAS, err := serviceClient.GetSASURL(resources, permission, expiry, &service.GetSASURLOptions{StartTime: &start}) + handle(err) + + serviceClientWithSAS, err := service.NewClientWithNoCredential(serviceURLWithSAS, nil) + handle(err) + + fmt.Println(serviceClientWithSAS.URL()) + +Types of Clients + +There are four different clients provided to interact with the various components of the File Service: + +1. **`ServiceClient`** + * Get and set account settings. + * Query, create, delete and restore shares within the account. + +2. **`ShareClient`** + * Get and set share access settings, properties, and metadata. + * Create, delete, and query directories and files within the share. + * `lease.ShareClient` to support share lease management. + +3. **`DirectoryClient`** + * Create or delete operations on a given directory. + * Get and set directory properties. + * List sub-directories and files within the given directory. + +3. **`FileClient`** + * Get and set file properties. + * Perform CRUD operations on a given file. + * `FileLeaseClient` to support file lease management. + +Examples + + // Your account name and key can be obtained from the Azure Portal. 
+ accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + + cred, err := service.NewSharedKeyCredential(accountName, accountKey) + handle(err) + + // The service URL for file endpoints is usually in the form: http(s)://.file.core.windows.net/ + serviceClient, err := service.NewClientWithSharedKeyCredential(fmt.Sprintf("https://%s.file.core.windows.net/", accountName), cred, nil) + handle(err) + + // ===== 1. Create a share ===== + + // First, create a share client, and use the Create method to create a new share in your account + shareClient := serviceClient.NewShareClient("testshare") + handle(err) + + // All APIs have an options' bag struct as a parameter. + // The options' bag struct allows you to specify optional parameters such as metadata, quota, etc. + // If you want to use the default options, pass in nil. + _, err = shareClient.Create(context.TODO(), nil) + handle(err) + + // ===== 2. Create a directory ===== + + // First, create a directory client, and use the Create method to create a new directory in the share + dirClient := shareClient.NewDirectoryClient("testdir") + _, err = dirClient.Create(context.TODO(), nil) + + // ===== 3. Upload and Download a file ===== + uploadData := "Hello world!" 
+ + // First, create a file client, and use the Create method to create a new file in the directory + fileClient := dirClient.NewFileClient("HelloWorld.txt") + _, err = fileClient.Create(context.TODO(), int64(len(uploadData)), nil) + handle(err) + + // Upload data to the file + _, err = fileClient.UploadRange(context.TODO(), 0, streaming.NopCloser(strings.NewReader(uploadData)), nil) + handle(err) + + // Download the file's contents and ensure that the download worked properly + fileDownloadResponse, err := fileClient.DownloadStream(context.TODO(), nil) + handle(err) + + // Use io.readAll to read the downloaded data. + // RetryReaderOptions has a lot of in-depth tuning abilities, but for the sake of simplicity, we'll omit those here. + reader := fileDownloadResponse.Body + downloadData, err := io.ReadAll(reader) + handle(err) + if string(downloadData) != uploadData { + handle(errors.New("uploaded data should be same as downloaded data")) + } + + if err = reader.Close(); err != nil { + handle(err) + return + } + + // ===== 3. List directories and files in a share ===== + // List methods returns a pager object which can be used to iterate over the results of a paging operation. + // To iterate over a page use the NextPage(context.Context) to fetch the next page of results. + // PageResponse() can be used to iterate over the results of the specific page. + // Always check the Err() method after paging to see if an error was returned by the pager. A pager will return either an error or the page of results. + // The below code lists the contents only for a single level of the directory hierarchy. + rootDirClient := shareClient.NewRootDirectoryClient() + pager := rootDirClient.NewListFilesAndDirectoriesPager(nil) + for pager.More() { + resp, err := pager.NextPage(context.TODO()) + handle(err) + for _, d := range resp.Segment.Directories { + fmt.Println(*d.Name) + } + for _, f := range resp.Segment.Files { + fmt.Println(*f.Name) + } + } + + // Delete the file. 
+ _, err = fileClient.Delete(context.TODO(), nil) + handle(err) + + // Delete the directory. + _, err = dirClient.Delete(context.TODO(), nil) + handle(err) + + // Delete the share. + _, err = shareClient.Delete(context.TODO(), nil) + handle(err) +*/ + +package azfile diff --git a/sdk/storage/azfile/file/chunkwriting.go b/sdk/storage/azfile/file/chunkwriting.go new file mode 100644 index 000000000000..21070c19bcad --- /dev/null +++ b/sdk/storage/azfile/file/chunkwriting.go @@ -0,0 +1,189 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package file + +import ( + "bytes" + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "io" + "sync" +) + +// chunkWriter provides methods to upload chunks that represent a file to a server. +// This allows us to provide a local implementation that fakes the server for hermetic testing. +type chunkWriter interface { + UploadRange(context.Context, int64, io.ReadSeekCloser, *UploadRangeOptions) (UploadRangeResponse, error) +} + +// bufferManager provides an abstraction for the management of buffers. +// this is mostly for testing purposes, but does allow for different implementations without changing the algorithm. +type bufferManager[T ~[]byte] interface { + // Acquire returns the channel that contains the pool of buffers. + Acquire() <-chan T + + // Release releases the buffer back to the pool for reuse/cleanup. + Release(T) + + // Grow grows the number of buffers, up to the predefined max. + // It returns the total number of buffers or an error. + // No error is returned if the number of buffers has reached max. + // This is called only from the reading goroutine. + Grow() (int, error) + + // Free cleans up all buffers. + Free() +} + +// copyFromReader copies a source io.Reader to file storage using concurrent uploads. 
+func copyFromReader[T ~[]byte](ctx context.Context, src io.Reader, dst chunkWriter, options UploadStreamOptions, getBufferManager func(maxBuffers int, bufferSize int64) bufferManager[T]) error { + options.setDefaults() + + wg := sync.WaitGroup{} // Used to know when all outgoing chunks have finished processing + errCh := make(chan error, 1) // contains the first error encountered during processing + var err error + + buffers := getBufferManager(options.Concurrency, options.ChunkSize) + defer buffers.Free() + + // this controls the lifetime of the uploading goroutines. + // if an error is encountered, cancel() is called which will terminate all uploads. + // NOTE: the ordering is important here. cancel MUST execute before + // cleaning up the buffers so that any uploading goroutines exit first, + // releasing their buffers back to the pool for cleanup. + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + // This goroutine grabs a buffer, reads from the stream into the buffer, + // then creates a goroutine to upload/stage the chunk. + for chunkNum := uint32(0); true; chunkNum++ { + var buffer T + select { + case buffer = <-buffers.Acquire(): + // got a buffer + default: + // no buffer available; allocate a new buffer if possible + if _, err := buffers.Grow(); err != nil { + return err + } + + // either grab the newly allocated buffer or wait for one to become available + buffer = <-buffers.Acquire() + } + + var n int + n, err = io.ReadFull(src, buffer) + + if n > 0 { + // some data was read, upload it + wg.Add(1) // We're posting a buffer to be sent + + // NOTE: we must pass chunkNum as an arg to our goroutine else + // it's captured by reference and can change underneath us! 
+ go func(chunkNum uint32) { + // Upload the outgoing chunk, matching the number of bytes read + offset := int64(chunkNum) * options.ChunkSize + uploadRangeOptions := options.getUploadRangeOptions() + _, err := dst.UploadRange(ctx, offset, streaming.NopCloser(bytes.NewReader(buffer[:n])), uploadRangeOptions) + if err != nil { + select { + case errCh <- err: + // error was set + default: + // some other error is already set + } + cancel() + } + buffers.Release(buffer) // The goroutine reading from the stream can reuse this buffer now + + // signal that the chunk has been staged. + // we MUST do this after attempting to write to errCh + // to avoid it racing with the reading goroutine. + wg.Done() + }(chunkNum) + } else { + // nothing was read so the buffer is empty, send it back for reuse/clean-up. + buffers.Release(buffer) + } + + if err != nil { // The reader is done, no more outgoing buffers + if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) { + // these are expected errors, we don't surface those + err = nil + } else { + // some other error happened, terminate any outstanding uploads + cancel() + } + break + } + } + + wg.Wait() // Wait for all outgoing chunks to complete + + if err != nil { + // there was an error reading from src, favor this error over any error during staging + return err + } + + select { + case err = <-errCh: + // there was an error during staging + return err + default: + // no error was encountered + } + + // All chunks uploaded, return nil error + return nil +} + +// mmbPool implements the bufferManager interface. +// it uses anonymous memory mapped files for buffers. +// don't use this type directly, use newMMBPool() instead. 
+type mmbPool struct { + buffers chan mmb + count int + max int + size int64 +} + +func newMMBPool(maxBuffers int, bufferSize int64) bufferManager[mmb] { + return &mmbPool{ + buffers: make(chan mmb, maxBuffers), + max: maxBuffers, + size: bufferSize, + } +} + +func (pool *mmbPool) Acquire() <-chan mmb { + return pool.buffers +} + +func (pool *mmbPool) Grow() (int, error) { + if pool.count < pool.max { + buffer, err := newMMB(pool.size) + if err != nil { + return 0, err + } + pool.buffers <- buffer + pool.count++ + } + return pool.count, nil +} + +func (pool *mmbPool) Release(buffer mmb) { + pool.buffers <- buffer +} + +func (pool *mmbPool) Free() { + for i := 0; i < pool.count; i++ { + buffer := <-pool.buffers + buffer.delete() + } + pool.count = 0 +} diff --git a/sdk/storage/azfile/file/client.go b/sdk/storage/azfile/file/client.go new file mode 100644 index 000000000000..432f8ae379a1 --- /dev/null +++ b/sdk/storage/azfile/file/client.go @@ -0,0 +1,505 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package file + +import ( + "bytes" + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/fileerror" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/base" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/sas" + "io" + "os" + "strings" + "sync" + "time" +) + +// ClientOptions contains the optional parameters when creating a Client. 
+type ClientOptions base.ClientOptions
+
+// Client represents a URL to the Azure Storage file.
+type Client base.Client[generated.FileClient]
+
+// NewClientWithNoCredential creates an instance of Client with the specified values.
+// This is used to anonymously access a file or with a shared access signature (SAS) token.
+//   - fileURL - the URL of the file e.g. https://<account>.file.core.windows.net/share/directoryPath/file?<sas token>
+//   - options - client options; pass nil to accept the default values
+//
+// The directoryPath is optional in the fileURL. If omitted, it points to a file within the specified share.
+func NewClientWithNoCredential(fileURL string, options *ClientOptions) (*Client, error) {
+	conOptions := shared.GetClientOptions(options)
+	pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
+
+	return (*Client)(base.NewFileClient(fileURL, pl, nil)), nil
+}
+
+// NewClientWithSharedKeyCredential creates an instance of Client with the specified values.
+//   - fileURL - the URL of the file e.g. https://<account>.file.core.windows.net/share/directoryPath/file
+//   - cred - a SharedKeyCredential created with the matching file's storage account and access key
+//   - options - client options; pass nil to accept the default values
+//
+// The directoryPath is optional in the fileURL. If omitted, it points to a file within the specified share.
+func NewClientWithSharedKeyCredential(fileURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) { + authPolicy := exported.NewSharedKeyCredPolicy(cred) + conOptions := shared.GetClientOptions(options) + conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*Client)(base.NewFileClient(fileURL, pl, cred)), nil +} + +// NewClientFromConnectionString creates an instance of Client with the specified values. +// - connectionString - a connection string for the desired storage account +// - shareName - the name of the share within the storage account +// - filePath - the path of the file within the share +// - options - client options; pass nil to accept the default values +func NewClientFromConnectionString(connectionString string, shareName string, filePath string, options *ClientOptions) (*Client, error) { + parsed, err := shared.ParseConnectionString(connectionString) + if err != nil { + return nil, err + } + + filePath = strings.ReplaceAll(filePath, "\\", "/") + parsed.ServiceURL = runtime.JoinPaths(parsed.ServiceURL, shareName, filePath) + + if parsed.AccountKey != "" && parsed.AccountName != "" { + credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey) + if err != nil { + return nil, err + } + return NewClientWithSharedKeyCredential(parsed.ServiceURL, credential, options) + } + + return NewClientWithNoCredential(parsed.ServiceURL, options) +} + +func (f *Client) generated() *generated.FileClient { + return base.InnerClient((*base.Client[generated.FileClient])(f)) +} + +func (f *Client) sharedKey() *SharedKeyCredential { + return base.SharedKey((*base.Client[generated.FileClient])(f)) +} + +// URL returns the URL endpoint used by the Client object. 
+func (f *Client) URL() string { + return f.generated().Endpoint() +} + +// Create operation creates a new file or replaces a file. Note it only initializes the file with no content. +// - fileContentLength: Specifies the maximum size for the file in bytes, up to 4 TB. +// +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/create-file. +func (f *Client) Create(ctx context.Context, fileContentLength int64, options *CreateOptions) (CreateResponse, error) { + fileAttributes, fileCreationTime, fileLastWriteTime, fileCreateOptions, fileHTTPHeaders, leaseAccessConditions := options.format() + resp, err := f.generated().Create(ctx, fileContentLength, fileAttributes, fileCreationTime, fileLastWriteTime, fileCreateOptions, fileHTTPHeaders, leaseAccessConditions) + return resp, err +} + +// Delete operation removes the file from the storage account. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/delete-file2. +func (f *Client) Delete(ctx context.Context, options *DeleteOptions) (DeleteResponse, error) { + opts, leaseAccessConditions := options.format() + resp, err := f.generated().Delete(ctx, opts, leaseAccessConditions) + return resp, err +} + +// GetProperties operation returns all user-defined metadata, standard HTTP properties, and system properties for the file. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-file-properties. +func (f *Client) GetProperties(ctx context.Context, options *GetPropertiesOptions) (GetPropertiesResponse, error) { + opts, leaseAccessConditions := options.format() + resp, err := f.generated().GetProperties(ctx, opts, leaseAccessConditions) + return resp, err +} + +// SetHTTPHeaders operation sets HTTP headers on the file. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/set-file-properties. 
+func (f *Client) SetHTTPHeaders(ctx context.Context, options *SetHTTPHeadersOptions) (SetHTTPHeadersResponse, error) { + fileAttributes, fileCreationTime, fileLastWriteTime, opts, fileHTTPHeaders, leaseAccessConditions := options.format() + resp, err := f.generated().SetHTTPHeaders(ctx, fileAttributes, fileCreationTime, fileLastWriteTime, opts, fileHTTPHeaders, leaseAccessConditions) + return resp, err +} + +// SetMetadata operation sets user-defined metadata for the specified file. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/set-file-metadata. +func (f *Client) SetMetadata(ctx context.Context, options *SetMetadataOptions) (SetMetadataResponse, error) { + opts, leaseAccessConditions := options.format() + resp, err := f.generated().SetMetadata(ctx, opts, leaseAccessConditions) + return resp, err +} + +// StartCopyFromURL operation copies the data at the source URL to a file. +// - copySource: specifies the URL of the source file or blob, up to 2KiB in length. +// +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/copy-file. +func (f *Client) StartCopyFromURL(ctx context.Context, copySource string, options *StartCopyFromURLOptions) (StartCopyFromURLResponse, error) { + opts, copyFileSmbInfo, leaseAccessConditions := options.format() + resp, err := f.generated().StartCopy(ctx, copySource, opts, copyFileSmbInfo, leaseAccessConditions) + return resp, err +} + +// AbortCopy operation cancels a pending Copy File operation, and leaves a destination file with zero length and full metadata. +// - copyID: the copy identifier provided in the x-ms-copy-id header of the original Copy File operation. +// +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/abort-copy-file. 
+func (f *Client) AbortCopy(ctx context.Context, copyID string, options *AbortCopyOptions) (AbortCopyResponse, error) { + opts, leaseAccessConditions := options.format() + resp, err := f.generated().AbortCopy(ctx, copyID, opts, leaseAccessConditions) + return resp, err +} + +// Resize operation resizes the file to the specified size. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/set-file-properties. +func (f *Client) Resize(ctx context.Context, size int64, options *ResizeOptions) (ResizeResponse, error) { + fileAttributes, fileCreationTime, fileLastWriteTime, opts, leaseAccessConditions := options.format(size) + resp, err := f.generated().SetHTTPHeaders(ctx, fileAttributes, fileCreationTime, fileLastWriteTime, opts, nil, leaseAccessConditions) + return resp, err +} + +// UploadRange operation uploads a range of bytes to a file. +// - offset: Specifies the start byte at which the range of bytes is to be written. +// - body: Specifies the data to be uploaded. +// +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/put-range. +func (f *Client) UploadRange(ctx context.Context, offset int64, body io.ReadSeekCloser, options *UploadRangeOptions) (UploadRangeResponse, error) { + rangeParam, contentLength, uploadRangeOptions, leaseAccessConditions, err := options.format(offset, body) + if err != nil { + return UploadRangeResponse{}, err + } + + resp, err := f.generated().UploadRange(ctx, rangeParam, RangeWriteTypeUpdate, contentLength, body, uploadRangeOptions, leaseAccessConditions) + return resp, err +} + +// ClearRange operation clears the specified range and releases the space used in storage for that range. +// - contentRange: Specifies the range of bytes to be cleared. +// +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/put-range. 
+func (f *Client) ClearRange(ctx context.Context, contentRange HTTPRange, options *ClearRangeOptions) (ClearRangeResponse, error) { + rangeParam, leaseAccessConditions, err := options.format(contentRange) + if err != nil { + return ClearRangeResponse{}, err + } + + resp, err := f.generated().UploadRange(ctx, rangeParam, RangeWriteTypeClear, 0, nil, nil, leaseAccessConditions) + return resp, err +} + +// UploadRangeFromURL operation uploads a range of bytes to a file where the contents are read from a URL. +// - copySource: Specifies the URL of the source file or blob, up to 2 KB in length. +// - destinationRange: Specifies the range of bytes in the file to be written. +// - sourceRange: Bytes of source data in the specified range. +// +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/put-range-from-url. +func (f *Client) UploadRangeFromURL(ctx context.Context, copySource string, sourceOffset int64, destinationOffset int64, count int64, options *UploadRangeFromURLOptions) (UploadRangeFromURLResponse, error) { + destRange, opts, sourceModifiedAccessConditions, leaseAccessConditions, err := options.format(sourceOffset, destinationOffset, count) + if err != nil { + return UploadRangeFromURLResponse{}, err + } + + resp, err := f.generated().UploadRangeFromURL(ctx, destRange, copySource, 0, opts, sourceModifiedAccessConditions, leaseAccessConditions) + return resp, err +} + +// GetRangeList operation returns the list of valid ranges for a file. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/list-ranges. +func (f *Client) GetRangeList(ctx context.Context, options *GetRangeListOptions) (GetRangeListResponse, error) { + opts, leaseAccessConditions := options.format() + resp, err := f.generated().GetRangeList(ctx, opts, leaseAccessConditions) + return resp, err +} + +// ForceCloseHandles operation closes a handle or handles opened on a file. +// - handleID - Specifies the handle ID to be closed. 
Use an asterisk (*) as a wildcard string to specify all handles. +// +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/force-close-handles. +func (f *Client) ForceCloseHandles(ctx context.Context, handleID string, options *ForceCloseHandlesOptions) (ForceCloseHandlesResponse, error) { + opts := options.format() + resp, err := f.generated().ForceCloseHandles(ctx, handleID, opts) + return resp, err +} + +// ListHandles operation returns a list of open handles on a file. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/list-handles. +func (f *Client) ListHandles(ctx context.Context, options *ListHandlesOptions) (ListHandlesResponse, error) { + opts := options.format() + resp, err := f.generated().ListHandles(ctx, opts) + return resp, err +} + +// GetSASURL is a convenience method for generating a SAS token for the currently pointed at file. +// It can only be used if the credential supplied during creation was a SharedKeyCredential. +func (f *Client) GetSASURL(permissions sas.FilePermissions, expiry time.Time, o *GetSASURLOptions) (string, error) { + if f.sharedKey() == nil { + return "", fileerror.MissingSharedKeyCredential + } + st := o.format() + + urlParts, err := ParseURL(f.URL()) + if err != nil { + return "", err + } + + qps, err := sas.SignatureValues{ + Version: sas.Version, + Protocol: sas.ProtocolHTTPS, + ShareName: urlParts.ShareName, + FilePath: urlParts.DirectoryOrFilePath, + Permissions: permissions.String(), + StartTime: st, + ExpiryTime: expiry.UTC(), + }.SignWithSharedKey(f.sharedKey()) + if err != nil { + return "", err + } + + endpoint := f.URL() + "?" + qps.Encode() + + return endpoint, nil +} + +// Concurrent Upload Functions ----------------------------------------------------------------------------------------- + +// uploadFromReader uploads a buffer in chunks to an Azure file. 
+func (f *Client) uploadFromReader(ctx context.Context, reader io.ReaderAt, actualSize int64, o *uploadFromReaderOptions) error { + if actualSize > MaxFileSize { + return errors.New("buffer is too large to upload to a file") + } + if o.ChunkSize == 0 { + o.ChunkSize = MaxUpdateRangeBytes + } + + if log.Should(exported.EventUpload) { + urlParts, err := ParseURL(f.URL()) + if err == nil { + log.Writef(exported.EventUpload, "file name %s actual size %v chunk-size %v chunk-count %v", + urlParts.DirectoryOrFilePath, actualSize, o.ChunkSize, ((actualSize-1)/o.ChunkSize)+1) + } + } + + progress := int64(0) + progressLock := &sync.Mutex{} + + err := shared.DoBatchTransfer(ctx, &shared.BatchTransferOptions{ + OperationName: "uploadFromReader", + TransferSize: actualSize, + ChunkSize: o.ChunkSize, + Concurrency: o.Concurrency, + Operation: func(ctx context.Context, offset int64, chunkSize int64) error { + // This function is called once per file range. + // It is passed this file's offset within the buffer and its count of bytes + // Prepare to read the proper range/section of the buffer + if chunkSize < o.ChunkSize { + // this is the last file range. Its actual size might be less + // than the calculated size due to rounding up of the payload + // size to fit in a whole number of chunks. 
+ chunkSize = actualSize - offset + } + var body io.ReadSeeker = io.NewSectionReader(reader, offset, chunkSize) + if o.Progress != nil { + chunkProgress := int64(0) + body = streaming.NewRequestProgress(streaming.NopCloser(body), + func(bytesTransferred int64) { + diff := bytesTransferred - chunkProgress + chunkProgress = bytesTransferred + progressLock.Lock() // 1 goroutine at a time gets progress report + progress += diff + o.Progress(progress) + progressLock.Unlock() + }) + } + + uploadRangeOptions := o.getUploadRangeOptions() + _, err := f.UploadRange(ctx, offset, streaming.NopCloser(body), uploadRangeOptions) + return err + }, + }) + return err +} + +// UploadBuffer uploads a buffer in chunks to an Azure file. +func (f *Client) UploadBuffer(ctx context.Context, buffer []byte, options *UploadBufferOptions) error { + uploadOptions := uploadFromReaderOptions{} + if options != nil { + uploadOptions = *options + } + return f.uploadFromReader(ctx, bytes.NewReader(buffer), int64(len(buffer)), &uploadOptions) +} + +// UploadFile uploads a file in chunks to an Azure file. +func (f *Client) UploadFile(ctx context.Context, file *os.File, options *UploadFileOptions) error { + stat, err := file.Stat() + if err != nil { + return err + } + uploadOptions := uploadFromReaderOptions{} + if options != nil { + uploadOptions = *options + } + return f.uploadFromReader(ctx, file, stat.Size(), &uploadOptions) +} + +// UploadStream copies the file held in io.Reader to the file at fileClient. +// A Context deadline or cancellation will cause this to error. 
+func (f *Client) UploadStream(ctx context.Context, body io.Reader, options *UploadStreamOptions) error { + if options == nil { + options = &UploadStreamOptions{} + } + + err := copyFromReader(ctx, body, f, *options, newMMBPool) + return err +} + +// Concurrent Download Functions ----------------------------------------------------------------------------------------- + +// download method downloads an Azure file to a WriterAt in parallel. +func (f *Client) download(ctx context.Context, writer io.WriterAt, o downloadOptions) (int64, error) { + if o.ChunkSize == 0 { + o.ChunkSize = DefaultDownloadChunkSize + } + + count := o.Range.Count + if count == CountToEnd { // If size not specified, calculate it + // If we don't have the length at all, get it + getFilePropertiesOptions := o.getFilePropertiesOptions() + gr, err := f.GetProperties(ctx, getFilePropertiesOptions) + if err != nil { + return 0, err + } + count = *gr.ContentLength - o.Range.Offset + } + + if count <= 0 { + // The file is empty, there is nothing to download. + return 0, nil + } + + // Prepare and do parallel download. 
+ progress := int64(0) + progressLock := &sync.Mutex{} + + err := shared.DoBatchTransfer(ctx, &shared.BatchTransferOptions{ + OperationName: "downloadFileToWriterAt", + TransferSize: count, + ChunkSize: o.ChunkSize, + Concurrency: o.Concurrency, + Operation: func(ctx context.Context, chunkStart int64, count int64) error { + downloadFileOptions := o.getDownloadFileOptions(HTTPRange{ + Offset: chunkStart + o.Range.Offset, + Count: count, + }) + dr, err := f.DownloadStream(ctx, downloadFileOptions) + if err != nil { + return err + } + var body io.ReadCloser = dr.NewRetryReader(ctx, &o.RetryReaderOptionsPerChunk) + if o.Progress != nil { + rangeProgress := int64(0) + body = streaming.NewResponseProgress( + body, + func(bytesTransferred int64) { + diff := bytesTransferred - rangeProgress + rangeProgress = bytesTransferred + progressLock.Lock() + progress += diff + o.Progress(progress) + progressLock.Unlock() + }) + } + _, err = io.Copy(shared.NewSectionWriter(writer, chunkStart, count), body) + if err != nil { + return err + } + err = body.Close() + return err + }, + }) + if err != nil { + return 0, err + } + return count, nil +} + +// DownloadStream operation reads or downloads a file from the system, including its metadata and properties. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-file. +func (f *Client) DownloadStream(ctx context.Context, options *DownloadStreamOptions) (DownloadStreamResponse, error) { + opts, leaseAccessConditions := options.format() + if options == nil { + options = &DownloadStreamOptions{} + } + + resp, err := f.generated().Download(ctx, opts, leaseAccessConditions) + if err != nil { + return DownloadStreamResponse{}, err + } + + return DownloadStreamResponse{ + DownloadResponse: resp, + client: f, + getInfo: httpGetterInfo{Range: options.Range}, + leaseAccessConditions: options.LeaseAccessConditions, + }, err +} + +// DownloadBuffer downloads an Azure file to a buffer with parallel. 
+func (f *Client) DownloadBuffer(ctx context.Context, buffer []byte, o *DownloadBufferOptions) (int64, error) { + if o == nil { + o = &DownloadBufferOptions{} + } + + return f.download(ctx, shared.NewBytesWriter(buffer), (downloadOptions)(*o)) +} + +// DownloadFile downloads an Azure file to a local file. +// The file would be truncated if the size doesn't match. +func (f *Client) DownloadFile(ctx context.Context, file *os.File, o *DownloadFileOptions) (int64, error) { + if o == nil { + o = &DownloadFileOptions{} + } + do := (*downloadOptions)(o) + + // 1. Calculate the size of the destination file + var size int64 + + count := do.Range.Count + if count == CountToEnd { + // Try to get Azure file's size + getFilePropertiesOptions := do.getFilePropertiesOptions() + props, err := f.GetProperties(ctx, getFilePropertiesOptions) + if err != nil { + return 0, err + } + size = *props.ContentLength - do.Range.Offset + } else { + size = count + } + + // 2. Compare and try to resize local file's size if it doesn't match Azure file's size. + stat, err := file.Stat() + if err != nil { + return 0, err + } + if stat.Size() != size { + if err = file.Truncate(size); err != nil { + return 0, err + } + } + + if size > 0 { + return f.download(ctx, file, *do) + } else { // if the file's size is 0, there is no need in downloading it + return 0, nil + } +} diff --git a/sdk/storage/azfile/file/client_test.go b/sdk/storage/azfile/file/client_test.go new file mode 100644 index 000000000000..f04191104e1a --- /dev/null +++ b/sdk/storage/azfile/file/client_test.go @@ -0,0 +1,3121 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package file_test + +import ( + "bytes" + "context" + "crypto/md5" + "crypto/rand" + "encoding/binary" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/recording" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/fileerror" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/testcommon" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/sas" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/share" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "hash/crc64" + "io" + "io/ioutil" + "net/http" + "os" + "strings" + "testing" + "time" +) + +func Test(t *testing.T) { + recordMode := recording.GetRecordMode() + t.Logf("Running file Tests in %s mode\n", recordMode) + if recordMode == recording.LiveMode { + suite.Run(t, &FileRecordedTestsSuite{}) + suite.Run(t, &FileUnrecordedTestsSuite{}) + } else if recordMode == recording.PlaybackMode { + suite.Run(t, &FileRecordedTestsSuite{}) + } else if recordMode == recording.RecordingMode { + suite.Run(t, &FileRecordedTestsSuite{}) + } +} + +func (f *FileRecordedTestsSuite) BeforeTest(suite string, test string) { + testcommon.BeforeTest(f.T(), suite, test) +} + +func (f *FileRecordedTestsSuite) AfterTest(suite string, test string) { + testcommon.AfterTest(f.T(), suite, test) +} + +func (f *FileUnrecordedTestsSuite) BeforeTest(suite string, test string) { + +} + +func (f *FileUnrecordedTestsSuite) AfterTest(suite string, test string) { + +} + +type FileRecordedTestsSuite struct { + suite.Suite +} + +type FileUnrecordedTestsSuite struct { + suite.Suite +} + +func (f *FileRecordedTestsSuite) 
TestFileNewFileClient() { + _require := require.New(f.T()) + testName := f.T().Name() + + accountName, _ := testcommon.GetGenericAccountInfo(testcommon.TestAccountDefault) + _require.Greater(len(accountName), 0) + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := svcClient.NewShareClient(shareName) + + dirName := testcommon.GenerateDirectoryName(testName) + dirClient := shareClient.NewDirectoryClient(dirName) + + fileName := testcommon.GenerateFileName(testName) + fileClient := dirClient.NewFileClient(fileName) + + correctURL := "https://" + accountName + ".file.core.windows.net/" + shareName + "/" + dirName + "/" + fileName + _require.Equal(fileClient.URL(), correctURL) + + rootFileClient := shareClient.NewRootDirectoryClient().NewFileClient(fileName) + + correctURL = "https://" + accountName + ".file.core.windows.net/" + shareName + "/" + fileName + _require.Equal(rootFileClient.URL(), correctURL) +} + +func (f *FileRecordedTestsSuite) TestFileCreateUsingSharedKey() { + _require := require.New(f.T()) + testName := f.T().Name() + + cred, err := testcommon.GetGenericSharedKeyCredential(testcommon.TestAccountDefault) + _require.NoError(err) + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirName := testcommon.GenerateDirectoryName(testName) + fileName := testcommon.GenerateFileName(testName) + fileURL := "https://" + cred.AccountName() + ".file.core.windows.net/" + shareName + "/" + dirName + "/" + fileName + + options := &file.ClientOptions{} + testcommon.SetClientOptions(f.T(), &options.ClientOptions) + fileClient, err := 
file.NewClientWithSharedKeyCredential(fileURL, cred, options) + _require.NoError(err) + + // creating file where directory does not exist gives ParentNotFound error + _, err = fileClient.Create(context.Background(), 1024, nil) + _require.Error(err) + testcommon.ValidateFileErrorCode(_require, err, fileerror.ParentNotFound) + + testcommon.CreateNewDirectory(context.Background(), _require, dirName, shareClient) + + resp, err := fileClient.Create(context.Background(), 1024, nil) + _require.NoError(err) + _require.NotNil(resp.ETag) + _require.NotNil(resp.RequestID) + _require.Equal(resp.LastModified.IsZero(), false) + _require.Equal(resp.FileCreationTime.IsZero(), false) + _require.Equal(resp.FileLastWriteTime.IsZero(), false) + _require.Equal(resp.FileChangeTime.IsZero(), false) +} + +func (f *FileRecordedTestsSuite) TestFileCreateUsingConnectionString() { + _require := require.New(f.T()) + testName := f.T().Name() + + connString, err := testcommon.GetGenericConnectionString(testcommon.TestAccountDefault) + _require.NoError(err) + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirName := testcommon.GenerateDirectoryName(testName) + fileName := testcommon.GenerateFileName(testName) + options := &file.ClientOptions{} + testcommon.SetClientOptions(f.T(), &options.ClientOptions) + fileClient1, err := file.NewClientFromConnectionString(*connString, shareName, fileName, options) + _require.NoError(err) + + resp, err := fileClient1.Create(context.Background(), 1024, nil) + _require.NoError(err) + _require.NotNil(resp.ETag) + _require.NotNil(resp.RequestID) + _require.Equal(resp.LastModified.IsZero(), false) + _require.Equal(resp.FileCreationTime.IsZero(), false) + 
_require.Equal(resp.FileLastWriteTime.IsZero(), false) + _require.Equal(resp.FileChangeTime.IsZero(), false) + + filePath := dirName + "/" + fileName + fileClient2, err := file.NewClientFromConnectionString(*connString, shareName, filePath, options) + _require.NoError(err) + + _, err = fileClient2.Create(context.Background(), 1024, nil) + _require.Error(err) + testcommon.ValidateFileErrorCode(_require, err, fileerror.ParentNotFound) + + testcommon.CreateNewDirectory(context.Background(), _require, dirName, shareClient) + + // using '\' as path separator + filePath = dirName + "\\" + fileName + fileClient3, err := file.NewClientFromConnectionString(*connString, shareName, filePath, options) + _require.NoError(err) + + resp, err = fileClient3.Create(context.Background(), 1024, nil) + _require.NoError(err) + _require.NotNil(resp.RequestID) + _require.Equal(resp.LastModified.IsZero(), false) + _require.Equal(resp.FileCreationTime.IsZero(), false) +} + +func (f *FileUnrecordedTestsSuite) TestFileClientUsingSAS() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirName := testcommon.GenerateDirectoryName(testName) + dirClient := testcommon.CreateNewDirectory(context.Background(), _require, dirName, shareClient) + + fileName := testcommon.GenerateFileName(testName) + fileClient := dirClient.NewFileClient(fileName) + + permissions := sas.FilePermissions{ + Read: true, + Write: true, + Delete: true, + Create: true, + } + expiry := time.Now().Add(time.Hour) + + fileSASURL, err := fileClient.GetSASURL(permissions, expiry, nil) + _require.NoError(err) + + fileSASClient, err := file.NewClientWithNoCredential(fileSASURL, 
nil) + _require.NoError(err) + + _, err = fileSASClient.GetProperties(context.Background(), nil) + _require.Error(err) + testcommon.ValidateFileErrorCode(_require, err, fileerror.ResourceNotFound) + + _, err = fileSASClient.Create(context.Background(), 1024, nil) + _require.NoError(err) + + resp, err := fileSASClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.NotNil(resp.RequestID) + _require.Equal(resp.LastModified.IsZero(), false) + _require.Equal(resp.FileCreationTime.IsZero(), false) +} + +func (f *FileRecordedTestsSuite) TestFileCreateDeleteDefault() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fileName := testcommon.GenerateFileName(testName) + rootDirClient := shareClient.NewRootDirectoryClient() + _require.NoError(err) + + fClient := rootDirClient.NewFileClient(fileName) + + // Create and delete file in root directory. + cResp, err := fClient.Create(context.Background(), 1024, nil) + _require.NoError(err) + _require.NotNil(cResp.ETag) + _require.Equal(cResp.LastModified.IsZero(), false) + _require.NotNil(cResp.RequestID) + _require.NotNil(cResp.Version) + _require.Equal(cResp.Date.IsZero(), false) + _require.NotNil(cResp.IsServerEncrypted) + + delResp, err := fClient.Delete(context.Background(), nil) + _require.NoError(err) + _require.NotNil(delResp.RequestID) + _require.NotNil(delResp.Version) + _require.Equal(delResp.Date.IsZero(), false) + + dirClient := testcommon.CreateNewDirectory(context.Background(), _require, testcommon.GenerateDirectoryName(testName), shareClient) + + // Create and delete file in named directory. 
+ afClient := dirClient.NewFileClient(fileName) + + cResp, err = afClient.Create(context.Background(), 1024, nil) + _require.NoError(err) + _require.NotNil(cResp.ETag) + _require.Equal(cResp.LastModified.IsZero(), false) + _require.NotNil(cResp.RequestID) + _require.NotNil(cResp.Version) + _require.Equal(cResp.Date.IsZero(), false) + _require.NotNil(cResp.IsServerEncrypted) + + delResp, err = afClient.Delete(context.Background(), nil) + _require.NoError(err) + _require.NotNil(delResp.RequestID) + _require.NotNil(delResp.Version) + _require.Equal(delResp.Date.IsZero(), false) +} + +func (f *FileRecordedTestsSuite) TestFileCreateNonDefaultMetadataNonEmpty() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient(testcommon.GenerateFileName(testName)) + + _, err = fClient.Create(context.Background(), 1024, &file.CreateOptions{ + Metadata: testcommon.BasicMetadata, + }) + _require.NoError(err) + + resp, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Len(resp.Metadata, len(testcommon.BasicMetadata)) + for k, v := range resp.Metadata { + val := testcommon.BasicMetadata[strings.ToLower(k)] + _require.NotNil(val) + _require.Equal(*v, *val) + } +} + +func (f *FileRecordedTestsSuite) TestFileCreateNonDefaultHTTPHeaders() { + _require := require.New(f.T()) + testName := f.T().Name() + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer 
testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient(testcommon.GenerateFileName(testName)) + + httpHeaders := file.HTTPHeaders{ + ContentType: to.Ptr("my_type"), + ContentDisposition: to.Ptr("my_disposition"), + CacheControl: to.Ptr("control"), + ContentMD5: nil, + ContentLanguage: to.Ptr("my_language"), + ContentEncoding: to.Ptr("my_encoding"), + } + + _, err = fClient.Create(context.Background(), 1024, &file.CreateOptions{ + HTTPHeaders: &httpHeaders, + }) + _require.NoError(err) + + resp, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.EqualValues(resp.ContentType, httpHeaders.ContentType) + _require.EqualValues(resp.ContentDisposition, httpHeaders.ContentDisposition) + _require.EqualValues(resp.CacheControl, httpHeaders.CacheControl) + _require.EqualValues(resp.ContentLanguage, httpHeaders.ContentLanguage) + _require.EqualValues(resp.ContentEncoding, httpHeaders.ContentEncoding) + _require.Nil(resp.ContentMD5) +} + +func (f *FileRecordedTestsSuite) TestFileCreateNegativeMetadataInvalid() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient(testcommon.GenerateFileName(testName)) + + _, err = fClient.Create(context.Background(), 1024, &file.CreateOptions{ + Metadata: map[string]*string{"!@#$%^&*()": to.Ptr("!@#$%^&*()")}, + HTTPHeaders: &file.HTTPHeaders{}, + }) + _require.Error(err) +} + +func (f *FileUnrecordedTestsSuite) TestFileGetSetPropertiesNonDefault() { + _require := require.New(f.T()) + testName := f.T().Name() + svcClient, err := 
testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient(testcommon.GenerateFileName(testName)) + _, err = fClient.Create(context.Background(), 0, nil) + _require.NoError(err) + + md5Str := "MDAwMDAwMDA=" + testMd5 := []byte(md5Str) + + creationTime := time.Now().Add(-time.Hour) + lastWriteTime := time.Now().Add(-time.Minute * 15) + + options := &file.SetHTTPHeadersOptions{ + Permissions: &file.Permissions{Permission: &testcommon.SampleSDDL}, + SMBProperties: &file.SMBProperties{ + Attributes: &file.NTFSFileAttributes{Hidden: true}, + CreationTime: &creationTime, + LastWriteTime: &lastWriteTime, + }, + HTTPHeaders: &file.HTTPHeaders{ + ContentType: to.Ptr("text/html"), + ContentEncoding: to.Ptr("gzip"), + ContentLanguage: to.Ptr("en"), + ContentMD5: testMd5, + CacheControl: to.Ptr("no-transform"), + ContentDisposition: to.Ptr("attachment"), + }, + } + setResp, err := fClient.SetHTTPHeaders(context.Background(), options) + _require.NoError(err) + _require.NotNil(setResp.ETag) + _require.Equal(setResp.LastModified.IsZero(), false) + _require.NotNil(setResp.RequestID) + _require.NotNil(setResp.Version) + _require.Equal(setResp.Date.IsZero(), false) + _require.NotNil(setResp.IsServerEncrypted) + + getResp, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(setResp.LastModified.IsZero(), false) + _require.Equal(*getResp.FileType, "File") + + _require.EqualValues(getResp.ContentType, options.HTTPHeaders.ContentType) + _require.EqualValues(getResp.ContentEncoding, options.HTTPHeaders.ContentEncoding) + _require.EqualValues(getResp.ContentLanguage, options.HTTPHeaders.ContentLanguage) + 
_require.EqualValues(getResp.ContentMD5, options.HTTPHeaders.ContentMD5) + _require.EqualValues(getResp.CacheControl, options.HTTPHeaders.CacheControl) + _require.EqualValues(getResp.ContentDisposition, options.HTTPHeaders.ContentDisposition) + _require.Equal(*getResp.ContentLength, int64(0)) + // We'll just ensure a permission exists, no need to test overlapping functionality. + _require.NotEqual(getResp.FilePermissionKey, "") + _require.Equal(*getResp.FileAttributes, options.SMBProperties.Attributes.String()) + + _require.EqualValues((*getResp.FileCreationTime).Format(testcommon.ISO8601), creationTime.UTC().Format(testcommon.ISO8601)) + _require.EqualValues((*getResp.FileLastWriteTime).Format(testcommon.ISO8601), lastWriteTime.UTC().Format(testcommon.ISO8601)) + + _require.NotNil(getResp.ETag) + _require.NotNil(getResp.RequestID) + _require.NotNil(getResp.Version) + _require.Equal(getResp.Date.IsZero(), false) + _require.NotNil(getResp.IsServerEncrypted) +} + +func (f *FileRecordedTestsSuite) TestFileGetSetPropertiesDefault() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := testcommon.CreateNewFileFromShare(context.Background(), _require, testcommon.GenerateFileName(testName), 0, shareClient) + + setResp, err := fClient.SetHTTPHeaders(context.Background(), nil) + _require.NoError(err) + _require.NotEqual(*setResp.ETag, "") + _require.Equal(setResp.LastModified.IsZero(), false) + _require.NotEqual(setResp.RequestID, "") + _require.NotEqual(setResp.Version, "") + _require.Equal(setResp.Date.IsZero(), false) + _require.NotNil(setResp.IsServerEncrypted) + + metadata := map[string]*string{ + "Foo": to.Ptr("Foovalue"), + 
"Bar": to.Ptr("Barvalue"), + } + _, err = fClient.SetMetadata(context.Background(), &file.SetMetadataOptions{ + Metadata: metadata, + }) + _require.NoError(err) + + // get properties on the share snapshot + getResp, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(setResp.LastModified.IsZero(), false) + _require.Equal(*getResp.FileType, "File") + + _require.Nil(getResp.ContentType) + _require.Nil(getResp.ContentEncoding) + _require.Nil(getResp.ContentLanguage) + _require.Nil(getResp.ContentMD5) + _require.Nil(getResp.CacheControl) + _require.Nil(getResp.ContentDisposition) + _require.Equal(*getResp.ContentLength, int64(0)) + + _require.NotNil(getResp.ETag) + _require.NotNil(getResp.RequestID) + _require.NotNil(getResp.Version) + _require.Equal(getResp.Date.IsZero(), false) + _require.NotNil(getResp.IsServerEncrypted) + _require.EqualValues(getResp.Metadata, metadata) +} + +func (f *FileRecordedTestsSuite) TestFilePreservePermissions() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient(testcommon.GenerateFileName(testName)) + _, err = fClient.Create(context.Background(), 0, &file.CreateOptions{ + Permissions: &file.Permissions{ + Permission: &testcommon.SampleSDDL, + }, + }) + _require.NoError(err) + + // Grab the original perm key before we set file headers. 
+ getResp, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + + pKey := getResp.FilePermissionKey + cTime := getResp.FileCreationTime + lwTime := getResp.FileLastWriteTime + attribs := getResp.FileAttributes + + md5Str := "MDAwMDAwMDA=" + testMd5 := []byte(md5Str) + + properties := file.SetHTTPHeadersOptions{ + HTTPHeaders: &file.HTTPHeaders{ + ContentType: to.Ptr("text/html"), + ContentEncoding: to.Ptr("gzip"), + ContentLanguage: to.Ptr("en"), + ContentMD5: testMd5, + CacheControl: to.Ptr("no-transform"), + ContentDisposition: to.Ptr("attachment"), + }, + // SMBProperties, when options are left nil, leads to preserving. + SMBProperties: &file.SMBProperties{}, + } + + setResp, err := fClient.SetHTTPHeaders(context.Background(), &properties) + _require.NoError(err) + _require.NotNil(setResp.ETag) + _require.NotNil(setResp.RequestID) + _require.NotNil(setResp.LastModified) + _require.Equal(setResp.LastModified.IsZero(), false) + _require.NotNil(setResp.Version) + _require.Equal(setResp.Date.IsZero(), false) + + getResp, err = fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.NotNil(setResp.LastModified) + _require.Equal(setResp.LastModified.IsZero(), false) + _require.Equal(*getResp.FileType, "File") + + _require.EqualValues(getResp.ContentType, properties.HTTPHeaders.ContentType) + _require.EqualValues(getResp.ContentEncoding, properties.HTTPHeaders.ContentEncoding) + _require.EqualValues(getResp.ContentLanguage, properties.HTTPHeaders.ContentLanguage) + _require.EqualValues(getResp.ContentMD5, properties.HTTPHeaders.ContentMD5) + _require.EqualValues(getResp.CacheControl, properties.HTTPHeaders.CacheControl) + _require.EqualValues(getResp.ContentDisposition, properties.HTTPHeaders.ContentDisposition) + _require.Equal(*getResp.ContentLength, int64(0)) + // Ensure that the permission key gets preserved + _require.EqualValues(getResp.FilePermissionKey, pKey) + _require.EqualValues(cTime, 
getResp.FileCreationTime) + _require.EqualValues(lwTime, getResp.FileLastWriteTime) + _require.EqualValues(attribs, getResp.FileAttributes) + + _require.NotNil(getResp.ETag) + _require.NotNil(getResp.RequestID) + _require.NotNil(getResp.Version) + _require.Equal(getResp.Date.IsZero(), false) + _require.NotNil(getResp.IsServerEncrypted) +} + +func (f *FileRecordedTestsSuite) TestFileGetSetPropertiesSnapshot() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer func() { + _, err := shareClient.Delete(context.Background(), &share.DeleteOptions{DeleteSnapshots: to.Ptr(share.DeleteSnapshotsOptionTypeInclude)}) + _require.NoError(err) + }() + + fClient := shareClient.NewRootDirectoryClient().NewFileClient(testcommon.GenerateFileName(testName)) + _, err = fClient.Create(context.Background(), 0, nil) + _require.NoError(err) + + md5Str := "MDAwMDAwMDA=" + testMd5 := []byte(md5Str) + + fileSetHTTPHeadersOptions := file.SetHTTPHeadersOptions{ + HTTPHeaders: &file.HTTPHeaders{ + ContentType: to.Ptr("text/html"), + ContentEncoding: to.Ptr("gzip"), + ContentLanguage: to.Ptr("en"), + ContentMD5: testMd5, + CacheControl: to.Ptr("no-transform"), + ContentDisposition: to.Ptr("attachment"), + }, + } + setResp, err := fClient.SetHTTPHeaders(context.Background(), &fileSetHTTPHeadersOptions) + _require.NoError(err) + _require.NotEqual(*setResp.ETag, "") + _require.Equal(setResp.LastModified.IsZero(), false) + _require.NotEqual(setResp.RequestID, "") + _require.NotEqual(setResp.Version, "") + _require.Equal(setResp.Date.IsZero(), false) + _require.NotNil(setResp.IsServerEncrypted) + + metadata := map[string]*string{ + "Foo": to.Ptr("Foovalue"), + "Bar": to.Ptr("Barvalue"), + } + _, err = fClient.SetMetadata(context.Background(), 
&file.SetMetadataOptions{ + Metadata: metadata, + }) + _require.NoError(err) + + resp, err := shareClient.CreateSnapshot(context.Background(), &share.CreateSnapshotOptions{Metadata: map[string]*string{}}) + _require.NoError(err) + _require.NotNil(resp.Snapshot) + + // get properties on the share snapshot + getResp, err := fClient.GetProperties(context.Background(), &file.GetPropertiesOptions{ + ShareSnapshot: resp.Snapshot, + }) + _require.NoError(err) + _require.Equal(setResp.LastModified.IsZero(), false) + _require.Equal(*getResp.FileType, "File") + + _require.EqualValues(getResp.ContentType, fileSetHTTPHeadersOptions.HTTPHeaders.ContentType) + _require.EqualValues(getResp.ContentEncoding, fileSetHTTPHeadersOptions.HTTPHeaders.ContentEncoding) + _require.EqualValues(getResp.ContentLanguage, fileSetHTTPHeadersOptions.HTTPHeaders.ContentLanguage) + _require.EqualValues(getResp.ContentMD5, fileSetHTTPHeadersOptions.HTTPHeaders.ContentMD5) + _require.EqualValues(getResp.CacheControl, fileSetHTTPHeadersOptions.HTTPHeaders.CacheControl) + _require.EqualValues(getResp.ContentDisposition, fileSetHTTPHeadersOptions.HTTPHeaders.ContentDisposition) + _require.Equal(*getResp.ContentLength, int64(0)) + + _require.NotNil(getResp.ETag) + _require.NotNil(getResp.RequestID) + _require.NotNil(getResp.Version) + _require.Equal(getResp.Date.IsZero(), false) + _require.NotNil(getResp.IsServerEncrypted) + _require.EqualValues(getResp.Metadata, metadata) +} + +func (f *FileRecordedTestsSuite) TestGetSetMetadataNonDefault() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := 
shareClient.NewRootDirectoryClient().NewFileClient(testcommon.GenerateFileName(testName)) + _, err = fClient.Create(context.Background(), 0, nil) + _require.NoError(err) + + metadata := map[string]*string{ + "Foo": to.Ptr("Foovalue"), + "Bar": to.Ptr("Barvalue"), + } + setResp, err := fClient.SetMetadata(context.Background(), &file.SetMetadataOptions{ + Metadata: metadata, + }) + _require.NoError(err) + _require.NotNil(setResp.ETag) + _require.NotNil(setResp.RequestID) + _require.NotNil(setResp.Version) + _require.Equal(setResp.Date.IsZero(), false) + _require.NotNil(setResp.IsServerEncrypted) + + getResp, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.NotNil(getResp.ETag) + _require.NotNil(getResp.RequestID) + _require.NotNil(getResp.Version) + _require.Equal(getResp.Date.IsZero(), false) + _require.NotNil(getResp.IsServerEncrypted) + _require.EqualValues(getResp.Metadata, metadata) +} + +func (f *FileRecordedTestsSuite) TestFileSetMetadataNil() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient(testcommon.GenerateFileName(testName)) + _, err = fClient.Create(context.Background(), 0, nil) + _require.NoError(err) + + md := map[string]*string{"Not": to.Ptr("nil")} + + _, err = fClient.SetMetadata(context.Background(), &file.SetMetadataOptions{ + Metadata: md, + }) + _require.NoError(err) + + resp1, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.EqualValues(resp1.Metadata, md) + + _, err = fClient.SetMetadata(context.Background(), nil) + _require.NoError(err) + + resp2, err := 
fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Len(resp2.Metadata, 0) +} + +func (f *FileRecordedTestsSuite) TestFileSetMetadataDefaultEmpty() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient(testcommon.GenerateFileName(testName)) + _, err = fClient.Create(context.Background(), 0, nil) + _require.NoError(err) + + md := map[string]*string{"Not": to.Ptr("nil")} + + _, err = fClient.SetMetadata(context.Background(), &file.SetMetadataOptions{ + Metadata: md, + }) + _require.NoError(err) + + resp1, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.EqualValues(resp1.Metadata, md) + + _, err = fClient.SetMetadata(context.Background(), &file.SetMetadataOptions{ + Metadata: map[string]*string{}, + }) + _require.NoError(err) + + resp2, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Len(resp2.Metadata, 0) +} + +func (f *FileRecordedTestsSuite) TestFileSetMetadataInvalidField() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient(testcommon.GenerateFileName(testName)) + _, err = fClient.Create(context.Background(), 0, nil) + _require.NoError(err) + + _, err = 
fClient.SetMetadata(context.Background(), &file.SetMetadataOptions{ + Metadata: map[string]*string{"!@#$%^&*()": to.Ptr("!@#$%^&*()")}, + }) + _require.Error(err) +} + +func (f *FileRecordedTestsSuite) TestStartCopyDefault() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + srcFile := shareClient.NewRootDirectoryClient().NewFileClient("src" + testcommon.GenerateFileName(testName)) + destFile := shareClient.NewRootDirectoryClient().NewFileClient("dest" + testcommon.GenerateFileName(testName)) + + fileSize := int64(2048) + _, err = srcFile.Create(context.Background(), fileSize, nil) + _require.NoError(err) + + contentR, srcContent := testcommon.GenerateData(int(fileSize)) + srcContentMD5 := md5.Sum(srcContent) + + _, err = srcFile.UploadRange(context.Background(), 0, contentR, nil) + _require.NoError(err) + + copyResp, err := destFile.StartCopyFromURL(context.Background(), srcFile.URL(), nil) + _require.NoError(err) + _require.NotNil(copyResp.ETag) + _require.Equal(copyResp.LastModified.IsZero(), false) + _require.NotNil(copyResp.RequestID) + _require.NotNil(copyResp.Version) + _require.Equal(copyResp.Date.IsZero(), false) + _require.NotEqual(copyResp.CopyStatus, "") + + time.Sleep(time.Duration(5) * time.Second) + + getResp, err := destFile.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.EqualValues(getResp.CopyID, copyResp.CopyID) + _require.NotEqual(*getResp.CopyStatus, "") + _require.Equal(*getResp.CopySource, srcFile.URL()) + _require.Equal(*getResp.CopyStatus, file.CopyStatusTypeSuccess) + + // Abort will fail after copy finished + _, err = destFile.AbortCopy(context.Background(), *copyResp.CopyID, nil) 
+ _require.Error(err) + testcommon.ValidateHTTPErrorCode(_require, err, http.StatusConflict) + + // validate data copied + dResp, err := destFile.DownloadStream(context.Background(), &file.DownloadStreamOptions{ + Range: file.HTTPRange{Offset: 0, Count: fileSize}, + RangeGetContentMD5: to.Ptr(true), + }) + _require.NoError(err) + + destContent, err := io.ReadAll(dResp.Body) + _require.NoError(err) + _require.EqualValues(srcContent, destContent) + _require.Equal(dResp.ContentMD5, srcContentMD5[:]) +} + +func (f *FileRecordedTestsSuite) TestFileStartCopyDestEmpty() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := testcommon.CreateNewFileFromShareWithData(context.Background(), _require, "src"+testcommon.GenerateFileName(testName), shareClient) + copyFClient := testcommon.GetFileClientFromShare("dest"+testcommon.GenerateFileName(testName), shareClient) + + _, err = copyFClient.StartCopyFromURL(context.Background(), fClient.URL(), nil) + _require.NoError(err) + + time.Sleep(4 * time.Second) + + resp, err := copyFClient.DownloadStream(context.Background(), nil) + _require.NoError(err) + + // Read the file data to verify the copy + data, err := ioutil.ReadAll(resp.Body) + defer func() { + err = resp.Body.Close() + _require.NoError(err) + }() + + _require.NoError(err) + _require.Equal(*resp.ContentLength, int64(len(testcommon.FileDefaultData))) + _require.Equal(string(data), testcommon.FileDefaultData) +} + +func (f *FileRecordedTestsSuite) TestFileStartCopyMetadata() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + 
_require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient("src" + testcommon.GenerateFileName(testName)) + copyFClient := shareClient.NewRootDirectoryClient().NewFileClient("dst" + testcommon.GenerateFileName(testName)) + + _, err = fClient.Create(context.Background(), 0, nil) + _require.NoError(err) + + basicMetadata := map[string]*string{ + "Foo": to.Ptr("Foovalue"), + "Bar": to.Ptr("Barvalue"), + } + _, err = copyFClient.StartCopyFromURL(context.Background(), fClient.URL(), &file.StartCopyFromURLOptions{Metadata: basicMetadata}) + _require.NoError(err) + + time.Sleep(4 * time.Second) + + resp2, err := copyFClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.EqualValues(resp2.Metadata, basicMetadata) +} + +func (f *FileRecordedTestsSuite) TestFileStartCopyMetadataNil() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient("src" + testcommon.GenerateFileName(testName)) + copyFClient := shareClient.NewRootDirectoryClient().NewFileClient("dst" + testcommon.GenerateFileName(testName)) + + _, err = fClient.Create(context.Background(), 0, nil) + _require.NoError(err) + + basicMetadata := map[string]*string{ + "Foo": to.Ptr("Foovalue"), + "Bar": to.Ptr("Barvalue"), + } + + // Have the destination start with metadata so we ensure the nil metadata passed later takes effect + _, err = copyFClient.Create(context.Background(), 
0, &file.CreateOptions{Metadata: basicMetadata})
+	_require.NoError(err)
+
+	gResp, err := copyFClient.GetProperties(context.Background(), nil)
+	_require.NoError(err)
+	_require.EqualValues(gResp.Metadata, basicMetadata)
+
+	_, err = copyFClient.StartCopyFromURL(context.Background(), fClient.URL(), nil)
+	_require.NoError(err)
+
+	time.Sleep(4 * time.Second)
+
+	resp2, err := copyFClient.GetProperties(context.Background(), nil)
+	_require.NoError(err)
+	_require.Len(resp2.Metadata, 0)
+}
+
+func (f *FileRecordedTestsSuite) TestFileStartCopyMetadataEmpty() {
+	_require := require.New(f.T())
+	testName := f.T().Name()
+
+	svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil)
+	_require.NoError(err)
+
+	shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient)
+	defer testcommon.DeleteShare(context.Background(), _require, shareClient)
+
+	fClient := shareClient.NewRootDirectoryClient().NewFileClient("src" + testcommon.GenerateFileName(testName))
+	copyFClient := shareClient.NewRootDirectoryClient().NewFileClient("dst" + testcommon.GenerateFileName(testName))
+
+	_, err = fClient.Create(context.Background(), 0, nil)
+	_require.NoError(err)
+
+	basicMetadata := map[string]*string{
+		"Foo": to.Ptr("Foovalue"),
+		"Bar": to.Ptr("Barvalue"),
+	}
+
+	// Have the destination start with metadata so we ensure the empty metadata map passed later takes effect
+	_, err = copyFClient.Create(context.Background(), 0, &file.CreateOptions{Metadata: basicMetadata})
+	_require.NoError(err)
+
+	gResp, err := copyFClient.GetProperties(context.Background(), nil)
+	_require.NoError(err)
+	_require.EqualValues(gResp.Metadata, basicMetadata)
+
+	_, err = copyFClient.StartCopyFromURL(context.Background(), fClient.URL(), &file.StartCopyFromURLOptions{Metadata: map[string]*string{}})
+	_require.NoError(err)
+
+	time.Sleep(4 * time.Second)
+
+	resp2, err := 
copyFClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Len(resp2.Metadata, 0) +} + +func (f *FileRecordedTestsSuite) TestFileStartCopyNegativeMetadataInvalidField() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient("src" + testcommon.GenerateFileName(testName)) + copyFClient := shareClient.NewRootDirectoryClient().NewFileClient("dst" + testcommon.GenerateFileName(testName)) + + _, err = fClient.Create(context.Background(), 0, nil) + _require.NoError(err) + + _, err = copyFClient.StartCopyFromURL(context.Background(), fClient.URL(), &file.StartCopyFromURLOptions{ + Metadata: map[string]*string{"!@#$%^&*()": to.Ptr("!@#$%^&*()")}, + }) + _require.Error(err) +} + +func (f *FileRecordedTestsSuite) TestFileStartCopySourceCreationTime() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient("src" + testcommon.GenerateFileName(testName)) + copyFClient := shareClient.NewRootDirectoryClient().NewFileClient("dst" + testcommon.GenerateFileName(testName)) + + currTime, err := time.Parse(time.UnixDate, "Fri Mar 31 21:00:00 GMT 2023") + _require.NoError(err) + + cResp, err := fClient.Create(context.Background(), 0, &file.CreateOptions{ + SMBProperties: 
&file.SMBProperties{ + Attributes: &file.NTFSFileAttributes{ReadOnly: true, Hidden: true}, + CreationTime: to.Ptr(currTime.Add(5 * time.Minute)), + LastWriteTime: to.Ptr(currTime.Add(2 * time.Minute)), + }, + }) + _require.NoError(err) + _require.NotNil(cResp.FileCreationTime) + _require.NotNil(cResp.FileLastWriteTime) + _require.NotNil(cResp.FileAttributes) + _require.NotNil(cResp.FilePermissionKey) + + _, err = copyFClient.StartCopyFromURL(context.Background(), fClient.URL(), &file.StartCopyFromURLOptions{ + CopyFileSMBInfo: &file.CopyFileSMBInfo{ + CreationTime: file.SourceCopyFileCreationTime{}, + }, + }) + _require.NoError(err) + + time.Sleep(4 * time.Second) + + resp2, err := copyFClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.EqualValues(resp2.FileCreationTime, cResp.FileCreationTime) + _require.NotEqualValues(resp2.FileLastWriteTime, cResp.FileLastWriteTime) + _require.NotEqualValues(resp2.FileAttributes, cResp.FileAttributes) +} + +func (f *FileRecordedTestsSuite) TestFileStartCopySourceProperties() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient("src" + testcommon.GenerateFileName(testName)) + copyFClient := shareClient.NewRootDirectoryClient().NewFileClient("dst" + testcommon.GenerateFileName(testName)) + + currTime, err := time.Parse(time.UnixDate, "Fri Mar 31 20:00:00 GMT 2023") + _require.NoError(err) + + cResp, err := fClient.Create(context.Background(), 0, &file.CreateOptions{ + SMBProperties: &file.SMBProperties{ + Attributes: &file.NTFSFileAttributes{System: true}, + CreationTime: to.Ptr(currTime.Add(1 * time.Minute)), 
+ LastWriteTime: to.Ptr(currTime.Add(2 * time.Minute)), + }, + }) + _require.NoError(err) + _require.NotNil(cResp.FileCreationTime) + _require.NotNil(cResp.FileLastWriteTime) + _require.NotNil(cResp.FileAttributes) + _require.NotNil(cResp.FilePermissionKey) + + _, err = copyFClient.StartCopyFromURL(context.Background(), fClient.URL(), &file.StartCopyFromURLOptions{ + CopyFileSMBInfo: &file.CopyFileSMBInfo{ + CreationTime: file.SourceCopyFileCreationTime{}, + LastWriteTime: file.SourceCopyFileLastWriteTime{}, + Attributes: file.SourceCopyFileAttributes{}, + PermissionCopyMode: to.Ptr(file.PermissionCopyModeTypeSource), + }, + }) + _require.NoError(err) + + time.Sleep(4 * time.Second) + + resp2, err := copyFClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.EqualValues(resp2.FileCreationTime, cResp.FileCreationTime) + _require.EqualValues(resp2.FileLastWriteTime, cResp.FileLastWriteTime) + _require.EqualValues(resp2.FileAttributes, cResp.FileAttributes) + _require.EqualValues(resp2.FilePermissionKey, cResp.FilePermissionKey) +} + +func (f *FileRecordedTestsSuite) TestFileStartCopyDifferentProperties() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient("src" + testcommon.GenerateFileName(testName)) + copyFClient := shareClient.NewRootDirectoryClient().NewFileClient("dst" + testcommon.GenerateFileName(testName)) + + currTime, err := time.Parse(time.UnixDate, "Fri Mar 31 20:00:00 GMT 2023") + _require.NoError(err) + + cResp, err := fClient.Create(context.Background(), 0, &file.CreateOptions{ + SMBProperties: &file.SMBProperties{ + Attributes: 
&file.NTFSFileAttributes{System: true}, + CreationTime: to.Ptr(currTime.Add(1 * time.Minute)), + LastWriteTime: to.Ptr(currTime.Add(2 * time.Minute)), + }, + }) + _require.NoError(err) + _require.NotNil(cResp.FileCreationTime) + _require.NotNil(cResp.FileLastWriteTime) + _require.NotNil(cResp.FileAttributes) + _require.NotNil(cResp.FilePermissionKey) + + destCreationTime := currTime.Add(5 * time.Minute) + destLastWriteTime := currTime.Add(6 * time.Minute) + _, err = copyFClient.StartCopyFromURL(context.Background(), fClient.URL(), &file.StartCopyFromURLOptions{ + CopyFileSMBInfo: &file.CopyFileSMBInfo{ + CreationTime: file.DestinationCopyFileCreationTime(destCreationTime), + LastWriteTime: file.DestinationCopyFileLastWriteTime(destLastWriteTime), + Attributes: file.DestinationCopyFileAttributes{ReadOnly: true}, + }, + }) + _require.NoError(err) + + time.Sleep(4 * time.Second) + + resp2, err := copyFClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.NotEqualValues(resp2.FileCreationTime, cResp.FileCreationTime) + _require.EqualValues(*resp2.FileCreationTime, destCreationTime.UTC()) + _require.NotEqualValues(resp2.FileLastWriteTime, cResp.FileLastWriteTime) + _require.EqualValues(*resp2.FileLastWriteTime, destLastWriteTime.UTC()) + _require.NotEqualValues(resp2.FileAttributes, cResp.FileAttributes) + _require.EqualValues(resp2.FilePermissionKey, cResp.FilePermissionKey) + } + + func (f *FileRecordedTestsSuite) TestFileStartCopyOverrideMode() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient("src" + testcommon.GenerateFileName(testName)) + 
copyFClient := shareClient.NewRootDirectoryClient().NewFileClient("dst" + testcommon.GenerateFileName(testName)) + + cResp, err := fClient.Create(context.Background(), 0, nil) + _require.NoError(err) + _require.NotNil(cResp.FileCreationTime) + _require.NotNil(cResp.FileLastWriteTime) + _require.NotNil(cResp.FileAttributes) + _require.NotNil(cResp.FilePermissionKey) + + _, err = copyFClient.StartCopyFromURL(context.Background(), fClient.URL(), &file.StartCopyFromURLOptions{ + Permissions: &file.Permissions{ + Permission: to.Ptr(testcommon.SampleSDDL), + }, + CopyFileSMBInfo: &file.CopyFileSMBInfo{ + PermissionCopyMode: to.Ptr(file.PermissionCopyModeTypeOverride), + }, + }) + _require.NoError(err) + + time.Sleep(4 * time.Second) + + resp2, err := copyFClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.NotEqualValues(resp2.FileCreationTime, cResp.FileCreationTime) + _require.NotEqualValues(resp2.FileLastWriteTime, cResp.FileLastWriteTime) + _require.NotEqualValues(resp2.FilePermissionKey, cResp.FilePermissionKey) +} + +func (f *FileRecordedTestsSuite) TestNegativeFileStartCopyOverrideMode() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient("src" + testcommon.GenerateFileName(testName)) + copyFClient := shareClient.NewRootDirectoryClient().NewFileClient("dst" + testcommon.GenerateFileName(testName)) + + cResp, err := fClient.Create(context.Background(), 0, nil) + _require.NoError(err) + _require.NotNil(cResp.FileCreationTime) + _require.NotNil(cResp.FileLastWriteTime) + _require.NotNil(cResp.FileAttributes) + 
_require.NotNil(cResp.FilePermissionKey) + + // permission or permission key is required when the PermissionCopyMode is override. + _, err = copyFClient.StartCopyFromURL(context.Background(), fClient.URL(), &file.StartCopyFromURLOptions{ + CopyFileSMBInfo: &file.CopyFileSMBInfo{ + PermissionCopyMode: to.Ptr(file.PermissionCopyModeTypeOverride), + }, + }) + _require.Error(err) + testcommon.ValidateFileErrorCode(_require, err, fileerror.MissingRequiredHeader) +} + +func (f *FileRecordedTestsSuite) TestFileStartCopySetArchiveAttributeTrue() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient("src" + testcommon.GenerateFileName(testName)) + copyFClient := shareClient.NewRootDirectoryClient().NewFileClient("dst" + testcommon.GenerateFileName(testName)) + + cResp, err := fClient.Create(context.Background(), 0, &file.CreateOptions{ + SMBProperties: &file.SMBProperties{ + Attributes: &file.NTFSFileAttributes{ReadOnly: true, Hidden: true}, + }, + }) + _require.NoError(err) + _require.NotNil(cResp.FileCreationTime) + _require.NotNil(cResp.FileLastWriteTime) + _require.NotNil(cResp.FileAttributes) + _require.NotNil(cResp.FilePermissionKey) + + _, err = copyFClient.StartCopyFromURL(context.Background(), fClient.URL(), &file.StartCopyFromURLOptions{ + CopyFileSMBInfo: &file.CopyFileSMBInfo{ + Attributes: file.DestinationCopyFileAttributes{System: true, ReadOnly: true}, + SetArchiveAttribute: to.Ptr(true), + }, + }) + _require.NoError(err) + + time.Sleep(4 * time.Second) + + resp2, err := copyFClient.GetProperties(context.Background(), nil) + _require.NoError(err) + 
_require.NotEqualValues(resp2.FileCreationTime, cResp.FileCreationTime) + _require.NotEqualValues(resp2.FileLastWriteTime, cResp.FileLastWriteTime) + _require.Contains(*resp2.FileAttributes, "Archive") +} + +func (f *FileRecordedTestsSuite) TestFileStartCopySetArchiveAttributeFalse() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient("src" + testcommon.GenerateFileName(testName)) + copyFClient := shareClient.NewRootDirectoryClient().NewFileClient("dst" + testcommon.GenerateFileName(testName)) + + cResp, err := fClient.Create(context.Background(), 0, &file.CreateOptions{ + SMBProperties: &file.SMBProperties{ + Attributes: &file.NTFSFileAttributes{ReadOnly: true, Hidden: true}, + }, + }) + _require.NoError(err) + _require.NotNil(cResp.FileCreationTime) + _require.NotNil(cResp.FileLastWriteTime) + _require.NotNil(cResp.FileAttributes) + _require.NotNil(cResp.FilePermissionKey) + + _, err = copyFClient.StartCopyFromURL(context.Background(), fClient.URL(), &file.StartCopyFromURLOptions{ + CopyFileSMBInfo: &file.CopyFileSMBInfo{ + Attributes: file.DestinationCopyFileAttributes{System: true, ReadOnly: true}, + SetArchiveAttribute: to.Ptr(false), + }, + }) + _require.NoError(err) + + time.Sleep(4 * time.Second) + + resp2, err := copyFClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.NotEqualValues(resp2.FileCreationTime, cResp.FileCreationTime) + _require.NotEqualValues(resp2.FileLastWriteTime, cResp.FileLastWriteTime) + _require.NotContains(*resp2.FileAttributes, "Archive") +} + +func (f *FileRecordedTestsSuite) TestFileStartCopyDestReadOnly() { + 
_require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient("src" + testcommon.GenerateFileName(testName)) + copyFClient := shareClient.NewRootDirectoryClient().NewFileClient("dst" + testcommon.GenerateFileName(testName)) + + cResp, err := fClient.Create(context.Background(), 0, nil) + _require.NoError(err) + _require.NotNil(cResp.FileCreationTime) + _require.NotNil(cResp.FileLastWriteTime) + _require.NotNil(cResp.FileAttributes) + _require.NotNil(cResp.FilePermissionKey) + + _, err = copyFClient.Create(context.Background(), 0, &file.CreateOptions{ + SMBProperties: &file.SMBProperties{ + Attributes: &file.NTFSFileAttributes{ReadOnly: true}, + }, + }) + _require.NoError(err) + + _, err = copyFClient.StartCopyFromURL(context.Background(), fClient.URL(), &file.StartCopyFromURLOptions{ + CopyFileSMBInfo: &file.CopyFileSMBInfo{ + IgnoreReadOnly: to.Ptr(true), + }, + }) + _require.NoError(err) + + time.Sleep(4 * time.Second) + + resp2, err := copyFClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.NotEqualValues(resp2.FileCreationTime, cResp.FileCreationTime) + _require.NotEqualValues(resp2.FileLastWriteTime, cResp.FileLastWriteTime) +} + +func (f *FileRecordedTestsSuite) TestNegativeFileStartCopyDestReadOnly() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer 
testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient("src" + testcommon.GenerateFileName(testName)) + copyFClient := shareClient.NewRootDirectoryClient().NewFileClient("dst" + testcommon.GenerateFileName(testName)) + + cResp, err := fClient.Create(context.Background(), 0, nil) + _require.NoError(err) + _require.NotNil(cResp.FileCreationTime) + _require.NotNil(cResp.FileLastWriteTime) + _require.NotNil(cResp.FileAttributes) + _require.NotNil(cResp.FilePermissionKey) + + _, err = copyFClient.Create(context.Background(), 0, &file.CreateOptions{ + SMBProperties: &file.SMBProperties{ + Attributes: &file.NTFSFileAttributes{ReadOnly: true}, + }, + }) + _require.NoError(err) + + _, err = copyFClient.StartCopyFromURL(context.Background(), fClient.URL(), nil) + _require.Error(err) + testcommon.ValidateFileErrorCode(_require, err, fileerror.ReadOnlyAttribute) +} + +func (f *FileRecordedTestsSuite) TestFileStartCopySourceNonExistent() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient("src" + testcommon.GenerateFileName(testName)) + copyFClient := shareClient.NewRootDirectoryClient().NewFileClient("dst" + testcommon.GenerateFileName(testName)) + + _, err = copyFClient.StartCopyFromURL(context.Background(), fClient.URL(), nil) + testcommon.ValidateFileErrorCode(_require, err, fileerror.ResourceNotFound) +} + +func (f *FileUnrecordedTestsSuite) TestFileStartCopyUsingSASSrc() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), 
testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, "src"+shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fileName := testcommon.GenerateFileName(testName) + fClient := testcommon.CreateNewFileFromShareWithData(context.Background(), _require, "src"+fileName, shareClient) + + fileURLWithSAS, err := fClient.GetSASURL(sas.FilePermissions{Read: true, Write: true, Create: true, Delete: true}, time.Now().Add(5*time.Minute).UTC(), nil) + _require.NoError(err) + + // Create a new share for the destination + copyShareClient := testcommon.CreateNewShare(context.Background(), _require, "dest"+shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, copyShareClient) + + copyFileClient := testcommon.GetFileClientFromShare("dst"+fileName, copyShareClient) + + _, err = copyFileClient.StartCopyFromURL(context.Background(), fileURLWithSAS, nil) + _require.NoError(err) + + time.Sleep(4 * time.Second) + + dResp, err := copyFileClient.DownloadStream(context.Background(), nil) + _require.NoError(err) + + data, err := ioutil.ReadAll(dResp.Body) + defer func() { + err = dResp.Body.Close() + _require.NoError(err) + }() + + _require.NoError(err) + _require.Equal(*dResp.ContentLength, int64(len(testcommon.FileDefaultData))) + _require.Equal(string(data), testcommon.FileDefaultData) +} + +func (f *FileRecordedTestsSuite) TestFileAbortCopyNoCopyStarted() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + copyFClient := 
shareClient.NewRootDirectoryClient().NewFileClient(testcommon.GenerateFileName(testName)) + _, err = copyFClient.AbortCopy(context.Background(), "copynotstarted", nil) + _require.Error(err) + testcommon.ValidateFileErrorCode(_require, err, fileerror.InvalidQueryParameterValue) +} + +func (f *FileRecordedTestsSuite) TestResizeFile() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient("src" + testcommon.GenerateFileName(testName)) + _, err = fClient.Create(context.Background(), 1234, nil) + _require.NoError(err) + + gResp, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp.ContentLength, int64(1234)) + + _, err = fClient.Resize(context.Background(), 4096, nil) + _require.NoError(err) + + gResp, err = fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp.ContentLength, int64(4096)) +} + +func (f *FileRecordedTestsSuite) TestFileResizeZero() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient("src" + testcommon.GenerateFileName(testName)) + _, err = fClient.Create(context.Background(), 10, nil) + _require.NoError(err) + + _, err = fClient.Resize(context.Background(), 0, nil) + 
_require.NoError(err) + + resp, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*resp.ContentLength, int64(0)) +} + +func (f *FileRecordedTestsSuite) TestFileResizeInvalidSizeNegative() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient("src" + testcommon.GenerateFileName(testName)) + _, err = fClient.Create(context.Background(), 0, nil) + _require.NoError(err) + + _, err = fClient.Resize(context.Background(), -4, nil) + _require.Error(err) + testcommon.ValidateFileErrorCode(_require, err, fileerror.OutOfRangeInput) +} + +func (f *FileRecordedTestsSuite) TestNegativeFileSizeMoreThanShareQuota() { + _require := require.New(f.T()) + testName := f.T().Name() + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + var fileShareMaxQuota int32 = 1024 // share size in GiB which is 1TiB + var fileMaxAllowedSizeInBytes int64 = 4398046511104 // file size in bytes which is 4 TiB + + shareClient := testcommon.GetShareClient(testcommon.GenerateShareName(testName), svcClient) + _, err = shareClient.Create(context.Background(), &share.CreateOptions{ + Quota: &fileShareMaxQuota, + }) + _require.NoError(err) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient(testcommon.GenerateFileName(testName)) + _, err = fClient.Create(context.Background(), fileMaxAllowedSizeInBytes, &file.CreateOptions{ + HTTPHeaders: &file.HTTPHeaders{}, + }) + _require.Error(err) +} + +func (f *FileRecordedTestsSuite) 
TestCreateMaximumSizeFileShare() { + _require := require.New(f.T()) + testName := f.T().Name() + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + var fileShareMaxQuota int32 = 5120 // share size in GiB which is 5TiB + var fileMaxAllowedSizeInBytes int64 = 4398046511104 // file size in bytes which is 4 TiB + + shareClient := testcommon.GetShareClient(testcommon.GenerateShareName(testName), svcClient) + _, err = shareClient.Create(context.Background(), &share.CreateOptions{ + Quota: &fileShareMaxQuota, + }) + _require.NoError(err) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient(testcommon.GenerateFileName(testName)) + _, err = fClient.Create(context.Background(), fileMaxAllowedSizeInBytes, &file.CreateOptions{ + HTTPHeaders: &file.HTTPHeaders{}, + }) + _require.NoError(err) +} + +func (f *FileRecordedTestsSuite) TestSASFileClientNoKey() { + _require := require.New(f.T()) + accountName, _ := testcommon.GetGenericAccountInfo(testcommon.TestAccountDefault) + _require.Greater(len(accountName), 0) + + testName := f.T().Name() + shareName := testcommon.GenerateShareName(testName) + fileName := testcommon.GenerateFileName(testName) + fileClient, err := file.NewClientWithNoCredential(fmt.Sprintf("https://%s.file.core.windows.net/%v/%v", accountName, shareName, fileName), nil) + _require.NoError(err) + + permissions := sas.FilePermissions{ + Read: true, + Write: true, + Delete: true, + Create: true, + } + expiry := time.Now().Add(time.Hour) + + _, err = fileClient.GetSASURL(permissions, expiry, nil) + _require.Equal(err, fileerror.MissingSharedKeyCredential) +} + +func (f *FileRecordedTestsSuite) TestSASFileClientSignNegative() { + _require := require.New(f.T()) + accountName, accountKey := testcommon.GetGenericAccountInfo(testcommon.TestAccountDefault) + _require.Greater(len(accountName), 0) + 
_require.Greater(len(accountKey), 0) + + cred, err := file.NewSharedKeyCredential(accountName, accountKey) + _require.NoError(err) + + testName := f.T().Name() + shareName := testcommon.GenerateShareName(testName) + fileName := testcommon.GenerateFileName(testName) + fileClient, err := file.NewClientWithSharedKeyCredential(fmt.Sprintf("https://%s.file.core.windows.net/%v%v", accountName, shareName, fileName), cred, nil) + _require.NoError(err) + + permissions := sas.FilePermissions{ + Read: true, + Write: true, + Delete: true, + Create: true, + } + expiry := time.Time{} + + // zero expiry time + _, err = fileClient.GetSASURL(permissions, expiry, &file.GetSASURLOptions{StartTime: to.Ptr(time.Now())}) + _require.Equal(err.Error(), "service SAS is missing at least one of these: ExpiryTime or Permissions") + + // zero start and expiry time + _, err = fileClient.GetSASURL(permissions, expiry, &file.GetSASURLOptions{}) + _require.Equal(err.Error(), "service SAS is missing at least one of these: ExpiryTime or Permissions") + + // empty permissions + _, err = fileClient.GetSASURL(sas.FilePermissions{}, expiry, nil) + _require.Equal(err.Error(), "service SAS is missing at least one of these: ExpiryTime or Permissions") +} + +func (f *FileRecordedTestsSuite) TestFileUploadClearListRange() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + var fileSize int64 = 1024 * 10 + fClient := shareClient.NewRootDirectoryClient().NewFileClient(testcommon.GenerateFileName(testName)) + _, err = fClient.Create(context.Background(), fileSize, nil) + _require.NoError(err) + + gResp, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) 
+ _require.Equal(*gResp.ContentLength, fileSize) + + contentSize := 1024 * 2 // 2KB + contentR, contentD := testcommon.GenerateData(contentSize) + md5Value := md5.Sum(contentD) + contentMD5 := md5Value[:] + + uResp, err := fClient.UploadRange(context.Background(), 0, contentR, &file.UploadRangeOptions{ + TransactionalValidation: file.TransferValidationTypeMD5(contentMD5), + }) + _require.NoError(err) + _require.NotNil(uResp.ContentMD5) + _require.EqualValues(uResp.ContentMD5, contentMD5) + + rangeList, err := fClient.GetRangeList(context.Background(), nil) + _require.NoError(err) + _require.Len(rangeList.Ranges, 1) + _require.EqualValues(*rangeList.Ranges[0], file.ShareFileRange{Start: to.Ptr(int64(0)), End: to.Ptr(int64(contentSize - 1))}) + + cResp, err := fClient.ClearRange(context.Background(), file.HTTPRange{Offset: 0, Count: int64(contentSize)}, nil) + _require.NoError(err) + _require.Nil(cResp.ContentMD5) + + rangeList2, err := fClient.GetRangeList(context.Background(), nil) + _require.NoError(err) + _require.Len(rangeList2.Ranges, 0) +} + +func (f *FileUnrecordedTestsSuite) TestFileUploadRangeFromURL() { + _require := require.New(f.T()) + testName := f.T().Name() + + cred, err := testcommon.GetGenericSharedKeyCredential(testcommon.TestAccountDefault) + _require.NoError(err) + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + var fileSize int64 = 1024 * 20 + srcFileName := "src" + testcommon.GenerateFileName(testName) + srcFClient := shareClient.NewRootDirectoryClient().NewFileClient(srcFileName) + _, err = srcFClient.Create(context.Background(), fileSize, nil) + _require.NoError(err) + + gResp, err := srcFClient.GetProperties(context.Background(), nil) + 
_require.NoError(err) + _require.Equal(*gResp.ContentLength, fileSize) + + contentSize := 1024 * 8 // 8KB + content := make([]byte, contentSize) + body := bytes.NewReader(content) + rsc := streaming.NopCloser(body) + contentCRC64 := crc64.Checksum(content, shared.CRC64Table) + + _, err = srcFClient.UploadRange(context.Background(), 0, rsc, nil) + _require.NoError(err) + + perms := sas.FilePermissions{Read: true, Write: true} + sasQueryParams, err := sas.SignatureValues{ + Protocol: sas.ProtocolHTTPS, // Users MUST use HTTPS (not HTTP) + ExpiryTime: time.Now().UTC().Add(48 * time.Hour), // 48-hours before expiration + ShareName: shareName, + FilePath: srcFileName, + Permissions: perms.String(), + }.SignWithSharedKey(cred) + _require.NoError(err) + + srcFileSAS := srcFClient.URL() + "?" + sasQueryParams.Encode() + + destFClient := shareClient.NewRootDirectoryClient().NewFileClient("dest" + testcommon.GenerateFileName(testName)) + _, err = destFClient.Create(context.Background(), fileSize, nil) + _require.NoError(err) + + uResp, err := destFClient.UploadRangeFromURL(context.Background(), srcFileSAS, 0, 0, int64(contentSize), &file.UploadRangeFromURLOptions{ + SourceContentCRC64: contentCRC64, + }) + _require.NoError(err) + _require.NotNil(uResp.XMSContentCRC64) + _require.EqualValues(binary.LittleEndian.Uint64(uResp.XMSContentCRC64), contentCRC64) + + rangeList, err := destFClient.GetRangeList(context.Background(), nil) + _require.NoError(err) + _require.Len(rangeList.Ranges, 1) + _require.Equal(*rangeList.Ranges[0].Start, int64(0)) + _require.Equal(*rangeList.Ranges[0].End, int64(contentSize-1)) + + cResp, err := destFClient.ClearRange(context.Background(), file.HTTPRange{Offset: 0, Count: int64(contentSize)}, nil) + _require.NoError(err) + _require.Nil(cResp.ContentMD5) + + rangeList2, err := destFClient.GetRangeList(context.Background(), nil) + _require.NoError(err) + _require.Len(rangeList2.Ranges, 0) +} + +func (f *FileRecordedTestsSuite) 
TestFileUploadRangeFromURLNegative() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + var fileSize int64 = 1024 * 20 + srcFileName := "src" + testcommon.GenerateFileName(testName) + srcFClient := testcommon.CreateNewFileFromShare(context.Background(), _require, srcFileName, fileSize, shareClient) + + gResp, err := srcFClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp.ContentLength, fileSize) + + contentSize := 1024 * 8 // 8KB + rsc, content := testcommon.GenerateData(contentSize) + contentCRC64 := crc64.Checksum(content, shared.CRC64Table) + + _, err = srcFClient.UploadRange(context.Background(), 0, rsc, nil) + _require.NoError(err) + + destFClient := testcommon.CreateNewFileFromShare(context.Background(), _require, "dest"+testcommon.GenerateFileName(testName), fileSize, shareClient) + + _, err = destFClient.UploadRangeFromURL(context.Background(), srcFClient.URL(), 0, 0, int64(contentSize), &file.UploadRangeFromURLOptions{ + SourceContentCRC64: contentCRC64, + }) + _require.Error(err) +} + +func (f *FileRecordedTestsSuite) TestFileUploadRangeFromURLOffsetNegative() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + var fileSize int64 = 1024 * 20 + srcFileName := "src" + testcommon.GenerateFileName(testName) + 
srcFClient := testcommon.CreateNewFileFromShare(context.Background(), _require, srcFileName, fileSize, shareClient) + + gResp, err := srcFClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp.ContentLength, fileSize) + + contentSize := 1024 * 8 // 8KB + destFClient := testcommon.CreateNewFileFromShare(context.Background(), _require, "dest"+testcommon.GenerateFileName(testName), fileSize, shareClient) + + // error is returned when source offset is negative + _, err = destFClient.UploadRangeFromURL(context.Background(), srcFClient.URL(), -1, 0, int64(contentSize), nil) + _require.Error(err) + _require.Equal(err.Error(), "invalid argument: source and destination offsets must be >= 0") +} + +func (f *FileUnrecordedTestsSuite) TestFileUploadRangeFromURLCopySourceAuthBlob() { + _require := require.New(f.T()) + testName := f.T().Name() + + accountName, _ := testcommon.GetGenericAccountInfo(testcommon.TestAccountDefault) + _require.Greater(len(accountName), 0) + + cred, err := testcommon.GetGenericTokenCredential() + _require.NoError(err) + + // Getting token + accessToken, err := cred.GetToken(context.Background(), policy.TokenRequestOptions{Scopes: []string{"https://storage.azure.com/.default"}}) + _require.NoError(err) + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + var fileSize int64 = 1024 * 10 + contentSize := 1024 * 8 // 8KB + _, content := testcommon.GenerateData(contentSize) + contentCRC64 := crc64.Checksum(content, shared.CRC64Table) + + // create source block blob + blobClient, err := azblob.NewClient("https://"+accountName+".blob.core.windows.net/", cred, nil) + _require.NoError(err) + + containerName := "goc" + 
testcommon.GenerateEntityName(testName) + blobName := "blob" + testcommon.GenerateEntityName(testName) + _, err = blobClient.CreateContainer(context.Background(), containerName, nil) + _require.NoError(err) + defer func() { + _, err := blobClient.DeleteContainer(context.Background(), containerName, nil) + _require.NoError(err) + }() + + _, err = blobClient.UploadBuffer(context.Background(), containerName, blobName, content, nil) + _require.NoError(err) + + destFClient := shareClient.NewRootDirectoryClient().NewFileClient("dest" + testcommon.GenerateFileName(testName)) + _, err = destFClient.Create(context.Background(), fileSize, nil) + _require.NoError(err) + + blobURL := blobClient.ServiceClient().NewContainerClient(containerName).NewBlockBlobClient(blobName).URL() + uResp, err := destFClient.UploadRangeFromURL(context.Background(), blobURL, 0, 0, int64(contentSize), &file.UploadRangeFromURLOptions{ + SourceContentCRC64: contentCRC64, + CopySourceAuthorization: to.Ptr("Bearer " + accessToken.Token), + }) + _require.NoError(err) + _require.NotNil(uResp.XMSContentCRC64) + _require.EqualValues(binary.LittleEndian.Uint64(uResp.XMSContentCRC64), contentCRC64) + + // validate the content uploaded + dResp, err := destFClient.DownloadStream(context.Background(), &file.DownloadStreamOptions{ + Range: file.HTTPRange{Offset: 0, Count: int64(contentSize)}, + }) + _require.NoError(err) + + data, err := ioutil.ReadAll(dResp.Body) + defer func() { + err = dResp.Body.Close() + _require.NoError(err) + }() + + _require.NoError(err) + _require.EqualValues(data, content) + } + + func (f *FileUnrecordedTestsSuite) TestFileUploadBuffer() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, 
shareClient) + + var fileSize int64 = 100 * 1024 * 1024 + fClient := shareClient.NewRootDirectoryClient().NewFileClient(testcommon.GenerateFileName(testName)) + _, err = fClient.Create(context.Background(), fileSize, nil) + _require.NoError(err) + + gResp, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp.ContentLength, fileSize) + + content := make([]byte, fileSize) + _, err = rand.Read(content) + _require.NoError(err) + md5Value := md5.Sum(content) + contentMD5 := md5Value[:] + + err = fClient.UploadBuffer(context.Background(), content, &file.UploadBufferOptions{ + Concurrency: 5, + ChunkSize: 4 * 1024 * 1024, + }) + _require.NoError(err) + + dResp, err := fClient.DownloadStream(context.Background(), nil) + _require.NoError(err) + + data, err := io.ReadAll(dResp.Body) + _require.NoError(err) + + downloadedMD5Value := md5.Sum(data) + downloadedContentMD5 := downloadedMD5Value[:] + + _require.EqualValues(downloadedContentMD5, contentMD5) + + gResp2, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp2.ContentLength, fileSize) + + rangeList, err := fClient.GetRangeList(context.Background(), nil) + _require.NoError(err) + _require.Len(rangeList.Ranges, 1) + _require.Equal(*rangeList.Ranges[0].Start, int64(0)) + _require.Equal(*rangeList.Ranges[0].End, fileSize-1) +} + +func (f *FileUnrecordedTestsSuite) TestFileUploadFile() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + var fileSize int64 = 200 * 1024 * 1024 + fClient := shareClient.NewRootDirectoryClient().NewFileClient(testcommon.GenerateFileName(testName)) + _, err = 
fClient.Create(context.Background(), fileSize, nil) + _require.NoError(err) + + gResp, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp.ContentLength, fileSize) + + // create local file + content := make([]byte, fileSize) + _, err = rand.Read(content) + _require.NoError(err) + err = ioutil.WriteFile("testFile", content, 0644) + _require.NoError(err) + + defer func() { + err = os.Remove("testFile") + _require.NoError(err) + }() + + fh, err := os.Open("testFile") + _require.NoError(err) + + defer func(fh *os.File) { + err := fh.Close() + _require.NoError(err) + }(fh) + + hash := md5.New() + _, err = io.Copy(hash, fh) + _require.NoError(err) + contentMD5 := hash.Sum(nil) + + err = fClient.UploadFile(context.Background(), fh, &file.UploadFileOptions{ + Concurrency: 5, + ChunkSize: 4 * 1024 * 1024, + }) + _require.NoError(err) + + dResp, err := fClient.DownloadStream(context.Background(), nil) + _require.NoError(err) + + data, err := io.ReadAll(dResp.Body) + _require.NoError(err) + + downloadedMD5Value := md5.Sum(data) + downloadedContentMD5 := downloadedMD5Value[:] + + _require.EqualValues(downloadedContentMD5, contentMD5) + + gResp2, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp2.ContentLength, fileSize) + + rangeList, err := fClient.GetRangeList(context.Background(), nil) + _require.NoError(err) + _require.Len(rangeList.Ranges, 1) + _require.Equal(*rangeList.Ranges[0].Start, int64(0)) + _require.Equal(*rangeList.Ranges[0].End, fileSize-1) +} + +func (f *FileUnrecordedTestsSuite) TestFileUploadStream() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), 
_require, shareClient) + + var fileSize int64 = 100 * 1024 * 1024 + fClient := shareClient.NewRootDirectoryClient().NewFileClient(testcommon.GenerateFileName(testName)) + _, err = fClient.Create(context.Background(), fileSize, nil) + _require.NoError(err) + + gResp, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp.ContentLength, fileSize) + + content := make([]byte, fileSize) + _, err = rand.Read(content) + _require.NoError(err) + md5Value := md5.Sum(content) + contentMD5 := md5Value[:] + + err = fClient.UploadStream(context.Background(), streaming.NopCloser(bytes.NewReader(content)), &file.UploadStreamOptions{ + Concurrency: 5, + ChunkSize: 4 * 1024 * 1024, + }) + _require.NoError(err) + + dResp, err := fClient.DownloadStream(context.Background(), nil) + _require.NoError(err) + + data, err := io.ReadAll(dResp.Body) + _require.NoError(err) + + downloadedMD5Value := md5.Sum(data) + downloadedContentMD5 := downloadedMD5Value[:] + + _require.EqualValues(downloadedContentMD5, contentMD5) + + gResp2, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp2.ContentLength, fileSize) + + rangeList, err := fClient.GetRangeList(context.Background(), nil) + _require.NoError(err) + _require.Len(rangeList.Ranges, 1) + _require.Equal(*rangeList.Ranges[0].Start, int64(0)) + _require.Equal(*rangeList.Ranges[0].End, fileSize-1) +} + +func (f *FileUnrecordedTestsSuite) TestFileDownloadBuffer() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + var fileSize int64 = 100 * 1024 * 1024 + fClient := 
shareClient.NewRootDirectoryClient().NewFileClient(testcommon.GenerateFileName(testName)) + _, err = fClient.Create(context.Background(), fileSize, nil) + _require.NoError(err) + + gResp, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp.ContentLength, fileSize) + + content := make([]byte, fileSize) + _, err = rand.Read(content) + _require.NoError(err) + md5Value := md5.Sum(content) + contentMD5 := md5Value[:] + + err = fClient.UploadBuffer(context.Background(), content, &file.UploadBufferOptions{ + Concurrency: 5, + ChunkSize: 4 * 1024 * 1024, + }) + _require.NoError(err) + + destBuffer := make([]byte, fileSize) + cnt, err := fClient.DownloadBuffer(context.Background(), destBuffer, &file.DownloadBufferOptions{ + ChunkSize: 10 * 1024 * 1024, + Concurrency: 5, + }) + _require.NoError(err) + _require.Equal(cnt, fileSize) + + downloadedMD5Value := md5.Sum(destBuffer) + downloadedContentMD5 := downloadedMD5Value[:] + + _require.EqualValues(downloadedContentMD5, contentMD5) + + gResp2, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp2.ContentLength, fileSize) + + rangeList, err := fClient.GetRangeList(context.Background(), nil) + _require.NoError(err) + _require.Len(rangeList.Ranges, 1) + _require.Equal(*rangeList.Ranges[0].Start, int64(0)) + _require.Equal(*rangeList.Ranges[0].End, fileSize-1) +} + +func (f *FileUnrecordedTestsSuite) TestFileDownloadFile() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + var fileSize int64 = 100 * 1024 * 1024 + fClient := 
shareClient.NewRootDirectoryClient().NewFileClient(testcommon.GenerateFileName(testName)) + _, err = fClient.Create(context.Background(), fileSize, nil) + _require.NoError(err) + + gResp, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp.ContentLength, fileSize) + + content := make([]byte, fileSize) + _, err = rand.Read(content) + _require.NoError(err) + md5Value := md5.Sum(content) + contentMD5 := md5Value[:] + + err = fClient.UploadBuffer(context.Background(), content, &file.UploadBufferOptions{ + Concurrency: 5, + ChunkSize: 4 * 1024 * 1024, + }) + _require.NoError(err) + + destFileName := "BigFile-downloaded.bin" + destFile, err := os.Create(destFileName) + _require.NoError(err) + defer func(name string) { + err = os.Remove(name) + _require.NoError(err) + }(destFileName) + defer func(destFile *os.File) { + err = destFile.Close() + _require.NoError(err) + }(destFile) + + cnt, err := fClient.DownloadFile(context.Background(), destFile, &file.DownloadFileOptions{ + ChunkSize: 10 * 1024 * 1024, + Concurrency: 5, + }) + _require.NoError(err) + _require.Equal(cnt, fileSize) + + hash := md5.New() + _, err = io.Copy(hash, destFile) + _require.NoError(err) + downloadedContentMD5 := hash.Sum(nil) + + _require.EqualValues(downloadedContentMD5, contentMD5) + + gResp2, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp2.ContentLength, fileSize) + + rangeList, err := fClient.GetRangeList(context.Background(), nil) + _require.NoError(err) + _require.Len(rangeList.Ranges, 1) + _require.Equal(*rangeList.Ranges[0].Start, int64(0)) + _require.Equal(*rangeList.Ranges[0].End, fileSize-1) +} + +func (f *FileRecordedTestsSuite) TestUploadDownloadDefaultNonDefaultMD5() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := 
+	// Test a download with a different range, and validate that FileContentMD5 is returned correctly.
+	_require.EqualValues(resp.ContentMD5, pResp.ContentMD5) // Note: This case is intended to get the entire file, so the entire file's MD5 will be returned.
+	_require.Nil(resp.ContentRange) // Note: ContentRange is returned only when a range is specified explicitly.
svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := testcommon.CreateNewFileFromShare(context.Background(), _require, testcommon.GenerateFileName(testName), 0, shareClient) + + _, err = fClient.DownloadStream(context.Background(), &file.DownloadStreamOptions{ + Range: file.HTTPRange{ + Offset: int64(len(testcommon.FileDefaultData)), + Count: file.CountToEnd, + }, + }) + testcommon.ValidateFileErrorCode(_require, err, fileerror.InvalidRange) +} + +func (f *FileRecordedTestsSuite) TestFileDownloadDataEntireFile() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := testcommon.CreateNewFileFromShareWithData(context.Background(), _require, testcommon.GenerateFileName(testName), shareClient) + + resp, err := fClient.DownloadStream(context.Background(), nil) + _require.NoError(err) + + // Specifying a count of 0 results in the value being ignored + data, err := ioutil.ReadAll(resp.Body) + _require.NoError(err) + _require.EqualValues(string(data), testcommon.FileDefaultData) +} + +func (f *FileRecordedTestsSuite) TestFileDownloadDataCountExact() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := testcommon.CreateNewFileFromShareWithData(context.Background(), _require, testcommon.GenerateFileName(testName), shareClient) + + resp, err := 
fClient.DownloadStream(context.Background(), &file.DownloadStreamOptions{ + Range: file.HTTPRange{ + Offset: 0, + Count: int64(len(testcommon.FileDefaultData)), + }, + }) + _require.NoError(err) + + data, err := ioutil.ReadAll(resp.Body) + _require.NoError(err) + _require.EqualValues(string(data), testcommon.FileDefaultData) +} + +func (f *FileRecordedTestsSuite) TestFileDownloadDataCountOutOfRange() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := testcommon.CreateNewFileFromShareWithData(context.Background(), _require, testcommon.GenerateFileName(testName), shareClient) + + resp, err := fClient.DownloadStream(context.Background(), &file.DownloadStreamOptions{ + Range: file.HTTPRange{ + Offset: 0, + Count: int64(len(testcommon.FileDefaultData)) * 2, + }, + }) + _require.NoError(err) + + data, err := ioutil.ReadAll(resp.Body) + _require.NoError(err) + _require.EqualValues(string(data), testcommon.FileDefaultData) +} + +func (f *FileRecordedTestsSuite) TestFileUploadRangeNilBody() { + _require := require.New(f.T()) + testName := f.T().Name() + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := testcommon.CreateNewFileFromShare(context.Background(), _require, "src"+testcommon.GenerateFileName(testName), 0, shareClient) + + _, err = fClient.UploadRange(context.Background(), 0, nil, nil) + _require.Error(err) + _require.Contains(err.Error(), "body 
must not be nil") +} + +func (f *FileRecordedTestsSuite) TestFileUploadRangeEmptyBody() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := testcommon.CreateNewFileFromShare(context.Background(), _require, testcommon.GenerateFileName(testName), 0, shareClient) + + _, err = fClient.UploadRange(context.Background(), 0, streaming.NopCloser(bytes.NewReader([]byte{})), nil) + _require.Error(err) + _require.Contains(err.Error(), "body must contain readable data whose size is > 0") +} + +func (f *FileRecordedTestsSuite) TestFileUploadRangeNonExistentFile() { + _require := require.New(f.T()) + testName := f.T().Name() + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := testcommon.GetFileClientFromShare(testcommon.GenerateFileName(testName), shareClient) + + rsc, _ := testcommon.GenerateData(12) + _, err = fClient.UploadRange(context.Background(), 0, rsc, nil) + _require.Error(err) + testcommon.ValidateFileErrorCode(_require, err, fileerror.ResourceNotFound) +} + +func (f *FileRecordedTestsSuite) TestFileUploadRangeTransactionalMD5() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer 
testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := testcommon.CreateNewFileFromShare(context.Background(), _require, testcommon.GenerateFileName(testName), 2048, shareClient) + + contentR, contentD := testcommon.GenerateData(2048) + _md5 := md5.Sum(contentD) + + // Upload range with correct transactional MD5 + pResp, err := fClient.UploadRange(context.Background(), 0, contentR, &file.UploadRangeOptions{ + TransactionalValidation: file.TransferValidationTypeMD5(_md5[:]), + }) + _require.NoError(err) + _require.NotNil(pResp.ContentMD5) + _require.NotNil(pResp.ETag) + _require.Equal(pResp.LastModified.IsZero(), false) + _require.NotNil(pResp.RequestID) + _require.NotNil(pResp.Version) + _require.Equal(pResp.Date.IsZero(), false) + _require.EqualValues(pResp.ContentMD5, _md5[:]) + + // Upload range with empty MD5, nil MD5 is covered by other cases. + pResp, err = fClient.UploadRange(context.Background(), 1024, streaming.NopCloser(bytes.NewReader(contentD[1024:])), nil) + _require.NoError(err) + _require.NotNil(pResp.ContentMD5) + + resp, err := fClient.DownloadStream(context.Background(), nil) + _require.NoError(err) + _require.Equal(*resp.ContentLength, int64(2048)) + + downloadedData, err := ioutil.ReadAll(resp.Body) + _require.NoError(err) + _require.EqualValues(downloadedData, contentD[:]) +} + +func (f *FileRecordedTestsSuite) TestFileUploadRangeIncorrectTransactionalMD5() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := testcommon.CreateNewFileFromShare(context.Background(), _require, testcommon.GenerateFileName(testName), 2048, shareClient) + + contentR, _ := testcommon.GenerateData(2048) 
+// Tests for GetRangeList and ClearRange
_require.NotNil(rangeList.Version) + _require.Equal(rangeList.Date.IsZero(), false) + _require.Len(rangeList.Ranges, 1) + _require.Equal(*rangeList.Ranges[0].Start, int64(0)) + _require.Equal(*rangeList.Ranges[0].End, int64(1023)) +} + +// Default means clear the entire file's range +func (f *FileRecordedTestsSuite) TestClearRangeDefault() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := testcommon.CreateNewFileFromShare(context.Background(), _require, testcommon.GenerateFileName(testName), 2048, shareClient) + defer testcommon.DeleteFile(context.Background(), _require, fClient) + + rsc, _ := testcommon.GenerateData(2048) + _, err = fClient.UploadRange(context.Background(), 0, rsc, nil) + _require.NoError(err) + + _, err = fClient.ClearRange(context.Background(), file.HTTPRange{Offset: 0, Count: 2048}, nil) + _require.NoError(err) + + rangeList, err := fClient.GetRangeList(context.Background(), &file.GetRangeListOptions{ + Range: file.HTTPRange{Offset: 0, Count: file.CountToEnd}, + }) + _require.NoError(err) + _require.Len(rangeList.Ranges, 0) +} + +func (f *FileRecordedTestsSuite) TestClearRangeNonDefault() { + _require := require.New(f.T()) + testName := f.T().Name() + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := testcommon.CreateNewFileFromShare(context.Background(), _require, testcommon.GenerateFileName(testName), 4096, shareClient) + 
+// When the range is not 512-byte aligned, ClearRange zeroes out the unaligned portion but does not eliminate the range.
+func (f *FileRecordedTestsSuite) TestClearRangeNonDefaultCount() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := testcommon.CreateNewFileFromShare(context.Background(), _require, testcommon.GenerateFileName(testName), 1, shareClient) + defer testcommon.DeleteFile(context.Background(), _require, fClient) + + d := []byte{65} + _, err = fClient.UploadRange(context.Background(), 0, streaming.NopCloser(bytes.NewReader(d)), nil) + _require.NoError(err) + + rangeList, err := fClient.GetRangeList(context.Background(), &file.GetRangeListOptions{ + Range: file.HTTPRange{Offset: 0, Count: file.CountToEnd}, + }) + _require.NoError(err) + _require.Len(rangeList.Ranges, 1) + _require.EqualValues(*rangeList.Ranges[0], file.ShareFileRange{Start: to.Ptr(int64(0)), End: to.Ptr(int64(0))}) + + _, err = fClient.ClearRange(context.Background(), file.HTTPRange{Offset: 0, Count: 1}, nil) + _require.NoError(err) + + rangeList, err = fClient.GetRangeList(context.Background(), &file.GetRangeListOptions{ + Range: file.HTTPRange{Offset: 0, Count: file.CountToEnd}, + }) + _require.NoError(err) + _require.Len(rangeList.Ranges, 1) + _require.EqualValues(*rangeList.Ranges[0], file.ShareFileRange{Start: to.Ptr(int64(0)), End: to.Ptr(int64(0))}) + + dResp, err := fClient.DownloadStream(context.Background(), nil) + _require.NoError(err) + + _bytes, err := ioutil.ReadAll(dResp.Body) + _require.NoError(err) + _require.EqualValues(_bytes, []byte{0}) +} + +func (f *FileRecordedTestsSuite) TestFileClearRangeNegativeInvalidCount() { + _require := require.New(f.T()) + testName := f.T().Name() + svcClient, err := testcommon.GetServiceClient(f.T(), 
testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.GetShareClient(testcommon.GenerateShareName(testName), svcClient) + fClient := testcommon.GetFileClientFromShare(testcommon.GenerateFileName(testName), shareClient) + + _, err = fClient.ClearRange(context.Background(), file.HTTPRange{Offset: 0, Count: 0}, nil) + _require.Error(err) + _require.Contains(err.Error(), "invalid argument: either offset is < 0 or count <= 0") +} + +func (f *FileRecordedTestsSuite) TestFileGetRangeListDefaultEmptyFile() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := testcommon.CreateNewFileFromShare(context.Background(), _require, testcommon.GenerateFileName(testName), 0, shareClient) + + resp, err := fClient.GetRangeList(context.Background(), nil) + _require.NoError(err) + _require.Len(resp.Ranges, 0) +} + +func setupGetRangeListTest(_require *require.Assertions, testName string, fileSize int64, shareClient *share.Client) *file.Client { + fClient := testcommon.CreateNewFileFromShare(context.Background(), _require, testcommon.GenerateFileName(testName), fileSize, shareClient) + rsc, _ := testcommon.GenerateData(int(fileSize)) + _, err := fClient.UploadRange(context.Background(), 0, rsc, nil) + _require.NoError(err) + return fClient +} + +func (f *FileRecordedTestsSuite) TestFileGetRangeListDefaultRange() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + 
defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fileSize := int64(512) + fClient := setupGetRangeListTest(_require, testName, fileSize, shareClient) + + resp, err := fClient.GetRangeList(context.Background(), &file.GetRangeListOptions{ + Range: file.HTTPRange{Offset: 0, Count: file.CountToEnd}, + }) + _require.NoError(err) + _require.Len(resp.Ranges, 1) + _require.EqualValues(*resp.Ranges[0], file.ShareFileRange{Start: to.Ptr(int64(0)), End: to.Ptr(fileSize - 1)}) +} + +func (f *FileRecordedTestsSuite) TestFileGetRangeListNonContiguousRanges() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fileSize := int64(512) + fClient := setupGetRangeListTest(_require, testName, fileSize, shareClient) + + _, err = fClient.Resize(context.Background(), fileSize*3, nil) + _require.NoError(err) + + rsc, _ := testcommon.GenerateData(int(fileSize)) + _, err = fClient.UploadRange(context.Background(), fileSize*2, rsc, nil) + _require.NoError(err) + + resp, err := fClient.GetRangeList(context.Background(), nil) + _require.NoError(err) + _require.Len(resp.Ranges, 2) + _require.EqualValues(*resp.Ranges[0], file.ShareFileRange{Start: to.Ptr(int64(0)), End: to.Ptr(fileSize - 1)}) + _require.EqualValues(*resp.Ranges[1], file.ShareFileRange{Start: to.Ptr(fileSize * 2), End: to.Ptr((fileSize * 3) - 1)}) +} + +func (f *FileRecordedTestsSuite) TestFileGetRangeListNonContiguousRangesCountLess() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := 
testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient)
+ defer testcommon.DeleteShare(context.Background(), _require, shareClient)
+
+ fileSize := int64(512)
+ fClient := setupGetRangeListTest(_require, testName, fileSize, shareClient)
+
+ resp, err := fClient.GetRangeList(context.Background(), &file.GetRangeListOptions{
+ Range: file.HTTPRange{Offset: 0, Count: fileSize},
+ })
+ _require.NoError(err)
+ _require.Len(resp.Ranges, 1)
+ _require.EqualValues(int64(0), *(resp.Ranges[0].Start))
+ _require.EqualValues(fileSize-1, *(resp.Ranges[0].End))
+}
+
+// TestFileGetRangeListNonContiguousRangesCountExceed verifies that requesting a range count
+// one byte larger than the file size still returns only the file's actual extent.
+func (f *FileRecordedTestsSuite) TestFileGetRangeListNonContiguousRangesCountExceed() {
+ _require := require.New(f.T())
+ testName := f.T().Name()
+
+ svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil)
+ _require.NoError(err)
+
+ shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient)
+ defer testcommon.DeleteShare(context.Background(), _require, shareClient)
+
+ fileSize := int64(512)
+ fClient := setupGetRangeListTest(_require, testName, fileSize, shareClient)
+
+ resp, err := fClient.GetRangeList(context.Background(), &file.GetRangeListOptions{
+ Range: file.HTTPRange{Offset: 0, Count: fileSize + 1},
+ })
+ _require.NoError(err)
+ _require.Len(resp.Ranges, 1)
+ _require.EqualValues(*resp.Ranges[0], file.ShareFileRange{Start: to.Ptr(int64(0)), End: to.Ptr(fileSize - 1)})
+}
+
+func (f *FileRecordedTestsSuite) TestFileGetRangeListSnapshot() {
+ _require := require.New(f.T())
+ testName := f.T().Name()
+
+ svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil)
+ _require.NoError(err)
+
+ shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient)
+ defer func() {
+ _, err := shareClient.Delete(context.Background(), 
&share.DeleteOptions{DeleteSnapshots: to.Ptr(share.DeleteSnapshotsOptionTypeInclude)}) + _require.NoError(err) + }() + + fileSize := int64(512) + fClient := setupGetRangeListTest(_require, testName, fileSize, shareClient) + + resp, _ := shareClient.CreateSnapshot(context.Background(), nil) + _require.NotNil(resp.Snapshot) + + resp2, err := fClient.GetRangeList(context.Background(), &file.GetRangeListOptions{ + Range: file.HTTPRange{Offset: 0, Count: file.CountToEnd}, + ShareSnapshot: resp.Snapshot, + }) + _require.NoError(err) + _require.Len(resp2.Ranges, 1) + _require.EqualValues(*resp2.Ranges[0], file.ShareFileRange{Start: to.Ptr(int64(0)), End: to.Ptr(fileSize - 1)}) +} + +func (f *FileRecordedTestsSuite) TestFileUploadDownloadSmallBuffer() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + var fileSize int64 = 10 * 1024 + fClient := shareClient.NewRootDirectoryClient().NewFileClient(testcommon.GenerateFileName(testName)) + _, err = fClient.Create(context.Background(), fileSize, nil) + _require.NoError(err) + + gResp, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp.ContentLength, fileSize) + + _, content := testcommon.GenerateData(int(fileSize)) + md5Value := md5.Sum(content) + contentMD5 := md5Value[:] + + err = fClient.UploadBuffer(context.Background(), content, &file.UploadBufferOptions{ + Concurrency: 5, + ChunkSize: 2 * 1024, + }) + _require.NoError(err) + + destBuffer := make([]byte, fileSize) + cnt, err := fClient.DownloadBuffer(context.Background(), destBuffer, &file.DownloadBufferOptions{ + ChunkSize: 2 * 1024, + Concurrency: 5, + }) + _require.NoError(err) + 
_require.Equal(cnt, fileSize) + + downloadedMD5Value := md5.Sum(destBuffer) + downloadedContentMD5 := downloadedMD5Value[:] + + _require.EqualValues(downloadedContentMD5, contentMD5) + + gResp2, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp2.ContentLength, fileSize) + + rangeList, err := fClient.GetRangeList(context.Background(), nil) + _require.NoError(err) + _require.Len(rangeList.Ranges, 1) + _require.Equal(*rangeList.Ranges[0].Start, int64(0)) + _require.Equal(*rangeList.Ranges[0].End, fileSize-1) +} + +func (f *FileRecordedTestsSuite) TestFileUploadDownloadSmallFile() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + var fileSize int64 = 10 * 1024 + fClient := shareClient.NewRootDirectoryClient().NewFileClient(testcommon.GenerateFileName(testName)) + _, err = fClient.Create(context.Background(), fileSize, nil) + _require.NoError(err) + + gResp, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp.ContentLength, fileSize) + + // create local file + _, content := testcommon.GenerateData(int(fileSize)) + srcFileName := "testFileUpload" + err = ioutil.WriteFile(srcFileName, content, 0644) + _require.NoError(err) + defer func() { + err = os.Remove(srcFileName) + _require.NoError(err) + }() + fh, err := os.Open(srcFileName) + _require.NoError(err) + defer func(fh *os.File) { + err := fh.Close() + _require.NoError(err) + }(fh) + + srcHash := md5.New() + _, err = io.Copy(srcHash, fh) + _require.NoError(err) + contentMD5 := srcHash.Sum(nil) + + err = fClient.UploadFile(context.Background(), fh, &file.UploadFileOptions{ + 
Concurrency: 5, + ChunkSize: 2 * 1024, + }) + _require.NoError(err) + + destFileName := "SmallFile-downloaded.bin" + destFile, err := os.Create(destFileName) + _require.NoError(err) + defer func(name string) { + err = os.Remove(name) + _require.NoError(err) + }(destFileName) + defer func(destFile *os.File) { + err = destFile.Close() + _require.NoError(err) + }(destFile) + + cnt, err := fClient.DownloadFile(context.Background(), destFile, &file.DownloadFileOptions{ + ChunkSize: 2 * 1024, + Concurrency: 5, + }) + _require.NoError(err) + _require.Equal(cnt, fileSize) + + destHash := md5.New() + _, err = io.Copy(destHash, destFile) + _require.NoError(err) + downloadedContentMD5 := destHash.Sum(nil) + + _require.EqualValues(downloadedContentMD5, contentMD5) + + gResp2, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp2.ContentLength, fileSize) + + rangeList, err := fClient.GetRangeList(context.Background(), nil) + _require.NoError(err) + _require.Len(rangeList.Ranges, 1) + _require.Equal(*rangeList.Ranges[0].Start, int64(0)) + _require.Equal(*rangeList.Ranges[0].End, fileSize-1) +} + +func (f *FileRecordedTestsSuite) TestFileUploadDownloadSmallStream() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + var fileSize int64 = 10 * 1024 + fClient := shareClient.NewRootDirectoryClient().NewFileClient(testcommon.GenerateFileName(testName)) + _, err = fClient.Create(context.Background(), fileSize, nil) + _require.NoError(err) + + gResp, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp.ContentLength, fileSize) + + _, content := 
testcommon.GenerateData(int(fileSize)) + md5Value := md5.Sum(content) + contentMD5 := md5Value[:] + + err = fClient.UploadStream(context.Background(), streaming.NopCloser(bytes.NewReader(content)), &file.UploadStreamOptions{ + Concurrency: 5, + ChunkSize: 2 * 1024, + }) + _require.NoError(err) + + dResp, err := fClient.DownloadStream(context.Background(), nil) + _require.NoError(err) + + data, err := io.ReadAll(dResp.Body) + _require.NoError(err) + + downloadedMD5Value := md5.Sum(data) + downloadedContentMD5 := downloadedMD5Value[:] + + _require.EqualValues(downloadedContentMD5, contentMD5) + + gResp2, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp2.ContentLength, fileSize) + + rangeList, err := fClient.GetRangeList(context.Background(), nil) + _require.NoError(err) + _require.Len(rangeList.Ranges, 1) + _require.Equal(*rangeList.Ranges[0].Start, int64(0)) + _require.Equal(*rangeList.Ranges[0].End, fileSize-1) +} + +func (f *FileRecordedTestsSuite) TestFileUploadDownloadWithProgress() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + var fileSize int64 = 10 * 1024 + fClient := shareClient.NewRootDirectoryClient().NewFileClient(testcommon.GenerateFileName(testName)) + _, err = fClient.Create(context.Background(), fileSize, nil) + _require.NoError(err) + + gResp, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp.ContentLength, fileSize) + + _, content := testcommon.GenerateData(int(fileSize)) + md5Value := md5.Sum(content) + contentMD5 := md5Value[:] + + bytesUploaded := int64(0) + err = 
fClient.UploadBuffer(context.Background(), content, &file.UploadBufferOptions{ + Concurrency: 5, + ChunkSize: 2 * 1024, + Progress: func(bytesTransferred int64) { + _require.GreaterOrEqual(bytesTransferred, bytesUploaded) + bytesUploaded = bytesTransferred + }, + }) + _require.NoError(err) + _require.Equal(bytesUploaded, fileSize) + + destBuffer := make([]byte, fileSize) + bytesDownloaded := int64(0) + cnt, err := fClient.DownloadBuffer(context.Background(), destBuffer, &file.DownloadBufferOptions{ + ChunkSize: 2 * 1024, + Concurrency: 5, + Progress: func(bytesTransferred int64) { + _require.GreaterOrEqual(bytesTransferred, bytesDownloaded) + bytesDownloaded = bytesTransferred + }, + }) + _require.NoError(err) + _require.Equal(cnt, fileSize) + _require.Equal(bytesDownloaded, fileSize) + + downloadedMD5Value := md5.Sum(destBuffer) + downloadedContentMD5 := downloadedMD5Value[:] + + _require.EqualValues(downloadedContentMD5, contentMD5) + + gResp2, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp2.ContentLength, fileSize) + + rangeList, err := fClient.GetRangeList(context.Background(), nil) + _require.NoError(err) + _require.Len(rangeList.Ranges, 1) + _require.Equal(*rangeList.Ranges[0].Start, int64(0)) + _require.Equal(*rangeList.Ranges[0].End, fileSize-1) +} + +func (f *FileRecordedTestsSuite) TestFileListHandlesDefault() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := testcommon.CreateNewFileFromShare(context.Background(), _require, testcommon.GenerateFileName(testName), 2048, shareClient) + + resp, err := fClient.ListHandles(context.Background(), nil) + 
_require.NoError(err) + _require.Len(resp.Handles, 0) + _require.NotNil(resp.NextMarker) + _require.Equal(*resp.NextMarker, "") +} + +func (f *FileRecordedTestsSuite) TestFileForceCloseHandlesDefault() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := testcommon.CreateNewFileFromShare(context.Background(), _require, testcommon.GenerateFileName(testName), 2048, shareClient) + + resp, err := fClient.ForceCloseHandles(context.Background(), "*", nil) + _require.NoError(err) + _require.EqualValues(*resp.NumberOfHandlesClosed, 0) + _require.EqualValues(*resp.NumberOfHandlesFailedToClose, 0) + _require.Nil(resp.Marker) +} + +// TODO: Add tests for retry header options + +// TODO: fix links in README: source, file_error, samples diff --git a/sdk/storage/azfile/file/constants.go b/sdk/storage/azfile/file/constants.go new file mode 100644 index 000000000000..c5687bd1b3b5 --- /dev/null +++ b/sdk/storage/azfile/file/constants.go @@ -0,0 +1,78 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package file + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/generated" +) + +const ( + _1MiB = 1024 * 1024 + CountToEnd = 0 + + // MaxUpdateRangeBytes indicates the maximum number of bytes that can be updated in a call to Client.UploadRange. + MaxUpdateRangeBytes = 4 * 1024 * 1024 // 4MiB + + // MaxFileSize indicates the maximum size of the file allowed. 
+ MaxFileSize = 4 * 1024 * 1024 * 1024 * 1024 // 4 TiB + + // DefaultDownloadChunkSize is default chunk size + DefaultDownloadChunkSize = int64(4 * 1024 * 1024) // 4MiB +) + +// CopyStatusType defines the states of the copy operation. +type CopyStatusType = generated.CopyStatusType + +const ( + CopyStatusTypePending CopyStatusType = generated.CopyStatusTypePending + CopyStatusTypeSuccess CopyStatusType = generated.CopyStatusTypeSuccess + CopyStatusTypeAborted CopyStatusType = generated.CopyStatusTypeAborted + CopyStatusTypeFailed CopyStatusType = generated.CopyStatusTypeFailed +) + +// PossibleCopyStatusTypeValues returns the possible values for the CopyStatusType const type. +func PossibleCopyStatusTypeValues() []CopyStatusType { + return generated.PossibleCopyStatusTypeValues() +} + +// PermissionCopyModeType determines the copy behavior of the security descriptor of the file. +// - source: The security descriptor on the destination file is copied from the source file. +// - override: The security descriptor on the destination file is determined via the x-ms-file-permission or x-ms-file-permission-key header. +type PermissionCopyModeType = generated.PermissionCopyModeType + +const ( + PermissionCopyModeTypeSource PermissionCopyModeType = generated.PermissionCopyModeTypeSource + PermissionCopyModeTypeOverride PermissionCopyModeType = generated.PermissionCopyModeTypeOverride +) + +// PossiblePermissionCopyModeTypeValues returns the possible values for the PermissionCopyModeType const type. +func PossiblePermissionCopyModeTypeValues() []PermissionCopyModeType { + return generated.PossiblePermissionCopyModeTypeValues() +} + +// RangeWriteType represents one of the following options. +// - update: Writes the bytes specified by the request body into the specified range. The Range and Content-Length headers must match to perform the update. +// - clear: Clears the specified range and releases the space used in storage for that range. 
To clear a range, set the Content-Length header to zero, +// and set the Range header to a value that indicates the range to clear, up to maximum file size. +type RangeWriteType = generated.FileRangeWriteType + +const ( + RangeWriteTypeUpdate RangeWriteType = generated.FileRangeWriteTypeUpdate + RangeWriteTypeClear RangeWriteType = generated.FileRangeWriteTypeClear +) + +// PossibleRangeWriteTypeValues returns the possible values for the RangeWriteType const type. +func PossibleRangeWriteTypeValues() []RangeWriteType { + return generated.PossibleFileRangeWriteTypeValues() +} + +// TransferValidationType abstracts the various mechanisms used to verify a transfer. +type TransferValidationType = exported.TransferValidationType + +// TransferValidationTypeMD5 is a TransferValidationType used to provide a precomputed MD5. +type TransferValidationTypeMD5 = exported.TransferValidationTypeMD5 diff --git a/sdk/storage/azfile/file/examples_test.go b/sdk/storage/azfile/file/examples_test.go new file mode 100644 index 000000000000..b54aa5aab215 --- /dev/null +++ b/sdk/storage/azfile/file/examples_test.go @@ -0,0 +1,650 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package file_test + +import ( + "bytes" + "context" + "crypto/rand" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/sas" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/service" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/share" + "io" + "io/ioutil" + "log" + "os" + "strings" + "time" +) + +func handleError(err error) { + if err != nil { + log.Fatal(err.Error()) + } +} + +const random64BString string = "2SDgZj6RkKYzJpu04sweQek4uWHO8ndPnYlZ0tnFS61hjnFZ5IkvIGGY44eKABov" + +func generateData(sizeInBytes int) (io.ReadSeekCloser, []byte) { + data := make([]byte, sizeInBytes) + _len := len(random64BString) + if sizeInBytes > _len { + count := sizeInBytes / _len + if sizeInBytes%_len != 0 { + count = count + 1 + } + copy(data[:], strings.Repeat(random64BString, count)) + } else { + copy(data[:], random64BString) + } + return streaming.NopCloser(bytes.NewReader(data)), data +} + +func Example_client_NewClient_CreateShare_CreateDir_CreateFile() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + + serviceURL := fmt.Sprintf("https://%s.file.core.windows.net/", accountName) + cred, err := service.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + + client, err := service.NewClientWithSharedKeyCredential(serviceURL, cred, nil) + handleError(err) + + shareClient := client.NewShareClient("testShare") + fmt.Println(shareClient.URL()) + + dirClient := shareClient.NewDirectoryClient("testDir") + fmt.Println(dirClient.URL()) + + fileClient := dirClient.NewFileClient("testFile") + fmt.Println(fileClient.URL()) + +} + +func 
Example_file_NewClientFromConnectionString() { + // Your connection string can be obtained from the Azure Portal. + connectionString, ok := os.LookupEnv("AZURE_STORAGE_CONNECTION_STRING") + if !ok { + log.Fatal("the environment variable 'AZURE_STORAGE_CONNECTION_STRING' could not be found") + } + shareName := "testShare" + filePath := "testDir/testFile" + fileClient, err := file.NewClientFromConnectionString(connectionString, shareName, filePath, nil) + handleError(err) + fmt.Println(fileClient.URL()) +} + +func Example_fileClient_CreateAndDelete() { + // Your connection string can be obtained from the Azure Portal. + connectionString, ok := os.LookupEnv("AZURE_STORAGE_CONNECTION_STRING") + if !ok { + log.Fatal("the environment variable 'AZURE_STORAGE_CONNECTION_STRING' could not be found") + } + shareName := "testShare" + fileName := "testFile" + shareClient, err := share.NewClientFromConnectionString(connectionString, shareName, nil) + handleError(err) + + _, err = shareClient.Create(context.Background(), nil) + handleError(err) + + fileClient := shareClient.NewRootDirectoryClient().NewFileClient(fileName) + _, err = fileClient.Create(context.Background(), 5, nil) + handleError(err) + + _, err = fileClient.Delete(context.Background(), nil) + handleError(err) + + _, err = shareClient.Delete(context.Background(), nil) + handleError(err) +} + +func Example_fileClient_GetProperties() { + // Your connection string can be obtained from the Azure Portal. 
+ connectionString, ok := os.LookupEnv("AZURE_STORAGE_CONNECTION_STRING") + if !ok { + log.Fatal("the environment variable 'AZURE_STORAGE_CONNECTION_STRING' could not be found") + } + shareName := "testShare" + fileName := "testFile" + shareClient, err := share.NewClientFromConnectionString(connectionString, shareName, nil) + handleError(err) + + _, err = shareClient.Create(context.Background(), nil) + handleError(err) + + fileClient := shareClient.NewRootDirectoryClient().NewFileClient(fileName) + _, err = fileClient.Create(context.Background(), 5, nil) + handleError(err) + + _, err = fileClient.GetProperties(context.Background(), nil) + handleError(err) + + _, err = fileClient.Delete(context.Background(), nil) + handleError(err) + + _, err = shareClient.Delete(context.Background(), nil) + handleError(err) + +} + +func Example_fileClient_SetAndGetMetadata() { + // Your connection string can be obtained from the Azure Portal. + connectionString, ok := os.LookupEnv("AZURE_STORAGE_CONNECTION_STRING") + if !ok { + log.Fatal("the environment variable 'AZURE_STORAGE_CONNECTION_STRING' could not be found") + } + shareName := "testShare" + fileName := "testFile" + shareClient, err := share.NewClientFromConnectionString(connectionString, shareName, nil) + handleError(err) + + _, err = shareClient.Create(context.Background(), nil) + handleError(err) + + fileClient := shareClient.NewRootDirectoryClient().NewFileClient(fileName) + _, err = fileClient.Create(context.Background(), 5, nil) + handleError(err) + + opts := file.SetMetadataOptions{Metadata: map[string]*string{"hello": to.Ptr("world")}} + _, err = fileClient.SetMetadata(context.Background(), &opts) + handleError(err) + + get, err := fileClient.GetProperties(context.Background(), nil) + handleError(err) + + if get.Metadata == nil { + log.Fatal("No metadata returned") + } + for k, v := range get.Metadata { + fmt.Print(k + "=" + *v + "\n") + } + + _, err = fileClient.Delete(context.Background(), nil) + handleError(err) 
+ + _, err = shareClient.Delete(context.Background(), nil) + handleError(err) +} + +func Example_fileClient_UploadBuffer() { + // Your connection string can be obtained from the Azure Portal. + connectionString, ok := os.LookupEnv("AZURE_STORAGE_CONNECTION_STRING") + if !ok { + log.Fatal("the environment variable 'AZURE_STORAGE_CONNECTION_STRING' could not be found") + } + shareName := "testShare" + fileName := "testFile" + shareClient, err := share.NewClientFromConnectionString(connectionString, shareName, nil) + handleError(err) + + _, err = shareClient.Create(context.Background(), nil) + handleError(err) + + fileClient := shareClient.NewRootDirectoryClient().NewFileClient(fileName) + _, err = fileClient.Create(context.Background(), 5, nil) + handleError(err) + + data := []byte{'h', 'e', 'l', 'l', 'o'} + err = fileClient.UploadBuffer(context.Background(), data, nil) + handleError(err) + + _, err = fileClient.Delete(context.Background(), nil) + handleError(err) + + _, err = shareClient.Delete(context.Background(), nil) + handleError(err) +} + +func Example_fileClient_UploadStream() { + // Your connection string can be obtained from the Azure Portal. 
+ connectionString, ok := os.LookupEnv("AZURE_STORAGE_CONNECTION_STRING") + if !ok { + log.Fatal("the environment variable 'AZURE_STORAGE_CONNECTION_STRING' could not be found") + } + shareName := "testShare" + fileName := "testFile" + shareClient, err := share.NewClientFromConnectionString(connectionString, shareName, nil) + handleError(err) + + _, err = shareClient.Create(context.Background(), nil) + handleError(err) + + fileClient := shareClient.NewRootDirectoryClient().NewFileClient(fileName) + _, err = fileClient.Create(context.Background(), 5, nil) + handleError(err) + + err = fileClient.UploadStream( + context.TODO(), + streaming.NopCloser(strings.NewReader("Some text")), + nil, + ) + handleError(err) + + _, err = fileClient.Delete(context.Background(), nil) + handleError(err) + + _, err = shareClient.Delete(context.Background(), nil) + handleError(err) +} + +func Example_fileClient_UploadAndClearRange() { + // Your connection string can be obtained from the Azure Portal. + connectionString, ok := os.LookupEnv("AZURE_STORAGE_CONNECTION_STRING") + if !ok { + log.Fatal("the environment variable 'AZURE_STORAGE_CONNECTION_STRING' could not be found") + } + shareName := "testShare" + fileName := "testFile" + shareClient, err := share.NewClientFromConnectionString(connectionString, shareName, nil) + handleError(err) + + _, err = shareClient.Create(context.Background(), nil) + handleError(err) + + fileClient := shareClient.NewRootDirectoryClient().NewFileClient(fileName) + _, err = fileClient.Create(context.Background(), 5, nil) + handleError(err) + + contentR, _ := generateData(5) + + _, err = fileClient.UploadRange(context.Background(), 0, contentR, nil) + handleError(err) + + rangeList, err := fileClient.GetRangeList(context.Background(), nil) + handleError(err) + fmt.Println(rangeList.Ranges) + + _, err = fileClient.ClearRange(context.Background(), file.HTTPRange{Offset: 0, Count: int64(5)}, nil) + handleError(err) + + rangeList2, err := 
fileClient.GetRangeList(context.Background(), nil) + handleError(err) + + fmt.Println(rangeList2.Ranges, 0) + _, err = fileClient.Delete(context.Background(), nil) + handleError(err) + + _, err = shareClient.Delete(context.Background(), nil) + handleError(err) +} + +func Example_fileClient_StartCopyFromURL() { + // Your connection string can be obtained from the Azure Portal. + connectionString, ok := os.LookupEnv("AZURE_STORAGE_CONNECTION_STRING") + if !ok { + log.Fatal("the environment variable 'AZURE_STORAGE_CONNECTION_STRING' could not be found") + } + shareName := "testShare" + srcFileName := "testFile" + dstFileName := "testFile2" + fileSize := int64(5) + + shareClient, err := share.NewClientFromConnectionString(connectionString, shareName, nil) + handleError(err) + + _, err = shareClient.Create(context.Background(), nil) + handleError(err) + + srcFileClient := shareClient.NewRootDirectoryClient().NewFileClient(srcFileName) + _, err = srcFileClient.Create(context.Background(), fileSize, nil) + handleError(err) + + dstFileClient := shareClient.NewRootDirectoryClient().NewFileClient(dstFileName) + + contentR, _ := generateData(int(fileSize)) + + _, err = srcFileClient.UploadRange(context.Background(), 0, contentR, nil) + handleError(err) + + // you can also use AbortCopy to abort copying + _, err = dstFileClient.StartCopyFromURL(context.Background(), srcFileClient.URL(), nil) + handleError(err) + + _, err = srcFileClient.Delete(context.Background(), nil) + handleError(err) + + _, err = dstFileClient.Delete(context.Background(), nil) + handleError(err) + + _, err = shareClient.Delete(context.Background(), nil) + handleError(err) +} + +func Example_fileClient_DownloadStream() { + // Your connection string can be obtained from the Azure Portal. 
+ connectionString, ok := os.LookupEnv("AZURE_STORAGE_CONNECTION_STRING") + if !ok { + log.Fatal("the environment variable 'AZURE_STORAGE_CONNECTION_STRING' could not be found") + } + shareName := "testShare" + srcFileName := "testFile" + fileSize := int64(5) + + shareClient, err := share.NewClientFromConnectionString(connectionString, shareName, nil) + handleError(err) + + _, err = shareClient.Create(context.Background(), nil) + handleError(err) + + srcFileClient := shareClient.NewRootDirectoryClient().NewFileClient(srcFileName) + _, err = srcFileClient.Create(context.Background(), fileSize, nil) + handleError(err) + + contentR, _ := generateData(int(fileSize)) + + _, err = srcFileClient.UploadRange(context.Background(), 0, contentR, nil) + handleError(err) + + // validate data copied + resp, err := srcFileClient.DownloadStream(context.Background(), &file.DownloadStreamOptions{ + Range: file.HTTPRange{Offset: 0, Count: fileSize}, + }) + handleError(err) + + content1, err := io.ReadAll(resp.Body) + handleError(err) + fmt.Println(content1) + + _, err = srcFileClient.Delete(context.Background(), nil) + handleError(err) + + _, err = shareClient.Delete(context.Background(), nil) + handleError(err) +} + +func Example_fileClient_DownloadBuffer() { + // Your connection string can be obtained from the Azure Portal. 
+ connectionString, ok := os.LookupEnv("AZURE_STORAGE_CONNECTION_STRING") + if !ok { + log.Fatal("the environment variable 'AZURE_STORAGE_CONNECTION_STRING' could not be found") + } + shareName := "testShare" + srcFileName := "testFile" + fileSize := int64(5) + + shareClient, err := share.NewClientFromConnectionString(connectionString, shareName, nil) + handleError(err) + + _, err = shareClient.Create(context.Background(), nil) + handleError(err) + + srcFileClient := shareClient.NewRootDirectoryClient().NewFileClient(srcFileName) + _, err = srcFileClient.Create(context.Background(), fileSize, nil) + handleError(err) + + content := make([]byte, fileSize) + _, err = rand.Read(content) + handleError(err) + + err = srcFileClient.UploadBuffer(context.Background(), content, nil) + handleError(err) + + destBuffer := make([]byte, fileSize) + _, err = srcFileClient.DownloadBuffer(context.Background(), destBuffer, nil) + handleError(err) + + _, err = srcFileClient.Delete(context.Background(), nil) + handleError(err) + + _, err = shareClient.Delete(context.Background(), nil) + handleError(err) +} + +func Example_fileClient_DownloadFile() { + // Your connection string can be obtained from the Azure Portal. 
+ connectionString, ok := os.LookupEnv("AZURE_STORAGE_CONNECTION_STRING") + if !ok { + log.Fatal("the environment variable 'AZURE_STORAGE_CONNECTION_STRING' could not be found") + } + shareName := "testShare" + srcFileName := "testFile" + fileSize := int64(5) + + shareClient, err := share.NewClientFromConnectionString(connectionString, shareName, nil) + handleError(err) + + _, err = shareClient.Create(context.Background(), nil) + handleError(err) + + srcFileClient := shareClient.NewRootDirectoryClient().NewFileClient(srcFileName) + _, err = srcFileClient.Create(context.Background(), fileSize, nil) + handleError(err) + + content := make([]byte, fileSize) + _, err = rand.Read(content) + handleError(err) + + err = srcFileClient.UploadBuffer(context.Background(), content, nil) + handleError(err) + + destFileName := "file.bin" + destFile, err := os.Create(destFileName) + handleError(err) + defer func(name string) { + err = os.Remove(name) + handleError(err) + }(destFileName) + defer func(destFile *os.File) { + err = destFile.Close() + handleError(err) + }(destFile) + + _, err = srcFileClient.DownloadFile(context.Background(), destFile, nil) + handleError(err) + + _, err = srcFileClient.Delete(context.Background(), nil) + handleError(err) + + _, err = shareClient.Delete(context.Background(), nil) + handleError(err) +} + +func Example_fileClient_UploadFile() { + // Your connection string can be obtained from the Azure Portal. 
+ connectionString, ok := os.LookupEnv("AZURE_STORAGE_CONNECTION_STRING") + if !ok { + log.Fatal("the environment variable 'AZURE_STORAGE_CONNECTION_STRING' could not be found") + } + shareName := "testShare" + srcFileName := "testFile" + fileSize := int64(5) + + shareClient, err := share.NewClientFromConnectionString(connectionString, shareName, nil) + handleError(err) + + _, err = shareClient.Create(context.Background(), nil) + handleError(err) + + srcFileClient := shareClient.NewRootDirectoryClient().NewFileClient(srcFileName) + _, err = srcFileClient.Create(context.Background(), fileSize, nil) + handleError(err) + + _, content := generateData(int(fileSize)) + err = ioutil.WriteFile(srcFileName, content, 0644) + handleError(err) + defer func() { + err = os.Remove(srcFileName) + handleError(err) + }() + fh, err := os.Open(srcFileName) + handleError(err) + defer func(fh *os.File) { + err := fh.Close() + handleError(err) + }(fh) + + err = srcFileClient.UploadFile(context.Background(), fh, nil) + + destFileName := "file.bin" + destFile, err := os.Create(destFileName) + handleError(err) + defer func(name string) { + err = os.Remove(name) + handleError(err) + }(destFileName) + defer func(destFile *os.File) { + err = destFile.Close() + handleError(err) + }(destFile) + + _, err = srcFileClient.DownloadFile(context.Background(), destFile, nil) + handleError(err) + + _, err = srcFileClient.Delete(context.Background(), nil) + handleError(err) + + _, err = shareClient.Delete(context.Background(), nil) + handleError(err) +} + +func Example_file_ClientGetSASURL() { + // Your connection string can be obtained from the Azure Portal. 
+ connectionString, ok := os.LookupEnv("AZURE_STORAGE_CONNECTION_STRING") + if !ok { + log.Fatal("the environment variable 'AZURE_STORAGE_CONNECTION_STRING' could not be found") + } + shareName := "testShare" + srcFileName := "testFile" + fileSize := int64(5) + + shareClient, err := share.NewClientFromConnectionString(connectionString, shareName, nil) + handleError(err) + + _, err = shareClient.Create(context.Background(), nil) + handleError(err) + + srcFileClient := shareClient.NewRootDirectoryClient().NewFileClient(srcFileName) + _, err = srcFileClient.Create(context.Background(), fileSize, nil) + handleError(err) + + permission := sas.FilePermissions{Read: true} + start := time.Now() + expiry := start.AddDate(1, 0, 0) + options := file.GetSASURLOptions{StartTime: &start} + sasURL, err := srcFileClient.GetSASURL(permission, expiry, &options) + handleError(err) + _ = sasURL + + _, err = srcFileClient.Delete(context.Background(), nil) + handleError(err) + + _, err = shareClient.Delete(context.Background(), nil) + handleError(err) +} + +func Example_fileClient_Resize() { + // Your connection string can be obtained from the Azure Portal. 
+ connectionString, ok := os.LookupEnv("AZURE_STORAGE_CONNECTION_STRING") + if !ok { + log.Fatal("the environment variable 'AZURE_STORAGE_CONNECTION_STRING' could not be found") + } + shareName := "testShare" + srcFileName := "testFile" + fileSize := int64(5) + + shareClient, err := share.NewClientFromConnectionString(connectionString, shareName, nil) + handleError(err) + + _, err = shareClient.Create(context.Background(), nil) + handleError(err) + + srcFileClient := shareClient.NewRootDirectoryClient().NewFileClient(srcFileName) + _, err = srcFileClient.Create(context.Background(), fileSize, nil) + handleError(err) + + resp1, err := srcFileClient.GetProperties(context.Background(), nil) + handleError(err) + fmt.Println(*resp1.ContentLength) + + _, err = srcFileClient.Resize(context.Background(), 6, nil) + handleError(err) + + resp1, err = srcFileClient.GetProperties(context.Background(), nil) + handleError(err) + fmt.Println(*resp1.ContentLength) + + _, err = srcFileClient.Delete(context.Background(), nil) + handleError(err) + + _, err = shareClient.Delete(context.Background(), nil) + handleError(err) +} + +func Example_fileClient_UploadRangeFromURL() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + + serviceURL := fmt.Sprintf("https://%s.file.core.windows.net/", accountName) + cred, err := service.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + + client, err := service.NewClientWithSharedKeyCredential(serviceURL, cred, nil) + handleError(err) + + shareName := "testShare" + srcFileName := "testFile" + dstFileName := "testFile2" + fileSize := int64(5) + + shareClient := client.NewShareClient(shareName) + _, err = shareClient.Create(context.Background(), nil) + handleError(err) + + srcFileClient := 
shareClient.NewRootDirectoryClient().NewFileClient(srcFileName) + _, err = srcFileClient.Create(context.Background(), fileSize, nil) + handleError(err) + + contentR, _ := generateData(int(fileSize)) + + _, err = srcFileClient.UploadRange(context.Background(), 0, contentR, nil) + handleError(err) + + contentSize := 1024 * 8 // 8KB + content := make([]byte, contentSize) + body := bytes.NewReader(content) + rsc := streaming.NopCloser(body) + + _, err = srcFileClient.UploadRange(context.Background(), 0, rsc, nil) + handleError(err) + + perms := sas.FilePermissions{Read: true, Write: true} + sasQueryParams, err := sas.SignatureValues{ + Protocol: sas.ProtocolHTTPS, // Users MUST use HTTPS (not HTTP) + ExpiryTime: time.Now().UTC().Add(48 * time.Hour), // 48-hours before expiration + ShareName: shareName, + FilePath: srcFileName, + Permissions: perms.String(), + }.SignWithSharedKey(cred) + handleError(err) + + srcFileSAS := srcFileClient.URL() + "?" + sasQueryParams.Encode() + + destFClient := shareClient.NewRootDirectoryClient().NewFileClient(dstFileName) + _, err = destFClient.Create(context.Background(), fileSize, nil) + handleError(err) + + _, err = destFClient.UploadRangeFromURL(context.Background(), srcFileSAS, 0, 0, int64(contentSize), nil) + handleError(err) +} diff --git a/sdk/storage/azfile/file/mmf_unix.go b/sdk/storage/azfile/file/mmf_unix.go new file mode 100644 index 000000000000..dc17528e6516 --- /dev/null +++ b/sdk/storage/azfile/file/mmf_unix.go @@ -0,0 +1,38 @@ +//go:build go1.18 && (linux || darwin || freebsd || openbsd || netbsd || solaris) +// +build go1.18 +// +build linux darwin freebsd openbsd netbsd solaris + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
// mmb is a memory mapped buffer
type mmb []byte

// newMMB creates a new memory mapped buffer with the specified size.
// The mapping is anonymous (not file backed) and private to this process.
func newMMB(size int64) (mmb, error) {
	buf, err := syscall.Mmap(
		-1, 0, int(size),
		syscall.PROT_READ|syscall.PROT_WRITE, // pages are readable and writable
		syscall.MAP_ANON|syscall.MAP_PRIVATE, // anonymous, private (copy-on-write) mapping
	)
	if err != nil {
		return nil, os.NewSyscallError("Mmap", err)
	}
	return buf, nil
}

// delete cleans up the memory mapped buffer
func (m *mmb) delete() {
	unmapErr := syscall.Munmap(*m)
	// Clear the slice first so callers cannot touch the unmapped memory.
	*m = nil
	if unmapErr != nil {
		// if we get here, there is likely memory corruption.
		// please open an issue https://github.com/Azure/azure-sdk-for-go/issues
		panic(fmt.Sprintf("Munmap error: %v", unmapErr))
	}
}
package file

import (
	"fmt"
	"os"
	"reflect"
	"syscall"
	"unsafe"
)

// mmb is a memory mapped buffer
type mmb []byte

// newMMB creates a new memory mapped buffer with the specified size
func newMMB(size int64) (mmb, error) {
	const InvalidHandleValue = ^uintptr(0) // -1

	prot, access := uint32(syscall.PAGE_READWRITE), uint32(syscall.FILE_MAP_WRITE)
	// Passing INVALID_HANDLE_VALUE maps memory backed by the system paging file.
	// The 64-bit size is split into its high and low 32-bit halves as the Win32 API requires.
	hMMF, err := syscall.CreateFileMapping(syscall.Handle(InvalidHandleValue), nil, prot, uint32(size>>32), uint32(size&0xffffffff), nil)
	if err != nil {
		return nil, os.NewSyscallError("CreateFileMapping", err)
	}
	// Closing the file-mapping handle here is safe: the mapped view created below keeps
	// the underlying section alive until UnmapViewOfFile is called in delete().
	defer func() {
		_ = syscall.CloseHandle(hMMF)
	}()

	addr, err := syscall.MapViewOfFile(hMMF, access, 0, 0, uintptr(size))
	if err != nil {
		return nil, os.NewSyscallError("MapViewOfFile", err)
	}

	// Manufacture a []byte that aliases the mapped region by populating a slice header in place.
	// NOTE(review): reflect.SliceHeader is deprecated in newer Go releases; unsafe.Slice is the
	// modern equivalent — confirm against the module's minimum supported Go version before changing.
	m := mmb{}
	h := (*reflect.SliceHeader)(unsafe.Pointer(&m))
	h.Data = addr
	h.Len = int(size)
	h.Cap = h.Len
	return m, nil
}

// delete cleans up the memory mapped buffer
func (m *mmb) delete() {
	// Recover the base address of the mapped view from the first element, then clear the
	// slice so callers cannot touch the unmapped memory afterwards.
	addr := uintptr(unsafe.Pointer(&(([]byte)(*m)[0])))
	*m = mmb{}
	err := syscall.UnmapViewOfFile(addr)
	if err != nil {
		// if we get here, there is likely memory corruption.
		// please open an issue https://github.com/Azure/azure-sdk-for-go/issues
		panic(fmt.Sprintf("UnmapViewOfFile error: %v", err))
	}
}
+ +package file + +import ( + "encoding/binary" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/sas" + "io" + "time" +) + +// SharedKeyCredential contains an account's name and its primary or secondary key. +type SharedKeyCredential = exported.SharedKeyCredential + +// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the +// storage account's name and either its primary or secondary key. +func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) { + return exported.NewSharedKeyCredential(accountName, accountKey) +} + +// SMBProperties contains the optional parameters regarding the SMB/NTFS properties for a file. +type SMBProperties = exported.SMBProperties + +// NTFSFileAttributes for Files and Directories. +// The subset of attributes is listed at: https://learn.microsoft.com/en-us/rest/api/storageservices/set-file-properties#file-system-attributes. +type NTFSFileAttributes = exported.NTFSFileAttributes + +// Permissions contains the optional parameters for the permissions on the file. +type Permissions = exported.Permissions + +// HTTPHeaders contains optional parameters for the Client.Create method. +type HTTPHeaders = generated.ShareFileHTTPHeaders + +// LeaseAccessConditions contains optional parameters to access leased entity. +type LeaseAccessConditions = generated.LeaseAccessConditions + +// SourceModifiedAccessConditions contains a group of parameters for the FileClient.UploadRangeFromURL method. +type SourceModifiedAccessConditions = generated.SourceModifiedAccessConditions + +// HTTPRange defines a range of bytes within an HTTP resource, starting at offset and +// ending at offset+count. 
A zero-value HTTPRange indicates the entire resource. An HTTPRange
// with a non-zero offset and a zero-value count indicates the span from the offset to the resource's end.
type HTTPRange = exported.HTTPRange

// ShareFileRangeList - The list of file ranges.
type ShareFileRangeList = generated.ShareFileRangeList

// ClearRange - Ranges that were cleared.
type ClearRange = generated.ClearRange

// ShareFileRange - An Azure Storage file range.
type ShareFileRange = generated.FileRange

// ---------------------------------------------------------------------------------------------------------------------

// CreateOptions contains the optional parameters for the Client.Create method.
type CreateOptions struct {
	// The default value is 'None' for Attributes and 'now' for CreationTime and LastWriteTime fields in file.SMBProperties.
	SMBProperties *SMBProperties
	// The default value is 'inherit' for Permission field in file.Permissions.
	Permissions *Permissions
	HTTPHeaders *HTTPHeaders
	LeaseAccessConditions *LeaseAccessConditions
	// A name-value pair to associate with a file storage object.
+ Metadata map[string]*string +} + +func (o *CreateOptions) format() (fileAttributes string, fileCreationTime string, fileLastWriteTime string, + createOptions *generated.FileClientCreateOptions, fileHTTPHeaders *generated.ShareFileHTTPHeaders, leaseAccessConditions *LeaseAccessConditions) { + if o == nil { + return shared.FileAttributesNone, shared.DefaultCurrentTimeString, shared.DefaultCurrentTimeString, &generated.FileClientCreateOptions{ + FilePermission: to.Ptr(shared.DefaultFilePermissionString), + }, nil, nil + } + + fileAttributes, fileCreationTime, fileLastWriteTime = o.SMBProperties.Format(false, shared.FileAttributesNone, shared.DefaultCurrentTimeString) + + permission, permissionKey := o.Permissions.Format(shared.DefaultFilePermissionString) + + createOptions = &generated.FileClientCreateOptions{ + FilePermission: permission, + FilePermissionKey: permissionKey, + Metadata: o.Metadata, + } + + fileHTTPHeaders = o.HTTPHeaders + leaseAccessConditions = o.LeaseAccessConditions + + return +} + +// --------------------------------------------------------------------------------------------------------------------- + +// DeleteOptions contains the optional parameters for the Client.Delete method. +type DeleteOptions struct { + // LeaseAccessConditions contains optional parameters to access leased entity. + LeaseAccessConditions *LeaseAccessConditions +} + +func (o *DeleteOptions) format() (*generated.FileClientDeleteOptions, *generated.LeaseAccessConditions) { + if o == nil { + return nil, nil + } + return nil, o.LeaseAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetPropertiesOptions contains the optional parameters for the Client.GetProperties method. +type GetPropertiesOptions struct { + // ShareSnapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query for the file properties. 
+ ShareSnapshot *string + // LeaseAccessConditions contains optional parameters to access leased entity. + LeaseAccessConditions *LeaseAccessConditions +} + +func (o *GetPropertiesOptions) format() (*generated.FileClientGetPropertiesOptions, *generated.LeaseAccessConditions) { + if o == nil { + return nil, nil + } + + return &generated.FileClientGetPropertiesOptions{ + Sharesnapshot: o.ShareSnapshot, + }, o.LeaseAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// SetHTTPHeadersOptions contains the optional parameters for the Client.SetHTTPHeaders method. +type SetHTTPHeadersOptions struct { + // Resizes a file to the specified size. If the specified byte value is less than the current size of the file, then all ranges + // above the specified byte value are cleared. + FileContentLength *int64 + // The default value is 'preserve' for Attributes, CreationTime and LastWriteTime fields in file.SMBProperties. + SMBProperties *SMBProperties + // The default value is 'preserve' for Permission field in file.Permissions. + Permissions *Permissions + HTTPHeaders *HTTPHeaders + // LeaseAccessConditions contains optional parameters to access leased entity. 
+ LeaseAccessConditions *LeaseAccessConditions +} + +func (o *SetHTTPHeadersOptions) format() (fileAttributes string, fileCreationTime string, fileLastWriteTime string, + opts *generated.FileClientSetHTTPHeadersOptions, fileHTTPHeaders *generated.ShareFileHTTPHeaders, leaseAccessConditions *LeaseAccessConditions) { + if o == nil { + return shared.DefaultPreserveString, shared.DefaultPreserveString, shared.DefaultPreserveString, &generated.FileClientSetHTTPHeadersOptions{ + FilePermission: to.Ptr(shared.DefaultPreserveString), + }, nil, nil + } + + fileAttributes, fileCreationTime, fileLastWriteTime = o.SMBProperties.Format(false, shared.DefaultPreserveString, shared.DefaultPreserveString) + + permission, permissionKey := o.Permissions.Format(shared.DefaultPreserveString) + + opts = &generated.FileClientSetHTTPHeadersOptions{ + FileContentLength: o.FileContentLength, + FilePermission: permission, + FilePermissionKey: permissionKey, + } + + fileHTTPHeaders = o.HTTPHeaders + leaseAccessConditions = o.LeaseAccessConditions + + return +} + +// --------------------------------------------------------------------------------------------------------------------- + +// SetMetadataOptions contains the optional parameters for the Client.SetMetadata method. +type SetMetadataOptions struct { + // A name-value pair to associate with a file storage object. + Metadata map[string]*string + // LeaseAccessConditions contains optional parameters to access leased entity. 
+ LeaseAccessConditions *LeaseAccessConditions +} + +func (o *SetMetadataOptions) format() (*generated.FileClientSetMetadataOptions, *generated.LeaseAccessConditions) { + if o == nil { + return nil, nil + } + return &generated.FileClientSetMetadataOptions{ + Metadata: o.Metadata, + }, o.LeaseAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// StartCopyFromURLOptions contains the optional parameters for the Client.StartCopyFromURL method. +type StartCopyFromURLOptions struct { + // A name-value pair to associate with a file storage object. + Metadata map[string]*string + // required if x-ms-file-permission-copy-mode is specified as override + Permissions *Permissions + CopyFileSMBInfo *CopyFileSMBInfo + // LeaseAccessConditions contains optional parameters to access leased entity. + // Required if the destination file has an active lease. + LeaseAccessConditions *LeaseAccessConditions +} + +func (o *StartCopyFromURLOptions) format() (*generated.FileClientStartCopyOptions, *generated.CopyFileSMBInfo, *generated.LeaseAccessConditions) { + if o == nil { + return nil, nil, nil + } + + var permission, permissionKey *string + if o.Permissions != nil { + permission = o.Permissions.Permission + permissionKey = o.Permissions.PermissionKey + } + + opts := &generated.FileClientStartCopyOptions{ + FilePermission: permission, + FilePermissionKey: permissionKey, + Metadata: o.Metadata, + } + return opts, o.CopyFileSMBInfo.format(), o.LeaseAccessConditions +} + +// CopyFileSMBInfo contains a group of parameters for the FileClient.StartCopy method. +type CopyFileSMBInfo struct { + // Specifies either the option to copy file attributes from a source file(source) to a target file or a list of attributes + // to set on a target file. 
+ Attributes CopyFileAttributes + // Specifies either the option to copy file creation time from a source file(source) to a target file or a time value in ISO + // 8601 format to set as creation time on a target file. + CreationTime CopyFileCreationTime + // Specifies either the option to copy file last write time from a source file(source) to a target file or a time value in + // ISO 8601 format to set as last write time on a target file. + LastWriteTime CopyFileLastWriteTime + // Specifies the option to copy file security descriptor from source file or to set it using the value which is defined by + // the header value of x-ms-file-permission or x-ms-file-permission-key. + PermissionCopyMode *PermissionCopyModeType + // Specifies the option to overwrite the target file if it already exists and has read-only attribute set. + IgnoreReadOnly *bool + // Specifies the option to set archive attribute on a target file. True means archive attribute will be set on a target file + // despite attribute overrides or a source file state. + SetArchiveAttribute *bool +} + +func (c *CopyFileSMBInfo) format() *generated.CopyFileSMBInfo { + if c == nil { + return nil + } + + opts := &generated.CopyFileSMBInfo{ + FilePermissionCopyMode: c.PermissionCopyMode, + IgnoreReadOnly: c.IgnoreReadOnly, + SetArchiveAttribute: c.SetArchiveAttribute, + } + + if c.Attributes != nil { + opts.FileAttributes = c.Attributes.FormatAttributes() + } + if c.CreationTime != nil { + opts.FileCreationTime = c.CreationTime.FormatCreationTime() + } + if c.LastWriteTime != nil { + opts.FileLastWriteTime = c.LastWriteTime.FormatLastWriteTime() + } + + return opts +} + +// CopyFileAttributes specifies either the option to copy file attributes from a source file(source) to a target file or +// a list of attributes to set on a target file. 
+type CopyFileAttributes = exported.CopyFileAttributes + +// SourceCopyFileAttributes specifies to copy file attributes from a source file(source) to a target file +type SourceCopyFileAttributes = exported.SourceCopyFileAttributes + +// DestinationCopyFileAttributes specifies a list of attributes to set on a target file. +type DestinationCopyFileAttributes = exported.DestinationCopyFileAttributes + +// CopyFileCreationTime specifies either the option to copy file creation time from a source file(source) to a target file or +// a time value in ISO 8601 format to set as creation time on a target file. +type CopyFileCreationTime = exported.CopyFileCreationTime + +// SourceCopyFileCreationTime specifies to copy file creation time from a source file(source) to a target file. +type SourceCopyFileCreationTime = exported.SourceCopyFileCreationTime + +// DestinationCopyFileCreationTime specifies a time value in ISO 8601 format to set as creation time on a target file. +type DestinationCopyFileCreationTime = exported.DestinationCopyFileCreationTime + +// CopyFileLastWriteTime specifies either the option to copy file last write time from a source file(source) to a target file or +// a time value in ISO 8601 format to set as last write time on a target file. +type CopyFileLastWriteTime = exported.CopyFileLastWriteTime + +// SourceCopyFileLastWriteTime specifies to copy file last write time from a source file(source) to a target file. +type SourceCopyFileLastWriteTime = exported.SourceCopyFileLastWriteTime + +// DestinationCopyFileLastWriteTime specifies a time value in ISO 8601 format to set as last write time on a target file. +type DestinationCopyFileLastWriteTime = exported.DestinationCopyFileLastWriteTime + +// --------------------------------------------------------------------------------------------------------------------- + +// AbortCopyOptions contains the optional parameters for the Client.AbortCopy method. 
+type AbortCopyOptions struct { + // LeaseAccessConditions contains optional parameters to access leased entity. + // Required if the destination file has an active lease. + LeaseAccessConditions *LeaseAccessConditions +} + +func (o *AbortCopyOptions) format() (*generated.FileClientAbortCopyOptions, *generated.LeaseAccessConditions) { + if o == nil { + return nil, nil + } + + return nil, o.LeaseAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// DownloadStreamOptions contains the optional parameters for the Client.DownloadStream method. +type DownloadStreamOptions struct { + // Range specifies a range of bytes. The default value is all bytes. + Range HTTPRange + // When this header is set to true and specified together with the Range header, the service returns the MD5 hash for the + // range, as long as the range is less than or equal to 4 MB in size. + RangeGetContentMD5 *bool + // LeaseAccessConditions contains optional parameters to access leased entity. + // If specified, the operation is performed only if the file's lease is currently active and + // the lease ID that's specified in the request matches the lease ID of the file. + // Otherwise, the operation fails with status code 412 (Precondition Failed). + LeaseAccessConditions *LeaseAccessConditions +} + +func (o *DownloadStreamOptions) format() (*generated.FileClientDownloadOptions, *LeaseAccessConditions) { + if o == nil { + return nil, nil + } + return &generated.FileClientDownloadOptions{ + Range: exported.FormatHTTPRange(o.Range), + RangeGetContentMD5: o.RangeGetContentMD5, + }, o.LeaseAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// downloadOptions contains common options used by the Client.DownloadBuffer and Client.DownloadFile methods. 
+type downloadOptions struct { + // Range specifies a range of bytes. The default value is all bytes. + Range HTTPRange + + // ChunkSize specifies the chunk size to use for each parallel download; the default size is 4MB. + ChunkSize int64 + + // Progress is a function that is invoked periodically as bytes are received. + Progress func(bytesTransferred int64) + + // LeaseAccessConditions contains optional parameters to access leased entity. + LeaseAccessConditions *LeaseAccessConditions + + // Concurrency indicates the maximum number of chunks to download in parallel (0=default). + Concurrency uint16 + + // RetryReaderOptionsPerChunk is used when downloading each chunk. + RetryReaderOptionsPerChunk RetryReaderOptions +} + +func (o *downloadOptions) getFilePropertiesOptions() *GetPropertiesOptions { + if o == nil { + return nil + } + return &GetPropertiesOptions{ + LeaseAccessConditions: o.LeaseAccessConditions, + } +} + +func (o *downloadOptions) getDownloadFileOptions(rng HTTPRange) *DownloadStreamOptions { + downloadFileOptions := &DownloadStreamOptions{ + Range: rng, + } + if o != nil { + downloadFileOptions.LeaseAccessConditions = o.LeaseAccessConditions + } + return downloadFileOptions +} + +// DownloadBufferOptions contains the optional parameters for the Client.DownloadBuffer method. +type DownloadBufferOptions struct { + // Range specifies a range of bytes. The default value is all bytes. + Range HTTPRange + + // ChunkSize specifies the chunk size to use for each parallel download; the default size is 4MB. + ChunkSize int64 + + // Progress is a function that is invoked periodically as bytes are received. + Progress func(bytesTransferred int64) + + // LeaseAccessConditions contains optional parameters to access leased entity. + LeaseAccessConditions *LeaseAccessConditions + + // Concurrency indicates the maximum number of chunks to download in parallel (0=default). + Concurrency uint16 + + // RetryReaderOptionsPerChunk is used when downloading each chunk. 
+ RetryReaderOptionsPerChunk RetryReaderOptions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// DownloadFileOptions contains the optional parameters for the Client.DownloadFile method. +type DownloadFileOptions struct { + // Range specifies a range of bytes. The default value is all bytes. + Range HTTPRange + + // ChunkSize specifies the chunk size to use for each parallel download; the default size is 4MB. + ChunkSize int64 + + // Progress is a function that is invoked periodically as bytes are received. + Progress func(bytesTransferred int64) + + // LeaseAccessConditions contains optional parameters to access leased entity. + LeaseAccessConditions *LeaseAccessConditions + + // Concurrency indicates the maximum number of chunks to download in parallel (0=default). + Concurrency uint16 + + // RetryReaderOptionsPerChunk is used when downloading each chunk. + RetryReaderOptionsPerChunk RetryReaderOptions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ResizeOptions contains the optional parameters for the Client.Resize method. +type ResizeOptions struct { + // LeaseAccessConditions contains optional parameters to access leased entity. 
+ LeaseAccessConditions *LeaseAccessConditions +} + +func (o *ResizeOptions) format(contentLength int64) (fileAttributes string, fileCreationTime string, fileLastWriteTime string, + opts *generated.FileClientSetHTTPHeadersOptions, leaseAccessConditions *LeaseAccessConditions) { + fileAttributes, fileCreationTime, fileLastWriteTime = shared.DefaultPreserveString, shared.DefaultPreserveString, shared.DefaultPreserveString + + opts = &generated.FileClientSetHTTPHeadersOptions{ + FileContentLength: &contentLength, + FilePermission: to.Ptr(shared.DefaultPreserveString), + } + + if o != nil { + leaseAccessConditions = o.LeaseAccessConditions + } + + return +} + +// --------------------------------------------------------------------------------------------------------------------- + +// UploadRangeOptions contains the optional parameters for the Client.UploadRange method. +type UploadRangeOptions struct { + // TransactionalValidation specifies the transfer validation type to use. + // The default is nil (no transfer validation). + TransactionalValidation TransferValidationType + // LeaseAccessConditions contains optional parameters to access leased entity. 
+ LeaseAccessConditions *LeaseAccessConditions +} + +func (o *UploadRangeOptions) format(offset int64, body io.ReadSeekCloser) (string, int64, *generated.FileClientUploadRangeOptions, *generated.LeaseAccessConditions, error) { + if offset < 0 || body == nil { + return "", 0, nil, nil, errors.New("invalid argument: offset must be >= 0 and body must not be nil") + } + + count, err := shared.ValidateSeekableStreamAt0AndGetCount(body) + if err != nil { + return "", 0, nil, nil, err + } + + if count == 0 { + return "", 0, nil, nil, errors.New("invalid argument: body must contain readable data whose size is > 0") + } + + httpRange := exported.FormatHTTPRange(HTTPRange{ + Offset: offset, + Count: count, + }) + rangeParam := "" + if httpRange != nil { + rangeParam = *httpRange + } + + var leaseAccessConditions *LeaseAccessConditions + uploadRangeOptions := &generated.FileClientUploadRangeOptions{} + + if o != nil { + leaseAccessConditions = o.LeaseAccessConditions + } + if o != nil && o.TransactionalValidation != nil { + _, err = o.TransactionalValidation.Apply(body, uploadRangeOptions) + if err != nil { + return "", 0, nil, nil, err + } + } + + return rangeParam, count, uploadRangeOptions, leaseAccessConditions, nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ClearRangeOptions contains the optional parameters for the Client.ClearRange method. +type ClearRangeOptions struct { + // LeaseAccessConditions contains optional parameters to access leased entity. 
+ LeaseAccessConditions *LeaseAccessConditions +} + +func (o *ClearRangeOptions) format(contentRange HTTPRange) (string, *generated.LeaseAccessConditions, error) { + httpRange := exported.FormatHTTPRange(contentRange) + if httpRange == nil || contentRange.Offset < 0 || contentRange.Count <= 0 { + return "", nil, errors.New("invalid argument: either offset is < 0 or count <= 0") + } + + if o == nil { + return *httpRange, nil, nil + } + + return *httpRange, o.LeaseAccessConditions, nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// UploadRangeFromURLOptions contains the optional parameters for the Client.UploadRangeFromURL method. +type UploadRangeFromURLOptions struct { + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + // Specify the crc64 calculated for the range of bytes that must be read from the copy source. + SourceContentCRC64 uint64 + SourceModifiedAccessConditions *SourceModifiedAccessConditions + LeaseAccessConditions *LeaseAccessConditions +} + +func (o *UploadRangeFromURLOptions) format(sourceOffset int64, destinationOffset int64, count int64) (string, *generated.FileClientUploadRangeFromURLOptions, *generated.SourceModifiedAccessConditions, *generated.LeaseAccessConditions, error) { + if sourceOffset < 0 || destinationOffset < 0 { + return "", nil, nil, nil, errors.New("invalid argument: source and destination offsets must be >= 0") + } + + httpRangeSrc := exported.FormatHTTPRange(HTTPRange{Offset: sourceOffset, Count: count}) + httpRangeDest := exported.FormatHTTPRange(HTTPRange{Offset: destinationOffset, Count: count}) + destRange := "" + if httpRangeDest != nil { + destRange = *httpRangeDest + } + + opts := &generated.FileClientUploadRangeFromURLOptions{ + SourceRange: httpRangeSrc, + } + + var sourceModifiedAccessConditions *SourceModifiedAccessConditions + var 
leaseAccessConditions *LeaseAccessConditions + + if o != nil { + opts.CopySourceAuthorization = o.CopySourceAuthorization + sourceModifiedAccessConditions = o.SourceModifiedAccessConditions + leaseAccessConditions = o.LeaseAccessConditions + + buf := make([]byte, 8) + binary.LittleEndian.PutUint64(buf, o.SourceContentCRC64) + opts.SourceContentCRC64 = buf + } + + return destRange, opts, sourceModifiedAccessConditions, leaseAccessConditions, nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetRangeListOptions contains the optional parameters for the Client.GetRangeList method. +type GetRangeListOptions struct { + // The previous snapshot parameter is an opaque DateTime value that, when present, specifies the previous snapshot. + PrevShareSnapshot *string + // Specifies the range of bytes over which to list ranges, inclusively. + Range HTTPRange + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query. + ShareSnapshot *string + // LeaseAccessConditions contains optional parameters to access leased entity. + LeaseAccessConditions *LeaseAccessConditions +} + +func (o *GetRangeListOptions) format() (*generated.FileClientGetRangeListOptions, *generated.LeaseAccessConditions) { + if o == nil { + return nil, nil + } + + return &generated.FileClientGetRangeListOptions{ + Prevsharesnapshot: o.PrevShareSnapshot, + Range: exported.FormatHTTPRange(o.Range), + Sharesnapshot: o.ShareSnapshot, + }, o.LeaseAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetSASURLOptions contains the optional parameters for the Client.GetSASURL method. 
+type GetSASURLOptions struct { + StartTime *time.Time +} + +func (o *GetSASURLOptions) format() time.Time { + if o == nil { + return time.Time{} + } + + var st time.Time + if o.StartTime != nil { + st = o.StartTime.UTC() + } else { + st = time.Time{} + } + return st +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ForceCloseHandlesOptions contains the optional parameters for the Client.ForceCloseHandles method. +type ForceCloseHandlesOptions struct { + // A string value that identifies the portion of the list to be returned with the next list operation. The operation returns + // a marker value within the response body if the list returned was not complete. + // The marker value may then be used in a subsequent call to request the next set of list items. The marker value is opaque + // to the client. + Marker *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query. + ShareSnapshot *string +} + +func (o *ForceCloseHandlesOptions) format() *generated.FileClientForceCloseHandlesOptions { + if o == nil { + return nil + } + + return &generated.FileClientForceCloseHandlesOptions{ + Marker: o.Marker, + Sharesnapshot: o.ShareSnapshot, + } +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ListHandlesOptions contains the optional parameters for the Client.ListHandles method. +type ListHandlesOptions struct { + // A string value that identifies the portion of the list to be returned with the next list operation. The operation returns + // a marker value within the response body if the list returned was not complete. + // The marker value may then be used in a subsequent call to request the next set of list items. The marker value is opaque + // to the client. + Marker *string + // Specifies the maximum number of entries to return. 
If the request does not specify maxresults, or specifies a value greater + // than 5,000, the server will return up to 5,000 items. + MaxResults *int32 + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query. + ShareSnapshot *string +} + +func (o *ListHandlesOptions) format() *generated.FileClientListHandlesOptions { + if o == nil { + return nil + } + + return &generated.FileClientListHandlesOptions{ + Marker: o.Marker, + Maxresults: o.MaxResults, + Sharesnapshot: o.ShareSnapshot, + } +} + +// Handle - A listed Azure Storage handle item. +type Handle = generated.Handle + +// --------------------------------------------------------------------------------------------------------------------- + +// uploadFromReaderOptions identifies options used by the UploadBuffer and UploadFile functions. +type uploadFromReaderOptions struct { + // ChunkSize specifies the chunk size to use in bytes; the default (and maximum size) is MaxUpdateRangeBytes. + ChunkSize int64 + + // Progress is a function that is invoked periodically as bytes are sent to the FileClient. + // Note that the progress reporting is not always increasing; it can go down when retrying a request. + Progress func(bytesTransferred int64) + + // Concurrency indicates the maximum number of chunks to upload in parallel (default is 5) + Concurrency uint16 + + // LeaseAccessConditions contains optional parameters to access leased entity. + LeaseAccessConditions *LeaseAccessConditions +} + +// UploadBufferOptions provides set of configurations for Client.UploadBuffer operation. +type UploadBufferOptions = uploadFromReaderOptions + +// UploadFileOptions provides set of configurations for Client.UploadFile operation. 
+type UploadFileOptions = uploadFromReaderOptions + +func (o *uploadFromReaderOptions) getUploadRangeOptions() *UploadRangeOptions { + return &UploadRangeOptions{ + LeaseAccessConditions: o.LeaseAccessConditions, + } +} + +// --------------------------------------------------------------------------------------------------------------------- + +// UploadStreamOptions provides set of configurations for Client.UploadStream operation. +type UploadStreamOptions struct { + // ChunkSize defines the size of the buffer used during upload. The default and minimum value is 1 MiB. + // Maximum size of a chunk is MaxUpdateRangeBytes. + ChunkSize int64 + + // Concurrency defines the max number of concurrent uploads to be performed to upload the file. + // Each concurrent upload will create a buffer of size ChunkSize. The default value is one. + Concurrency int + + // LeaseAccessConditions contains optional parameters to access leased entity. + LeaseAccessConditions *LeaseAccessConditions +} + +func (u *UploadStreamOptions) setDefaults() { + if u.Concurrency == 0 { + u.Concurrency = 1 + } + + if u.ChunkSize < _1MiB { + u.ChunkSize = _1MiB + } +} + +func (u *UploadStreamOptions) getUploadRangeOptions() *UploadRangeOptions { + return &UploadRangeOptions{ + LeaseAccessConditions: u.LeaseAccessConditions, + } +} + +// URLParts object represents the components that make up an Azure Storage Container/Blob URL. +// NOTE: Changing any SAS-related field requires computing a new SAS signature. +type URLParts = sas.URLParts + +// ParseURL parses a URL initializing URLParts' fields including any SAS-related & snapshot query parameters. Any other +// query parameters remain in the UnparsedParams field. This method overwrites all fields in the URLParts object. 
+func ParseURL(u string) (URLParts, error) { + return sas.ParseURL(u) +} diff --git a/sdk/storage/azfile/file/responses.go b/sdk/storage/azfile/file/responses.go new file mode 100644 index 000000000000..e47d87741861 --- /dev/null +++ b/sdk/storage/azfile/file/responses.go @@ -0,0 +1,93 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package file + +import ( + "context" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/generated" + "io" +) + +// CreateResponse contains the response from method Client.Create. +type CreateResponse = generated.FileClientCreateResponse + +// DeleteResponse contains the response from method Client.Delete. +type DeleteResponse = generated.FileClientDeleteResponse + +// GetPropertiesResponse contains the response from method Client.GetProperties. +type GetPropertiesResponse = generated.FileClientGetPropertiesResponse + +// SetMetadataResponse contains the response from method Client.SetMetadata. +type SetMetadataResponse = generated.FileClientSetMetadataResponse + +// SetHTTPHeadersResponse contains the response from method Client.SetHTTPHeaders. +type SetHTTPHeadersResponse = generated.FileClientSetHTTPHeadersResponse + +// StartCopyFromURLResponse contains the response from method Client.StartCopyFromURL. +type StartCopyFromURLResponse = generated.FileClientStartCopyResponse + +// AbortCopyResponse contains the response from method Client.AbortCopy. +type AbortCopyResponse = generated.FileClientAbortCopyResponse + +// DownloadResponse contains the response from method FileClient.Download. +type DownloadResponse = generated.FileClientDownloadResponse + +// DownloadStreamResponse contains the response from method Client.DownloadStream. +// To read from the stream, read from the Body field, or call the NewRetryReader method. 
+type DownloadStreamResponse struct { + DownloadResponse + + client *Client + getInfo httpGetterInfo + leaseAccessConditions *LeaseAccessConditions +} + +// NewRetryReader constructs new RetryReader stream for reading data. If a connection fails while +// reading, it will make additional requests to reestablish a connection and continue reading. +// Pass nil for options to accept the default options. +// Callers of this method should not access the DownloadStreamResponse.Body field. +func (r *DownloadStreamResponse) NewRetryReader(ctx context.Context, options *RetryReaderOptions) *RetryReader { + if options == nil { + options = &RetryReaderOptions{} + } + + return newRetryReader(ctx, r.Body, r.getInfo, func(ctx context.Context, getInfo httpGetterInfo) (io.ReadCloser, error) { + options := DownloadStreamOptions{ + Range: getInfo.Range, + LeaseAccessConditions: r.leaseAccessConditions, + } + resp, err := r.client.DownloadStream(ctx, &options) + if err != nil { + return nil, err + } + return resp.Body, err + }, *options) +} + +// ResizeResponse contains the response from method Client.Resize. +type ResizeResponse = generated.FileClientSetHTTPHeadersResponse + +// UploadRangeResponse contains the response from method Client.UploadRange. +type UploadRangeResponse = generated.FileClientUploadRangeResponse + +// ClearRangeResponse contains the response from method Client.ClearRange. +type ClearRangeResponse = generated.FileClientUploadRangeResponse + +// UploadRangeFromURLResponse contains the response from method Client.UploadRangeFromURL. +type UploadRangeFromURLResponse = generated.FileClientUploadRangeFromURLResponse + +// GetRangeListResponse contains the response from method Client.GetRangeList. +type GetRangeListResponse = generated.FileClientGetRangeListResponse + +// ForceCloseHandlesResponse contains the response from method Client.ForceCloseHandles. 
+type ForceCloseHandlesResponse = generated.FileClientForceCloseHandlesResponse + +// ListHandlesResponse contains the response from method Client.ListHandles. +type ListHandlesResponse = generated.FileClientListHandlesResponse + +// ListHandlesSegmentResponse - An enumeration of handles. +type ListHandlesSegmentResponse = generated.ListHandlesResponse diff --git a/sdk/storage/azfile/file/retry_reader.go b/sdk/storage/azfile/file/retry_reader.go new file mode 100644 index 000000000000..2e76a91f3169 --- /dev/null +++ b/sdk/storage/azfile/file/retry_reader.go @@ -0,0 +1,186 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package file + +import ( + "context" + "io" + "net" + "strings" + "sync" +) + +// HTTPGetter is a function type that refers to a method that performs an HTTP GET operation. +type httpGetter func(ctx context.Context, i httpGetterInfo) (io.ReadCloser, error) + +// httpGetterInfo is passed to an HTTPGetter function passing it parameters +// that should be used to make an HTTP GET request. +type httpGetterInfo struct { + Range HTTPRange +} + +// RetryReaderOptions configures the retry reader's behavior. +// Zero-value fields will have their specified default values applied during use. +// This allows for modification of a subset of fields. +type RetryReaderOptions struct { + // MaxRetries specifies the maximum number of attempts a failed read will be retried + // before producing an error. + // The default value is three. + MaxRetries int32 + + // OnFailedRead, when non-nil, is called after any failure to read. Expected usage is diagnostic logging. + OnFailedRead func(failureCount int32, lastError error, rnge HTTPRange, willRetry bool) + + // EarlyCloseAsError can be set to true to prevent retries after "read on closed response body". 
By default, + // retryReader has the following special behaviour: closing the response body before it is all read is treated as a + // retryable error. This is to allow callers to force a retry by closing the body from another goroutine (e.g. if the = + // read is too slow, caller may want to force a retry in the hope that the retry will be quicker). If + // TreatEarlyCloseAsError is true, then retryReader's special behaviour is suppressed, and "read on closed body" is instead + // treated as a fatal (non-retryable) error. + // Note that setting TreatEarlyCloseAsError only guarantees that Closing will produce a fatal error if the Close happens + // from the same "thread" (goroutine) as Read. Concurrent Close calls from other goroutines may instead produce network errors + // which will be retried. + // The default value is false. + EarlyCloseAsError bool + + doInjectError bool + doInjectErrorRound int32 + injectedError error +} + +// RetryReader attempts to read from response, and if there is a retry-able network error +// returned during reading, it will retry according to retry reader option through executing +// user defined action with provided data to get a new response, and continue the overall reading process +// through reading from the new response. +// RetryReader implements the io.ReadCloser interface. +type RetryReader struct { + ctx context.Context + info httpGetterInfo + retryReaderOptions RetryReaderOptions + getter httpGetter + countWasBounded bool + + // we support Close-ing during Reads (from other goroutines), so we protect the shared state, which is response + responseMu *sync.Mutex + response io.ReadCloser +} + +// newRetryReader creates a retry reader. 
// A MaxRetries below one is normalized to the default of three attempts.
func newRetryReader(ctx context.Context, initialResponse io.ReadCloser, info httpGetterInfo, getter httpGetter, o RetryReaderOptions) *RetryReader {
	if o.MaxRetries < 1 {
		o.MaxRetries = 3
	}
	return &RetryReader{
		ctx:                ctx,
		getter:             getter,
		info:               info,
		countWasBounded:    info.Range.Count != CountToEnd,
		response:           initialResponse,
		responseMu:         &sync.Mutex{},
		retryReaderOptions: o,
	}
}

// setResponse swaps the current response stream under the mutex, since Close
// may run concurrently from another goroutine.
func (s *RetryReader) setResponse(r io.ReadCloser) {
	s.responseMu.Lock()
	defer s.responseMu.Unlock()
	s.response = r
}

// Read from retry reader. On a retryable failure (net.Error, unexpected EOF,
// or an allowed early close), it re-issues the GET for the remaining range via
// the getter and continues; otherwise the error is returned to the caller.
func (s *RetryReader) Read(p []byte) (n int, err error) {
	for try := int32(0); ; try++ {
		// fmt.Println(try) // Uncomment for debugging.
		if s.countWasBounded && s.info.Range.Count == CountToEnd {
			// User specified an original count and the remaining bytes are 0
			// (Count has been decremented down to the sentinel), return 0, EOF.
			return 0, io.EOF
		}

		s.responseMu.Lock()
		resp := s.response
		s.responseMu.Unlock()
		if resp == nil { // We don't have a response stream to read from, try to get one.
			newResponse, err := s.getter(s.ctx, s.info)
			if err != nil {
				return 0, err
			}
			// Successful GET; this is the network stream we'll read from.
			s.setResponse(newResponse)
			resp = newResponse
		}
		n, err := resp.Read(p) // Read from the stream (this will return non-nil err if forceRetry is called, from another goroutine, while it is running)

		// Injection mechanism for testing.
		if s.retryReaderOptions.doInjectError && try == s.retryReaderOptions.doInjectErrorRound {
			if s.retryReaderOptions.injectedError != nil {
				err = s.retryReaderOptions.injectedError
			} else {
				err = &net.DNSError{IsTemporary: true}
			}
		}

		// We successfully read data or end EOF.
		if err == nil || err == io.EOF {
			s.info.Range.Offset += int64(n) // Increments the start offset in case we need to make a new HTTP request in the future
			if s.info.Range.Count != CountToEnd {
				s.info.Range.Count -= int64(n) // Decrement the count in case we need to make a new HTTP request in the future
			}
			return n, err // Return the result to the caller
		}
		_ = s.Close()

		s.setResponse(nil) // Our stream is no longer good

		// Check the retry count and error code, and decide whether to retry.
		retriesExhausted := try >= s.retryReaderOptions.MaxRetries
		_, isNetError := err.(net.Error)
		isUnexpectedEOF := err == io.ErrUnexpectedEOF
		willRetry := (isNetError || isUnexpectedEOF || s.wasRetryableEarlyClose(err)) && !retriesExhausted

		// Notify, for logging purposes, of any failures
		if s.retryReaderOptions.OnFailedRead != nil {
			failureCount := try + 1 // because try is zero-based
			s.retryReaderOptions.OnFailedRead(failureCount, err, s.info.Range, willRetry)
		}

		if willRetry {
			continue
			// Loop around and try to get and read from new stream.
		}
		return n, err // Not retryable, or retries exhausted, so just return
	}
}

// By default, we allow early Closing, from another concurrent goroutine, to be used to force a retry
// Is this safe, to close early from another goroutine? Early close ultimately ends up calling
// net.Conn.Close, and that is documented as "Any blocked Read or Write operations will be unblocked and return errors"
// which is exactly the behaviour we want.
// NOTE: that if caller has forced an early Close from a separate goroutine (separate from the Read)
// then there are two different types of error that may happen - either the one we check for here,
// or a net.Error (due to closure of connection). Which one happens depends on timing. We only need this routine
// to check for one, since the other is a net.Error, which our main Read retry loop is already handling.
+func (s *RetryReader) wasRetryableEarlyClose(err error) bool { + if s.retryReaderOptions.EarlyCloseAsError { + return false // user wants all early closes to be errors, and so not retryable + } + // unfortunately, http.errReadOnClosedResBody is private, so the best we can do here is to check for its text + return strings.HasSuffix(err.Error(), ReadOnClosedBodyMessage) +} + +// ReadOnClosedBodyMessage of retry reader +const ReadOnClosedBodyMessage = "read on closed response body" + +// Close retry reader +func (s *RetryReader) Close() error { + s.responseMu.Lock() + defer s.responseMu.Unlock() + if s.response != nil { + return s.response.Close() + } + return nil +} diff --git a/sdk/storage/azfile/fileerror/error_codes.go b/sdk/storage/azfile/fileerror/error_codes.go new file mode 100644 index 000000000000..c897c0953828 --- /dev/null +++ b/sdk/storage/azfile/fileerror/error_codes.go @@ -0,0 +1,107 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package fileerror + +import ( + "errors" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/generated" +) + +// HasCode returns true if the provided error is an *azcore.ResponseError +// with its ErrorCode field equal to one of the specified Codes. 
+func HasCode(err error, codes ...Code) bool { + var respErr *azcore.ResponseError + if !errors.As(err, &respErr) { + return false + } + + for _, code := range codes { + if respErr.ErrorCode == string(code) { + return true + } + } + + return false +} + +// Code - Error codes returned by the service +type Code = generated.StorageErrorCode + +const ( + AccountAlreadyExists Code = "AccountAlreadyExists" + AccountBeingCreated Code = "AccountBeingCreated" + AccountIsDisabled Code = "AccountIsDisabled" + AuthenticationFailed Code = "AuthenticationFailed" + AuthorizationFailure Code = "AuthorizationFailure" + AuthorizationPermissionMismatch Code = "AuthorizationPermissionMismatch" + AuthorizationProtocolMismatch Code = "AuthorizationProtocolMismatch" + AuthorizationResourceTypeMismatch Code = "AuthorizationResourceTypeMismatch" + AuthorizationServiceMismatch Code = "AuthorizationServiceMismatch" + AuthorizationSourceIPMismatch Code = "AuthorizationSourceIPMismatch" + CannotDeleteFileOrDirectory Code = "CannotDeleteFileOrDirectory" + ClientCacheFlushDelay Code = "ClientCacheFlushDelay" + ConditionHeadersNotSupported Code = "ConditionHeadersNotSupported" + ConditionNotMet Code = "ConditionNotMet" + DeletePending Code = "DeletePending" + DirectoryNotEmpty Code = "DirectoryNotEmpty" + EmptyMetadataKey Code = "EmptyMetadataKey" + FeatureVersionMismatch Code = "FeatureVersionMismatch" + FileLockConflict Code = "FileLockConflict" + InsufficientAccountPermissions Code = "InsufficientAccountPermissions" + InternalError Code = "InternalError" + InvalidAuthenticationInfo Code = "InvalidAuthenticationInfo" + InvalidFileOrDirectoryPathName Code = "InvalidFileOrDirectoryPathName" + InvalidHTTPVerb Code = "InvalidHttpVerb" + InvalidHeaderValue Code = "InvalidHeaderValue" + InvalidInput Code = "InvalidInput" + InvalidMD5 Code = "InvalidMd5" + InvalidMetadata Code = "InvalidMetadata" + InvalidQueryParameterValue Code = "InvalidQueryParameterValue" + InvalidRange Code = "InvalidRange" + 
InvalidResourceName Code = "InvalidResourceName" + InvalidURI Code = "InvalidUri" + InvalidXMLDocument Code = "InvalidXmlDocument" + InvalidXMLNodeValue Code = "InvalidXmlNodeValue" + MD5Mismatch Code = "Md5Mismatch" + MetadataTooLarge Code = "MetadataTooLarge" + MissingContentLengthHeader Code = "MissingContentLengthHeader" + MissingRequiredHeader Code = "MissingRequiredHeader" + MissingRequiredQueryParameter Code = "MissingRequiredQueryParameter" + MissingRequiredXMLNode Code = "MissingRequiredXmlNode" + MultipleConditionHeadersNotSupported Code = "MultipleConditionHeadersNotSupported" + OperationTimedOut Code = "OperationTimedOut" + OutOfRangeInput Code = "OutOfRangeInput" + OutOfRangeQueryParameterValue Code = "OutOfRangeQueryParameterValue" + ParentNotFound Code = "ParentNotFound" + ReadOnlyAttribute Code = "ReadOnlyAttribute" + RequestBodyTooLarge Code = "RequestBodyTooLarge" + RequestURLFailedToParse Code = "RequestUrlFailedToParse" + ResourceAlreadyExists Code = "ResourceAlreadyExists" + ResourceNotFound Code = "ResourceNotFound" + ResourceTypeMismatch Code = "ResourceTypeMismatch" + ServerBusy Code = "ServerBusy" + ShareAlreadyExists Code = "ShareAlreadyExists" + ShareBeingDeleted Code = "ShareBeingDeleted" + ShareDisabled Code = "ShareDisabled" + ShareHasSnapshots Code = "ShareHasSnapshots" + ShareNotFound Code = "ShareNotFound" + ShareSnapshotCountExceeded Code = "ShareSnapshotCountExceeded" + ShareSnapshotInProgress Code = "ShareSnapshotInProgress" + ShareSnapshotOperationNotSupported Code = "ShareSnapshotOperationNotSupported" + SharingViolation Code = "SharingViolation" + UnsupportedHTTPVerb Code = "UnsupportedHttpVerb" + UnsupportedHeader Code = "UnsupportedHeader" + UnsupportedQueryParameter Code = "UnsupportedQueryParameter" + UnsupportedXMLNode Code = "UnsupportedXmlNode" +) + +var ( + // MissingSharedKeyCredential - Error is returned when SAS URL is being created without SharedKeyCredential. 
+ MissingSharedKeyCredential = errors.New("SAS can only be signed with a SharedKeyCredential") +) diff --git a/sdk/storage/azfile/go.mod b/sdk/storage/azfile/go.mod new file mode 100644 index 000000000000..cbd96fa64efc --- /dev/null +++ b/sdk/storage/azfile/go.mod @@ -0,0 +1,28 @@ +module github.com/Azure/azure-sdk-for-go/sdk/storage/azfile + +go 1.18 + +require ( + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.2 + github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0 + github.com/stretchr/testify v1.7.1 +) + +require ( + github.com/AzureAD/microsoft-authentication-library-for-go v0.9.0 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/dnaeon/go-vcr v1.1.0 // indirect + github.com/golang-jwt/jwt/v4 v4.5.0 // indirect + github.com/google/uuid v1.3.0 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect + github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + golang.org/x/crypto v0.7.0 // indirect + golang.org/x/net v0.8.0 // indirect + golang.org/x/sys v0.7.0 // indirect + golang.org/x/text v0.9.0 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect +) diff --git a/sdk/storage/azfile/go.sum b/sdk/storage/azfile/go.sum new file mode 100644 index 000000000000..8f03fb9639d6 --- /dev/null +++ b/sdk/storage/azfile/go.sum @@ -0,0 +1,46 @@ +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0 h1:rTnT/Jrcm+figWlYz4Ixzt0SJVR2cMC8lvZcimipiEY= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0/go.mod h1:ON4tFdPTwRcgWEaVDrN3584Ef+b7GgSJaXxe5fW9t4M= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.2 h1:uqM+VoHjVH6zdlkLF2b6O0ZANcHoj3rO0PoQ3jglUJA= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.2/go.mod h1:twTKAa1E6hLmSDjLhaCkbTMQKc7p/rNLU40rLxGEOCI= 
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0 h1:u/LLAOFgsMv7HmNL4Qufg58y+qElGOt5qv0z1mURkRY= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0/go.mod h1:2e8rMJtl2+2j+HXbTBwnyGpm5Nou7KhvSfxOq8JpTag= +github.com/AzureAD/microsoft-authentication-library-for-go v0.9.0 h1:UE9n9rkJF62ArLb1F3DEjRt8O3jLwMWdSoypKV4f3MU= +github.com/AzureAD/microsoft-authentication-library-for-go v0.9.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dnaeon/go-vcr v1.1.0 h1:ReYa/UBrRyQdant9B4fNHGoCNKw6qh6P0fsdGmZpR7c= +github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko= +github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= +github.com/pmezard/go-difflib 
v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A= +golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= +golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/sdk/storage/azfile/internal/base/clients.go 
b/sdk/storage/azfile/internal/base/clients.go new file mode 100644 index 000000000000..93317d4dc29b --- /dev/null +++ b/sdk/storage/azfile/internal/base/clients.go @@ -0,0 +1,60 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package base + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/generated" +) + +// ClientOptions contains the optional parameters when creating a Client. +type ClientOptions struct { + azcore.ClientOptions +} + +type Client[T any] struct { + inner *T + sharedKey *exported.SharedKeyCredential +} + +func InnerClient[T any](client *Client[T]) *T { + return client.inner +} + +func SharedKey[T any](client *Client[T]) *exported.SharedKeyCredential { + return client.sharedKey +} + +func NewServiceClient(serviceURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *Client[generated.ServiceClient] { + return &Client[generated.ServiceClient]{ + inner: generated.NewServiceClient(serviceURL, pipeline), + sharedKey: sharedKey, + } +} + +func NewShareClient(shareURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *Client[generated.ShareClient] { + return &Client[generated.ShareClient]{ + inner: generated.NewShareClient(shareURL, pipeline), + sharedKey: sharedKey, + } +} + +func NewDirectoryClient(directoryURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *Client[generated.DirectoryClient] { + return &Client[generated.DirectoryClient]{ + inner: generated.NewDirectoryClient(directoryURL, pipeline), + sharedKey: sharedKey, + } +} + +func NewFileClient(fileURL string, pipeline runtime.Pipeline, sharedKey 
// The AccessPolicyPermission type simplifies creating the permissions string for a share's access policy.
// Initialize an instance of this type and then call its String method to set AccessPolicy's Permission field.
type AccessPolicyPermission struct {
	Read, Create, Write, Delete, List bool
}

// String produces the access policy permission string for an Azure Storage share.
// The flags are emitted in the fixed "rcwdl" order the service expects.
// Call this method to set AccessPolicy's Permission field.
func (p AccessPolicyPermission) String() string {
	var b bytes.Buffer
	if p.Read {
		b.WriteRune('r')
	}
	if p.Create {
		b.WriteRune('c')
	}
	if p.Write {
		b.WriteRune('w')
	}
	if p.Delete {
		b.WriteRune('d')
	}
	if p.List {
		b.WriteRune('l')
	}
	return b.String()
}

// Parse initializes the AccessPolicyPermission's fields from a string.
// Any previously set flags are cleared first. It returns an error when s
// contains a character other than 'r', 'c', 'w', 'd' or 'l'.
func (p *AccessPolicyPermission) Parse(s string) error {
	*p = AccessPolicyPermission{} // Clear the flags
	for _, r := range s {
		switch r {
		case 'r':
			p.Read = true
		case 'c':
			p.Create = true
		case 'w':
			p.Write = true
		case 'd':
			p.Delete = true
		case 'l':
			p.List = true
		default:
			// %c renders the offending rune as a character; the previous %v
			// printed its numeric code point (e.g. '120' for 'x'), which is
			// unhelpful in the error message.
			return fmt.Errorf("invalid permission: '%c'", r)
		}
	}
	return nil
}
+type DestinationCopyFileCreationTime time.Time + +func (d DestinationCopyFileCreationTime) FormatCreationTime() *string { + return to.Ptr(time.Time(d).UTC().Format(generated.ISO8601)) +} + +func (d DestinationCopyFileCreationTime) notPubliclyImplementable() {} + +// --------------------------------------------------------------------------------------------------------------------- + +// CopyFileLastWriteTime specifies either the option to copy file last write time from a source file(source) to a target file or +// a time value in ISO 8601 format to set as last write time on a target file. +type CopyFileLastWriteTime interface { + FormatLastWriteTime() *string + notPubliclyImplementable() +} + +// SourceCopyFileLastWriteTime specifies to copy file last write time from a source file(source) to a target file. +type SourceCopyFileLastWriteTime struct { +} + +func (s SourceCopyFileLastWriteTime) FormatLastWriteTime() *string { + return to.Ptr("source") +} + +func (s SourceCopyFileLastWriteTime) notPubliclyImplementable() {} + +// DestinationCopyFileLastWriteTime specifies a time value in ISO 8601 format to set as last write time on a target file. +type DestinationCopyFileLastWriteTime time.Time + +func (d DestinationCopyFileLastWriteTime) FormatLastWriteTime() *string { + return to.Ptr(time.Time(d).UTC().Format(generated.ISO8601)) +} + +func (d DestinationCopyFileLastWriteTime) notPubliclyImplementable() {} + +// --------------------------------------------------------------------------------------------------------------------- + +// CopyFileAttributes specifies either the option to copy file attributes from a source file(source) to a target file or +// a list of attributes to set on a target file. 
+type CopyFileAttributes interface { + FormatAttributes() *string + notPubliclyImplementable() +} + +// SourceCopyFileAttributes specifies to copy file attributes from a source file(source) to a target file +type SourceCopyFileAttributes struct { +} + +func (s SourceCopyFileAttributes) FormatAttributes() *string { + return to.Ptr("source") +} + +func (s SourceCopyFileAttributes) notPubliclyImplementable() {} + +// DestinationCopyFileAttributes specifies a list of attributes to set on a target file. +type DestinationCopyFileAttributes NTFSFileAttributes + +func (d DestinationCopyFileAttributes) FormatAttributes() *string { + attributes := NTFSFileAttributes(d) + return to.Ptr(attributes.String()) +} + +func (d DestinationCopyFileAttributes) notPubliclyImplementable() {} diff --git a/sdk/storage/azfile/internal/exported/exported.go b/sdk/storage/azfile/internal/exported/exported.go new file mode 100644 index 000000000000..9bc1ca47df84 --- /dev/null +++ b/sdk/storage/azfile/internal/exported/exported.go @@ -0,0 +1,33 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package exported + +import ( + "fmt" + "strconv" +) + +// HTTPRange defines a range of bytes within an HTTP resource, starting at offset and +// ending at offset+count. A zero-value HTTPRange indicates the entire resource. An HTTPRange +// which has an offset but no zero value count indicates from the offset to the resource's end. +type HTTPRange struct { + Offset int64 + Count int64 +} + +// FormatHTTPRange converts an HTTPRange to its string format. 
+func FormatHTTPRange(r HTTPRange) *string { + if r.Offset == 0 && r.Count == 0 { + return nil // No specified range + } + endOffset := "" // if count == CountToEnd (0) + if r.Count > 0 { + endOffset = strconv.FormatInt((r.Offset+r.Count)-1, 10) + } + dataRange := fmt.Sprintf("bytes=%v-%s", r.Offset, endOffset) + return &dataRange +} diff --git a/sdk/storage/azfile/internal/exported/file_permissions.go b/sdk/storage/azfile/internal/exported/file_permissions.go new file mode 100644 index 000000000000..73fce6afb27c --- /dev/null +++ b/sdk/storage/azfile/internal/exported/file_permissions.go @@ -0,0 +1,32 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package exported + +// Permissions contains the optional parameters for the permissions on the file. +type Permissions struct { + // If specified the permission (security descriptor) shall be set for the directory/file. This header can be used if Permission + // size is <= 8KB, else x-ms-file-permission-key header shall be used. Default + // value: Inherit. If SDDL is specified as input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission + // or x-ms-file-permission-key should be specified. + Permission *string + // Key of the permission to be set for the directory/file. + // Note: Only one of the x-ms-file-permission or x-ms-file-permission-key should be specified. + PermissionKey *string +} + +// Format returns file permission string and permission key. 
+func (p *Permissions) Format(defaultFilePermissionStr string) (*string, *string) { + if p == nil { + return &defaultFilePermissionStr, nil + } + + if p.Permission == nil && p.PermissionKey == nil { + return &defaultFilePermissionStr, nil + } else { + return p.Permission, p.PermissionKey + } +} diff --git a/sdk/storage/azfile/internal/exported/log_events.go b/sdk/storage/azfile/internal/exported/log_events.go new file mode 100644 index 000000000000..d33528ea8eb2 --- /dev/null +++ b/sdk/storage/azfile/internal/exported/log_events.go @@ -0,0 +1,17 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package exported + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +// NOTE: these are publicly exported via type-aliasing in azfile/log.go +const ( + // EventUpload is used when we compute number of chunks to upload and size of each chunk. + EventUpload log.Event = "azfile.Upload" +) diff --git a/sdk/storage/azfile/internal/exported/shared_key_credential.go b/sdk/storage/azfile/internal/exported/shared_key_credential.go new file mode 100644 index 000000000000..439617d07ba1 --- /dev/null +++ b/sdk/storage/azfile/internal/exported/shared_key_credential.go @@ -0,0 +1,218 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
// SharedKeyCredential contains an account's name and its primary or secondary key.
type SharedKeyCredential struct {
	// Only the NewSharedKeyCredential method should set these; all other methods should treat them as read-only
	accountName string
	accountKey  atomic.Value // []byte
}

// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the
// storage account's name and either its primary or secondary key.
func NewSharedKeyCredential(accountName string, accountKey string) (*SharedKeyCredential, error) {
	cred := &SharedKeyCredential{accountName: accountName}
	if err := cred.SetAccountKey(accountKey); err != nil {
		return nil, err
	}
	return cred, nil
}

// AccountName returns the Storage account's name.
func (c *SharedKeyCredential) AccountName() string {
	return c.accountName
}

// SetAccountKey replaces the existing account key with the specified account key.
// The key must be base64-encoded; a decode failure is returned as an error.
func (c *SharedKeyCredential) SetAccountKey(accountKey string) error {
	decoded, err := base64.StdEncoding.DecodeString(accountKey)
	if err != nil {
		return fmt.Errorf("decode account key: %w", err)
	}
	c.accountKey.Store(decoded)
	return nil
}
+func (c *SharedKeyCredential) computeHMACSHA256(message string) (string, error) { + h := hmac.New(sha256.New, c.accountKey.Load().([]byte)) + _, err := h.Write([]byte(message)) + return base64.StdEncoding.EncodeToString(h.Sum(nil)), err +} + +func (c *SharedKeyCredential) buildStringToSign(req *http.Request) (string, error) { + // https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services + headers := req.Header + contentLength := getHeader(shared.HeaderContentLength, headers) + if contentLength == "0" { + contentLength = "" + } + + canonicalizedResource, err := c.buildCanonicalizedResource(req.URL) + if err != nil { + return "", err + } + + stringToSign := strings.Join([]string{ + req.Method, + getHeader(shared.HeaderContentEncoding, headers), + getHeader(shared.HeaderContentLanguage, headers), + contentLength, + getHeader(shared.HeaderContentMD5, headers), + getHeader(shared.HeaderContentType, headers), + "", // Empty date because x-ms-date is expected (as per web page above) + getHeader(shared.HeaderIfModifiedSince, headers), + getHeader(shared.HeaderIfMatch, headers), + getHeader(shared.HeaderIfNoneMatch, headers), + getHeader(shared.HeaderIfUnmodifiedSince, headers), + getHeader(shared.HeaderRange, headers), + c.buildCanonicalizedHeader(headers), + canonicalizedResource, + }, "\n") + return stringToSign, nil +} + +func getHeader(key string, headers map[string][]string) string { + if headers == nil { + return "" + } + if v, ok := headers[key]; ok { + if len(v) > 0 { + return v[0] + } + } + + return "" +} + +func (c *SharedKeyCredential) buildCanonicalizedHeader(headers http.Header) string { + cm := map[string][]string{} + for k, v := range headers { + headerName := strings.TrimSpace(strings.ToLower(k)) + if strings.HasPrefix(headerName, "x-ms-") { + cm[headerName] = v // NOTE: the value must not have any whitespace around it. 
+ } + } + if len(cm) == 0 { + return "" + } + + keys := make([]string, 0, len(cm)) + for key := range cm { + keys = append(keys, key) + } + sort.Strings(keys) + ch := bytes.NewBufferString("") + for i, key := range keys { + if i > 0 { + ch.WriteRune('\n') + } + ch.WriteString(key) + ch.WriteRune(':') + ch.WriteString(strings.Join(cm[key], ",")) + } + return ch.String() +} + +func (c *SharedKeyCredential) buildCanonicalizedResource(u *url.URL) (string, error) { + // https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services + cr := bytes.NewBufferString("/") + cr.WriteString(c.accountName) + + if len(u.Path) > 0 { + // Any portion of the CanonicalizedResource string that is derived from + // the resource's URI should be encoded exactly as it is in the URI. + // -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx + cr.WriteString(u.EscapedPath()) + } else { + // a slash is required to indicate the root path + cr.WriteString("/") + } + + // params is a map[string][]string; param name is key; params values is []string + params, err := url.ParseQuery(u.RawQuery) // Returns URL decoded values + if err != nil { + return "", fmt.Errorf("failed to parse query params: %w", err) + } + + if len(params) > 0 { // There is at least 1 query parameter + var paramNames []string // We use this to sort the parameter key names + for paramName := range params { + paramNames = append(paramNames, paramName) // paramNames must be lowercase + } + sort.Strings(paramNames) + + for _, paramName := range paramNames { + paramValues := params[paramName] + sort.Strings(paramValues) + + // Join the sorted key values separated by ',' + // Then prepend "keyName:"; then add this string to the buffer + cr.WriteString("\n" + paramName + ":" + strings.Join(paramValues, ",")) + } + } + return cr.String(), nil +} + +// ComputeHMACSHA256 is a helper for computing the signed string outside of this package. 
+func ComputeHMACSHA256(cred *SharedKeyCredential, message string) (string, error) { + return cred.computeHMACSHA256(message) +} + +// the following content isn't actually exported but must live +// next to SharedKeyCredential as it uses its unexported methods + +type SharedKeyCredPolicy struct { + cred *SharedKeyCredential +} + +func NewSharedKeyCredPolicy(cred *SharedKeyCredential) *SharedKeyCredPolicy { + return &SharedKeyCredPolicy{cred: cred} +} + +func (s *SharedKeyCredPolicy) Do(req *policy.Request) (*http.Response, error) { + if d := getHeader(shared.HeaderXmsDate, req.Raw().Header); d == "" { + req.Raw().Header.Set(shared.HeaderXmsDate, time.Now().UTC().Format(http.TimeFormat)) + } + stringToSign, err := s.cred.buildStringToSign(req.Raw()) + if err != nil { + return nil, err + } + signature, err := s.cred.computeHMACSHA256(stringToSign) + if err != nil { + return nil, err + } + authHeader := strings.Join([]string{"SharedKey ", s.cred.AccountName(), ":", signature}, "") + req.Raw().Header.Set(shared.HeaderAuthorization, authHeader) + + response, err := req.Next() + if err != nil && response != nil && response.StatusCode == http.StatusForbidden { + // Service failed to authenticate request, log it + log.Write(azlog.EventResponse, "===== HTTP Forbidden status, String-to-Sign:\n"+stringToSign+"\n===============================\n") + } + return response, err +} diff --git a/sdk/storage/azfile/internal/exported/smb_property.go b/sdk/storage/azfile/internal/exported/smb_property.go new file mode 100644 index 000000000000..894e9455b760 --- /dev/null +++ b/sdk/storage/azfile/internal/exported/smb_property.go @@ -0,0 +1,98 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package exported + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/generated" + "strings" + "time" +) + +// SMBProperties contains the optional parameters regarding the SMB/NTFS properties for a file. +type SMBProperties struct { + // NTFSFileAttributes for Files and Directories. Default value is 'None' for file and + // 'Directory' for directory. ‘None’ can also be specified as default. + Attributes *NTFSFileAttributes + // The Coordinated Universal Time (UTC) creation time for the file/directory. Default value is 'now'. + CreationTime *time.Time + // The Coordinated Universal Time (UTC) last write time for the file/directory. Default value is 'now'. + LastWriteTime *time.Time +} + +// Format returns file attributes, creation time and last write time. +func (sp *SMBProperties) Format(isDir bool, defaultFileAttributes string, defaultCurrentTimeString string) (fileAttributes string, creationTime string, lastWriteTime string) { + if sp == nil { + return defaultFileAttributes, defaultCurrentTimeString, defaultCurrentTimeString + } + + fileAttributes = defaultFileAttributes + if sp.Attributes != nil { + fileAttributes = sp.Attributes.String() + if fileAttributes == "" { + fileAttributes = defaultFileAttributes + } else if isDir && strings.ToLower(fileAttributes) != "none" { + // Directories need to have this attribute included, if setting any attributes. + fileAttributes += "|Directory" + } + } + + creationTime = defaultCurrentTimeString + if sp.CreationTime != nil { + creationTime = sp.CreationTime.UTC().Format(generated.ISO8601) + } + + lastWriteTime = defaultCurrentTimeString + if sp.LastWriteTime != nil { + lastWriteTime = sp.LastWriteTime.UTC().Format(generated.ISO8601) + } + + return +} + +// NTFSFileAttributes for Files and Directories. +// The subset of attributes is listed at: https://learn.microsoft.com/en-us/rest/api/storageservices/set-file-properties#file-system-attributes. 
// NTFSFileAttributes for Files and Directories.
// The subset of attributes is listed at: https://learn.microsoft.com/en-us/rest/api/storageservices/set-file-properties#file-system-attributes.
type NTFSFileAttributes struct {
	ReadOnly, Hidden, System, Directory, Archive, None, Temporary, Offline, NotContentIndexed, NoScrubData bool
}

// String returns the pipe-separated representation of the set attributes in
// the service's expected order (e.g. "ReadOnly|Hidden"), or the empty string
// when no attribute is set.
func (f *NTFSFileAttributes) String() string {
	// Table-driven: emission order here must match the service's documented order.
	flags := []struct {
		set  bool
		name string
	}{
		{f.ReadOnly, "ReadOnly"},
		{f.Hidden, "Hidden"},
		{f.System, "System"},
		{f.Directory, "Directory"},
		{f.Archive, "Archive"},
		{f.None, "None"},
		{f.Temporary, "Temporary"},
		{f.Offline, "Offline"},
		{f.NotContentIndexed, "NotContentIndexed"},
		{f.NoScrubData, "NoScrubData"},
	}

	var parts []string
	for _, fl := range flags {
		if fl.set {
			parts = append(parts, fl.name)
		}
	}
	return strings.Join(parts, "|")
}
+type TransferValidationTypeMD5 []byte + +func (c TransferValidationTypeMD5) Apply(rsc io.ReadSeekCloser, cfg generated.TransactionalContentSetter) (io.ReadSeekCloser, error) { + cfg.SetMD5(c) + return rsc, nil +} + +func (TransferValidationTypeMD5) notPubliclyImplementable() {} diff --git a/sdk/storage/azfile/internal/exported/version.go b/sdk/storage/azfile/internal/exported/version.go new file mode 100644 index 000000000000..8e130784dbf2 --- /dev/null +++ b/sdk/storage/azfile/internal/exported/version.go @@ -0,0 +1,12 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package exported + +const ( + ModuleName = "azfile" + ModuleVersion = "v0.1.0" +) diff --git a/sdk/storage/azfile/internal/generated/autorest.md b/sdk/storage/azfile/internal/generated/autorest.md new file mode 100644 index 000000000000..634ccff33f46 --- /dev/null +++ b/sdk/storage/azfile/internal/generated/autorest.md @@ -0,0 +1,309 @@ +# Code Generation - Azure File SDK for Golang + +### Settings + +```yaml +go: true +clear-output-folder: false +version: "^3.0.0" +license-header: MICROSOFT_MIT_NO_VERSION +input-file: "https://raw.githubusercontent.com/Azure/azure-rest-api-specs/bbea558ac43d6ebec72455233c84b0158c89fcda/specification/storage/data-plane/Microsoft.FileStorage/preview/2020-10-02/file.json" +credential-scope: "https://storage.azure.com/.default" +output-folder: ../generated +file-prefix: "zz_" +openapi-type: "data-plane" +verbose: true +security: AzureKey +modelerfour: + group-parameters: false + seal-single-value-enum-by-default: true + lenient-model-deduplication: true +export-clients: true +use: "@autorest/go@4.0.0-preview.45" +``` + +### Don't include share name, directory, or file name in path - we have direct URIs + +``` yaml +directive: +- from: swagger-document + where: $["x-ms-paths"] + transform: > + for (const property in $) 
+ { + if (property.includes('/{shareName}/{directory}/{fileName}')) + { + $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/ShareName") && false == param['$ref'].endsWith("#/parameters/DirectoryPath") && false == param['$ref'].endsWith("#/parameters/FilePath"))}); + } + else if (property.includes('/{shareName}/{directory}')) + { + $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/ShareName") && false == param['$ref'].endsWith("#/parameters/DirectoryPath"))}); + } + else if (property.includes('/{shareName}')) + { + $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/ShareName"))}); + } + } +``` + +### Add Last-Modified to SetMetadata + +``` yaml +directive: +- from: swagger-document + where: $["x-ms-paths"]["/{shareName}/{directory}/{fileName}?comp=metadata"] + transform: > + $.put.responses["200"].headers["Last-Modified"] = { + "type": "string", + "format": "date-time-rfc1123", + "description": "Returns the date and time the file was last modified. Any operation that modifies the file, including an update of the file's metadata or properties, changes the last-modified time of the file." + } +``` + +### Add Content-MD5 to Put Range from URL + +``` yaml +directive: +- from: swagger-document + where: $["x-ms-paths"]["/{shareName}/{directory}/{fileName}?comp=range&fromURL"] + transform: > + $.put.responses["201"].headers["Content-MD5"] = { + "type": "string", + "format": "byte", + "description": "This header is returned so that the client can check for message content integrity. 
The value of this header is computed by the File service; it is not necessarily the same value as may have been specified in the request headers." + } +``` + +### Rename FileHttpHeaders to ShareFileHTTPHeaders and remove file prefix from properties + +``` yaml +directive: +- from: swagger-document + where: $.parameters + transform: > + $.FileCacheControl["x-ms-parameter-grouping"].name = "share-file-http-headers"; + $.FileCacheControl["x-ms-client-name"] = "cacheControl"; + $.FileContentDisposition["x-ms-parameter-grouping"].name = "share-file-http-headers"; + $.FileContentDisposition["x-ms-client-name"] = "contentDisposition"; + $.FileContentEncoding["x-ms-parameter-grouping"].name = "share-file-http-headers"; + $.FileContentEncoding["x-ms-client-name"] = "contentEncoding"; + $.FileContentLanguage["x-ms-parameter-grouping"].name = "share-file-http-headers"; + $.FileContentLanguage["x-ms-client-name"] = "contentLanguage"; + $.FileContentMD5["x-ms-parameter-grouping"].name = "share-file-http-headers"; + $.FileContentMD5["x-ms-client-name"] = "contentMd5"; + $.FileContentType["x-ms-parameter-grouping"].name = "share-file-http-headers"; + $.FileContentType["x-ms-client-name"] = "contentType"; +``` + +### use azcore.ETag + +``` yaml +directive: +- from: zz_models.go + where: $ + transform: >- + return $. + replace(/import "time"/, `import (\n\t"time"\n\t"github.com/Azure/azure-sdk-for-go/sdk/azcore"\n)`). + replace(/Etag\s+\*string/g, `ETag *azcore.ETag`); + +- from: zz_response_types.go + where: $ + transform: >- + return $. + replace(/"time"/, `"time"\n\t"github.com/Azure/azure-sdk-for-go/sdk/azcore"`). + replace(/ETag\s+\*string/g, `ETag *azcore.ETag`); + +- from: + - zz_directory_client.go + - zz_file_client.go + - zz_share_client.go + where: $ + transform: >- + return $. + replace(/"github\.com\/Azure\/azure\-sdk\-for\-go\/sdk\/azcore\/policy"/, `"github.com/Azure/azure-sdk-for-go/sdk/azcore"\n\t"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"`). 
+ replace(/result\.ETag\s+=\s+&val/g, `result.ETag = (*azcore.ETag)(&val)`); +``` + +### Rename models - remove `Share` prefix + +``` yaml +directive: +- rename-model: + from: ShareProtocolSettings + to: ProtocolSettings +- rename-model: + from: ShareSmbSettings + to: SMBSettings +``` + +### Capitalise SMB field + +``` yaml +directive: +- from: + - zz_file_client.go + - zz_models.go + where: $ + transform: >- + return $. + replace(/SmbMultichannel/g, `SMBMultichannel`). + replace(/copyFileSmbInfo/g, `copyFileSMBInfo`). + replace(/CopyFileSmbInfo/g, `CopyFileSMBInfo`). + replace(/Smb\s+\*ShareSMBSettings/g, `SMB *ShareSMBSettings`); +``` + +### Rename models - remove `Item` and `Internal` suffix + +``` yaml +directive: +- rename-model: + from: DirectoryItem + to: Directory +- rename-model: + from: FileItem + to: File +- rename-model: + from: HandleItem + to: Handle +- rename-model: + from: ShareItemInternal + to: Share +- rename-model: + from: SharePropertiesInternal + to: ShareProperties +``` + +### Remove `Items` and `List` suffix + +``` yaml +directive: + - from: source-file-go + where: $ + transform: >- + return $. + replace(/DirectoryItems/g, "Directories"). + replace(/FileItems/g, "Files"). + replace(/ShareItems/g, "Shares"). + replace(/HandleList/g, "Handles"); +``` + +### Rename `FileID` to `ID` (except for Handle object) + +``` yaml +directive: +- from: swagger-document + where: $.definitions + transform: > + $.Directory.properties.FileId["x-ms-client-name"] = "ID"; + $.File.properties.FileId["x-ms-client-name"] = "ID"; + $.Handle.properties.HandleId["x-ms-client-name"] = "ID"; + +- from: + - zz_directory_client.go + - zz_file_client.go + - zz_response_types.go + where: $ + transform: >- + return $. + replace(/FileID/g, `ID`); +``` + + +### Change CORS acronym to be all caps and rename `FileParentID` to `ParentID` + +``` yaml +directive: + - from: source-file-go + where: $ + transform: >- + return $. + replace(/Cors/g, "CORS"). 
+ replace(/FileParentID/g, "ParentID"); +``` + +### Change cors xml to be correct + +``` yaml +directive: + - from: source-file-go + where: $ + transform: >- + return $. + replace(/xml:"CORS>CORSRule"/g, "xml:\"Cors>CorsRule\""); +``` + +### Remove pager methods and export various generated methods in service client + +``` yaml +directive: + - from: zz_service_client.go + where: $ + transform: >- + return $. + replace(/func \(client \*ServiceClient\) NewListSharesSegmentPager\(.+\/\/ listSharesSegmentCreateRequest creates the ListSharesSegment request/s, `//\n// listSharesSegmentCreateRequest creates the ListSharesSegment request`). + replace(/\(client \*ServiceClient\) listSharesSegmentCreateRequest\(/, `(client *ServiceClient) ListSharesSegmentCreateRequest(`). + replace(/\(client \*ServiceClient\) listSharesSegmentHandleResponse\(/, `(client *ServiceClient) ListSharesSegmentHandleResponse(`); +``` + +### Use string type for FileCreationTime and FileLastWriteTime + +``` yaml +directive: +- from: swagger-document + where: $.parameters.FileCreationTime + transform: > + $.format = "str"; +- from: swagger-document + where: $.parameters.FileLastWriteTime + transform: > + $.format = "str"; +``` + +### Remove pager methods and export various generated methods in directory client + +``` yaml +directive: + - from: zz_directory_client.go + where: $ + transform: >- + return $. + replace(/func \(client \*DirectoryClient\) NewListFilesAndDirectoriesSegmentPager\(.+\/\/ listFilesAndDirectoriesSegmentCreateRequest creates the ListFilesAndDirectoriesSegment request/s, `//\n// listFilesAndDirectoriesSegmentCreateRequest creates the ListFilesAndDirectoriesSegment request`). + replace(/\(client \*DirectoryClient\) listFilesAndDirectoriesSegmentCreateRequest\(/, `(client *DirectoryClient) ListFilesAndDirectoriesSegmentCreateRequest(`). 
+ replace(/\(client \*DirectoryClient\) listFilesAndDirectoriesSegmentHandleResponse\(/, `(client *DirectoryClient) ListFilesAndDirectoriesSegmentHandleResponse(`); +``` + +### Fix time format for parsing the response headers: x-ms-file-creation-time, x-ms-file-last-write-time, x-ms-file-change-time + +``` yaml +directive: + - from: + - zz_directory_client.go + - zz_file_client.go + where: $ + transform: >- + return $. + replace(/fileCreationTime,\s+err\s+\:=\s+time\.Parse\(time\.RFC1123,\s+val\)/g, `fileCreationTime, err := time.Parse(ISO8601, val)`). + replace(/fileLastWriteTime,\s+err\s+\:=\s+time\.Parse\(time\.RFC1123,\s+val\)/g, `fileLastWriteTime, err := time.Parse(ISO8601, val)`). + replace(/fileChangeTime,\s+err\s+\:=\s+time\.Parse\(time\.RFC1123,\s+val\)/g, `fileChangeTime, err := time.Parse(ISO8601, val)`); +``` + +### Change `Duration` parameter in leases to be required + +``` yaml +directive: +- from: swagger-document + where: $.parameters.LeaseDuration + transform: > + $.required = true; +``` + +### Convert ShareUsageBytes to int64 + +``` yaml +directive: + - from: zz_models.go + where: $ + transform: >- + return $. + replace(/ShareUsageBytes\s+\*int32/g, `ShareUsageBytes *int64`); +``` diff --git a/sdk/storage/azfile/internal/generated/build.go b/sdk/storage/azfile/internal/generated/build.go new file mode 100644 index 000000000000..57f112001bd2 --- /dev/null +++ b/sdk/storage/azfile/internal/generated/build.go @@ -0,0 +1,10 @@ +//go:build go1.18 +// +build go1.18 + +//go:generate autorest ./autorest.md +//go:generate gofmt -w . + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package generated diff --git a/sdk/storage/azfile/internal/generated/directory_client.go b/sdk/storage/azfile/internal/generated/directory_client.go new file mode 100644 index 000000000000..11a75a9f50c8 --- /dev/null +++ b/sdk/storage/azfile/internal/generated/directory_client.go @@ -0,0 +1,22 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package generated + +import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + +const ( + // ISO8601 is used for formatting file creation, last write and change time. + ISO8601 = "2006-01-02T15:04:05.0000000Z07:00" +) + +func (client *DirectoryClient) Endpoint() string { + return client.endpoint +} + +func (client *DirectoryClient) Pipeline() runtime.Pipeline { + return client.pl +} diff --git a/sdk/storage/azfile/internal/generated/file_client.go b/sdk/storage/azfile/internal/generated/file_client.go new file mode 100644 index 000000000000..f4a01a783938 --- /dev/null +++ b/sdk/storage/azfile/internal/generated/file_client.go @@ -0,0 +1,17 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package generated + +import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + +func (client *FileClient) Endpoint() string { + return client.endpoint +} + +func (client *FileClient) Pipeline() runtime.Pipeline { + return client.pl +} diff --git a/sdk/storage/azfile/internal/generated/models.go b/sdk/storage/azfile/internal/generated/models.go new file mode 100644 index 000000000000..6450b7de2e82 --- /dev/null +++ b/sdk/storage/azfile/internal/generated/models.go @@ -0,0 +1,25 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See License.txt in the project root for license information. + +package generated + +type TransactionalContentSetter interface { + SetMD5([]byte) + // add SetCRC64() when Azure File service starts supporting it. +} + +func (f *FileClientUploadRangeOptions) SetMD5(v []byte) { + f.ContentMD5 = v +} + +type SourceContentSetter interface { + SetSourceContentCRC64(v []byte) + // add SetSourceContentMD5() when Azure File service starts supporting it. +} + +func (f *FileClientUploadRangeFromURLOptions) SetSourceContentCRC64(v []byte) { + f.SourceContentCRC64 = v +} diff --git a/sdk/storage/azfile/internal/generated/service_client.go b/sdk/storage/azfile/internal/generated/service_client.go new file mode 100644 index 000000000000..1f449b955e82 --- /dev/null +++ b/sdk/storage/azfile/internal/generated/service_client.go @@ -0,0 +1,17 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package generated + +import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + +func (client *ServiceClient) Endpoint() string { + return client.endpoint +} + +func (client *ServiceClient) Pipeline() runtime.Pipeline { + return client.pl +} diff --git a/sdk/storage/azfile/internal/generated/share_client.go b/sdk/storage/azfile/internal/generated/share_client.go new file mode 100644 index 000000000000..040785814606 --- /dev/null +++ b/sdk/storage/azfile/internal/generated/share_client.go @@ -0,0 +1,17 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package generated + +import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + +func (client *ShareClient) Endpoint() string { + return client.endpoint +} + +func (client *ShareClient) Pipeline() runtime.Pipeline { + return client.pl +} diff --git a/sdk/storage/azfile/internal/generated/zz_constants.go b/sdk/storage/azfile/internal/generated/zz_constants.go new file mode 100644 index 000000000000..13ee55aa841e --- /dev/null +++ b/sdk/storage/azfile/internal/generated/zz_constants.go @@ -0,0 +1,342 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package generated + +type CopyStatusType string + +const ( + CopyStatusTypePending CopyStatusType = "pending" + CopyStatusTypeSuccess CopyStatusType = "success" + CopyStatusTypeAborted CopyStatusType = "aborted" + CopyStatusTypeFailed CopyStatusType = "failed" +) + +// PossibleCopyStatusTypeValues returns the possible values for the CopyStatusType const type. +func PossibleCopyStatusTypeValues() []CopyStatusType { + return []CopyStatusType{ + CopyStatusTypePending, + CopyStatusTypeSuccess, + CopyStatusTypeAborted, + CopyStatusTypeFailed, + } +} + +type DeleteSnapshotsOptionType string + +const ( + DeleteSnapshotsOptionTypeInclude DeleteSnapshotsOptionType = "include" + DeleteSnapshotsOptionTypeIncludeLeased DeleteSnapshotsOptionType = "include-leased" +) + +// PossibleDeleteSnapshotsOptionTypeValues returns the possible values for the DeleteSnapshotsOptionType const type. 
+func PossibleDeleteSnapshotsOptionTypeValues() []DeleteSnapshotsOptionType { + return []DeleteSnapshotsOptionType{ + DeleteSnapshotsOptionTypeInclude, + DeleteSnapshotsOptionTypeIncludeLeased, + } +} + +type FileRangeWriteType string + +const ( + FileRangeWriteTypeUpdate FileRangeWriteType = "update" + FileRangeWriteTypeClear FileRangeWriteType = "clear" +) + +// PossibleFileRangeWriteTypeValues returns the possible values for the FileRangeWriteType const type. +func PossibleFileRangeWriteTypeValues() []FileRangeWriteType { + return []FileRangeWriteType{ + FileRangeWriteTypeUpdate, + FileRangeWriteTypeClear, + } +} + +// LeaseDurationType - When a share is leased, specifies whether the lease is of infinite or fixed duration. +type LeaseDurationType string + +const ( + LeaseDurationTypeInfinite LeaseDurationType = "infinite" + LeaseDurationTypeFixed LeaseDurationType = "fixed" +) + +// PossibleLeaseDurationTypeValues returns the possible values for the LeaseDurationType const type. +func PossibleLeaseDurationTypeValues() []LeaseDurationType { + return []LeaseDurationType{ + LeaseDurationTypeInfinite, + LeaseDurationTypeFixed, + } +} + +// LeaseStateType - Lease state of the share. +type LeaseStateType string + +const ( + LeaseStateTypeAvailable LeaseStateType = "available" + LeaseStateTypeLeased LeaseStateType = "leased" + LeaseStateTypeExpired LeaseStateType = "expired" + LeaseStateTypeBreaking LeaseStateType = "breaking" + LeaseStateTypeBroken LeaseStateType = "broken" +) + +// PossibleLeaseStateTypeValues returns the possible values for the LeaseStateType const type. +func PossibleLeaseStateTypeValues() []LeaseStateType { + return []LeaseStateType{ + LeaseStateTypeAvailable, + LeaseStateTypeLeased, + LeaseStateTypeExpired, + LeaseStateTypeBreaking, + LeaseStateTypeBroken, + } +} + +// LeaseStatusType - The current lease status of the share. 
+type LeaseStatusType string + +const ( + LeaseStatusTypeLocked LeaseStatusType = "locked" + LeaseStatusTypeUnlocked LeaseStatusType = "unlocked" +) + +// PossibleLeaseStatusTypeValues returns the possible values for the LeaseStatusType const type. +func PossibleLeaseStatusTypeValues() []LeaseStatusType { + return []LeaseStatusType{ + LeaseStatusTypeLocked, + LeaseStatusTypeUnlocked, + } +} + +type ListFilesIncludeType string + +const ( + ListFilesIncludeTypeTimestamps ListFilesIncludeType = "Timestamps" + ListFilesIncludeTypeEtag ListFilesIncludeType = "Etag" + ListFilesIncludeTypeAttributes ListFilesIncludeType = "Attributes" + ListFilesIncludeTypePermissionKey ListFilesIncludeType = "PermissionKey" +) + +// PossibleListFilesIncludeTypeValues returns the possible values for the ListFilesIncludeType const type. +func PossibleListFilesIncludeTypeValues() []ListFilesIncludeType { + return []ListFilesIncludeType{ + ListFilesIncludeTypeTimestamps, + ListFilesIncludeTypeEtag, + ListFilesIncludeTypeAttributes, + ListFilesIncludeTypePermissionKey, + } +} + +type ListSharesIncludeType string + +const ( + ListSharesIncludeTypeSnapshots ListSharesIncludeType = "snapshots" + ListSharesIncludeTypeMetadata ListSharesIncludeType = "metadata" + ListSharesIncludeTypeDeleted ListSharesIncludeType = "deleted" +) + +// PossibleListSharesIncludeTypeValues returns the possible values for the ListSharesIncludeType const type. +func PossibleListSharesIncludeTypeValues() []ListSharesIncludeType { + return []ListSharesIncludeType{ + ListSharesIncludeTypeSnapshots, + ListSharesIncludeTypeMetadata, + ListSharesIncludeTypeDeleted, + } +} + +type PermissionCopyModeType string + +const ( + PermissionCopyModeTypeSource PermissionCopyModeType = "source" + PermissionCopyModeTypeOverride PermissionCopyModeType = "override" +) + +// PossiblePermissionCopyModeTypeValues returns the possible values for the PermissionCopyModeType const type. 
+func PossiblePermissionCopyModeTypeValues() []PermissionCopyModeType { + return []PermissionCopyModeType{ + PermissionCopyModeTypeSource, + PermissionCopyModeTypeOverride, + } +} + +type ShareAccessTier string + +const ( + ShareAccessTierCool ShareAccessTier = "Cool" + ShareAccessTierHot ShareAccessTier = "Hot" + ShareAccessTierTransactionOptimized ShareAccessTier = "TransactionOptimized" +) + +// PossibleShareAccessTierValues returns the possible values for the ShareAccessTier const type. +func PossibleShareAccessTierValues() []ShareAccessTier { + return []ShareAccessTier{ + ShareAccessTierCool, + ShareAccessTierHot, + ShareAccessTierTransactionOptimized, + } +} + +type ShareRootSquash string + +const ( + ShareRootSquashNoRootSquash ShareRootSquash = "NoRootSquash" + ShareRootSquashRootSquash ShareRootSquash = "RootSquash" + ShareRootSquashAllSquash ShareRootSquash = "AllSquash" +) + +// PossibleShareRootSquashValues returns the possible values for the ShareRootSquash const type. +func PossibleShareRootSquashValues() []ShareRootSquash { + return []ShareRootSquash{ + ShareRootSquashNoRootSquash, + ShareRootSquashRootSquash, + ShareRootSquashAllSquash, + } +} + +// StorageErrorCode - Error codes returned by the service +type StorageErrorCode string + +const ( + StorageErrorCodeAccountAlreadyExists StorageErrorCode = "AccountAlreadyExists" + StorageErrorCodeAccountBeingCreated StorageErrorCode = "AccountBeingCreated" + StorageErrorCodeAccountIsDisabled StorageErrorCode = "AccountIsDisabled" + StorageErrorCodeAuthenticationFailed StorageErrorCode = "AuthenticationFailed" + StorageErrorCodeAuthorizationFailure StorageErrorCode = "AuthorizationFailure" + StorageErrorCodeAuthorizationPermissionMismatch StorageErrorCode = "AuthorizationPermissionMismatch" + StorageErrorCodeAuthorizationProtocolMismatch StorageErrorCode = "AuthorizationProtocolMismatch" + StorageErrorCodeAuthorizationResourceTypeMismatch StorageErrorCode = "AuthorizationResourceTypeMismatch" + 
StorageErrorCodeAuthorizationServiceMismatch StorageErrorCode = "AuthorizationServiceMismatch" + StorageErrorCodeAuthorizationSourceIPMismatch StorageErrorCode = "AuthorizationSourceIPMismatch" + StorageErrorCodeCannotDeleteFileOrDirectory StorageErrorCode = "CannotDeleteFileOrDirectory" + StorageErrorCodeClientCacheFlushDelay StorageErrorCode = "ClientCacheFlushDelay" + StorageErrorCodeConditionHeadersNotSupported StorageErrorCode = "ConditionHeadersNotSupported" + StorageErrorCodeConditionNotMet StorageErrorCode = "ConditionNotMet" + StorageErrorCodeContainerQuotaDowngradeNotAllowed StorageErrorCode = "ContainerQuotaDowngradeNotAllowed" + StorageErrorCodeDeletePending StorageErrorCode = "DeletePending" + StorageErrorCodeDirectoryNotEmpty StorageErrorCode = "DirectoryNotEmpty" + StorageErrorCodeEmptyMetadataKey StorageErrorCode = "EmptyMetadataKey" + StorageErrorCodeFeatureVersionMismatch StorageErrorCode = "FeatureVersionMismatch" + StorageErrorCodeFileLockConflict StorageErrorCode = "FileLockConflict" + StorageErrorCodeInsufficientAccountPermissions StorageErrorCode = "InsufficientAccountPermissions" + StorageErrorCodeInternalError StorageErrorCode = "InternalError" + StorageErrorCodeInvalidAuthenticationInfo StorageErrorCode = "InvalidAuthenticationInfo" + StorageErrorCodeInvalidFileOrDirectoryPathName StorageErrorCode = "InvalidFileOrDirectoryPathName" + StorageErrorCodeInvalidHTTPVerb StorageErrorCode = "InvalidHttpVerb" + StorageErrorCodeInvalidHeaderValue StorageErrorCode = "InvalidHeaderValue" + StorageErrorCodeInvalidInput StorageErrorCode = "InvalidInput" + StorageErrorCodeInvalidMD5 StorageErrorCode = "InvalidMd5" + StorageErrorCodeInvalidMetadata StorageErrorCode = "InvalidMetadata" + StorageErrorCodeInvalidQueryParameterValue StorageErrorCode = "InvalidQueryParameterValue" + StorageErrorCodeInvalidRange StorageErrorCode = "InvalidRange" + StorageErrorCodeInvalidResourceName StorageErrorCode = "InvalidResourceName" + StorageErrorCodeInvalidURI 
StorageErrorCode = "InvalidUri" + StorageErrorCodeInvalidXMLDocument StorageErrorCode = "InvalidXmlDocument" + StorageErrorCodeInvalidXMLNodeValue StorageErrorCode = "InvalidXmlNodeValue" + StorageErrorCodeMD5Mismatch StorageErrorCode = "Md5Mismatch" + StorageErrorCodeMetadataTooLarge StorageErrorCode = "MetadataTooLarge" + StorageErrorCodeMissingContentLengthHeader StorageErrorCode = "MissingContentLengthHeader" + StorageErrorCodeMissingRequiredHeader StorageErrorCode = "MissingRequiredHeader" + StorageErrorCodeMissingRequiredQueryParameter StorageErrorCode = "MissingRequiredQueryParameter" + StorageErrorCodeMissingRequiredXMLNode StorageErrorCode = "MissingRequiredXmlNode" + StorageErrorCodeMultipleConditionHeadersNotSupported StorageErrorCode = "MultipleConditionHeadersNotSupported" + StorageErrorCodeOperationTimedOut StorageErrorCode = "OperationTimedOut" + StorageErrorCodeOutOfRangeInput StorageErrorCode = "OutOfRangeInput" + StorageErrorCodeOutOfRangeQueryParameterValue StorageErrorCode = "OutOfRangeQueryParameterValue" + StorageErrorCodeParentNotFound StorageErrorCode = "ParentNotFound" + StorageErrorCodeReadOnlyAttribute StorageErrorCode = "ReadOnlyAttribute" + StorageErrorCodeRequestBodyTooLarge StorageErrorCode = "RequestBodyTooLarge" + StorageErrorCodeRequestURLFailedToParse StorageErrorCode = "RequestUrlFailedToParse" + StorageErrorCodeResourceAlreadyExists StorageErrorCode = "ResourceAlreadyExists" + StorageErrorCodeResourceNotFound StorageErrorCode = "ResourceNotFound" + StorageErrorCodeResourceTypeMismatch StorageErrorCode = "ResourceTypeMismatch" + StorageErrorCodeServerBusy StorageErrorCode = "ServerBusy" + StorageErrorCodeShareAlreadyExists StorageErrorCode = "ShareAlreadyExists" + StorageErrorCodeShareBeingDeleted StorageErrorCode = "ShareBeingDeleted" + StorageErrorCodeShareDisabled StorageErrorCode = "ShareDisabled" + StorageErrorCodeShareHasSnapshots StorageErrorCode = "ShareHasSnapshots" + StorageErrorCodeShareNotFound StorageErrorCode = 
"ShareNotFound" + StorageErrorCodeShareSnapshotCountExceeded StorageErrorCode = "ShareSnapshotCountExceeded" + StorageErrorCodeShareSnapshotInProgress StorageErrorCode = "ShareSnapshotInProgress" + StorageErrorCodeShareSnapshotOperationNotSupported StorageErrorCode = "ShareSnapshotOperationNotSupported" + StorageErrorCodeSharingViolation StorageErrorCode = "SharingViolation" + StorageErrorCodeUnsupportedHTTPVerb StorageErrorCode = "UnsupportedHttpVerb" + StorageErrorCodeUnsupportedHeader StorageErrorCode = "UnsupportedHeader" + StorageErrorCodeUnsupportedQueryParameter StorageErrorCode = "UnsupportedQueryParameter" + StorageErrorCodeUnsupportedXMLNode StorageErrorCode = "UnsupportedXmlNode" +) + +// PossibleStorageErrorCodeValues returns the possible values for the StorageErrorCode const type. +func PossibleStorageErrorCodeValues() []StorageErrorCode { + return []StorageErrorCode{ + StorageErrorCodeAccountAlreadyExists, + StorageErrorCodeAccountBeingCreated, + StorageErrorCodeAccountIsDisabled, + StorageErrorCodeAuthenticationFailed, + StorageErrorCodeAuthorizationFailure, + StorageErrorCodeAuthorizationPermissionMismatch, + StorageErrorCodeAuthorizationProtocolMismatch, + StorageErrorCodeAuthorizationResourceTypeMismatch, + StorageErrorCodeAuthorizationServiceMismatch, + StorageErrorCodeAuthorizationSourceIPMismatch, + StorageErrorCodeCannotDeleteFileOrDirectory, + StorageErrorCodeClientCacheFlushDelay, + StorageErrorCodeConditionHeadersNotSupported, + StorageErrorCodeConditionNotMet, + StorageErrorCodeContainerQuotaDowngradeNotAllowed, + StorageErrorCodeDeletePending, + StorageErrorCodeDirectoryNotEmpty, + StorageErrorCodeEmptyMetadataKey, + StorageErrorCodeFeatureVersionMismatch, + StorageErrorCodeFileLockConflict, + StorageErrorCodeInsufficientAccountPermissions, + StorageErrorCodeInternalError, + StorageErrorCodeInvalidAuthenticationInfo, + StorageErrorCodeInvalidFileOrDirectoryPathName, + StorageErrorCodeInvalidHTTPVerb, + StorageErrorCodeInvalidHeaderValue, 
+ StorageErrorCodeInvalidInput, + StorageErrorCodeInvalidMD5, + StorageErrorCodeInvalidMetadata, + StorageErrorCodeInvalidQueryParameterValue, + StorageErrorCodeInvalidRange, + StorageErrorCodeInvalidResourceName, + StorageErrorCodeInvalidURI, + StorageErrorCodeInvalidXMLDocument, + StorageErrorCodeInvalidXMLNodeValue, + StorageErrorCodeMD5Mismatch, + StorageErrorCodeMetadataTooLarge, + StorageErrorCodeMissingContentLengthHeader, + StorageErrorCodeMissingRequiredHeader, + StorageErrorCodeMissingRequiredQueryParameter, + StorageErrorCodeMissingRequiredXMLNode, + StorageErrorCodeMultipleConditionHeadersNotSupported, + StorageErrorCodeOperationTimedOut, + StorageErrorCodeOutOfRangeInput, + StorageErrorCodeOutOfRangeQueryParameterValue, + StorageErrorCodeParentNotFound, + StorageErrorCodeReadOnlyAttribute, + StorageErrorCodeRequestBodyTooLarge, + StorageErrorCodeRequestURLFailedToParse, + StorageErrorCodeResourceAlreadyExists, + StorageErrorCodeResourceNotFound, + StorageErrorCodeResourceTypeMismatch, + StorageErrorCodeServerBusy, + StorageErrorCodeShareAlreadyExists, + StorageErrorCodeShareBeingDeleted, + StorageErrorCodeShareDisabled, + StorageErrorCodeShareHasSnapshots, + StorageErrorCodeShareNotFound, + StorageErrorCodeShareSnapshotCountExceeded, + StorageErrorCodeShareSnapshotInProgress, + StorageErrorCodeShareSnapshotOperationNotSupported, + StorageErrorCodeSharingViolation, + StorageErrorCodeUnsupportedHTTPVerb, + StorageErrorCodeUnsupportedHeader, + StorageErrorCodeUnsupportedQueryParameter, + StorageErrorCodeUnsupportedXMLNode, + } +} diff --git a/sdk/storage/azfile/internal/generated/zz_directory_client.go b/sdk/storage/azfile/internal/generated/zz_directory_client.go new file mode 100644 index 000000000000..1b1eed71d03f --- /dev/null +++ b/sdk/storage/azfile/internal/generated/zz_directory_client.go @@ -0,0 +1,766 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package generated + +import ( + "context" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "net/http" + "strconv" + "strings" + "time" +) + +// DirectoryClient contains the methods for the Directory group. +// Don't use this type directly, use NewDirectoryClient() instead. +type DirectoryClient struct { + endpoint string + pl runtime.Pipeline +} + +// NewDirectoryClient creates a new instance of DirectoryClient with the specified values. +// - endpoint - The URL of the service account, share, directory or file that is the target of the desired operation. +// - pl - the pipeline used for sending requests and handling responses. +func NewDirectoryClient(endpoint string, pl runtime.Pipeline) *DirectoryClient { + client := &DirectoryClient{ + endpoint: endpoint, + pl: pl, + } + return client +} + +// Create - Creates a new directory under the specified share or parent directory. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - fileAttributes - If specified, the provided file attributes shall be set. Default value: ‘Archive’ for file and ‘Directory’ +// for directory. ‘None’ can also be specified as default. +// - fileCreationTime - Creation time for the file/directory. Default value: Now. +// - fileLastWriteTime - Last write time for the file/directory. Default value: Now. +// - options - DirectoryClientCreateOptions contains the optional parameters for the DirectoryClient.Create method. 
+func (client *DirectoryClient) Create(ctx context.Context, fileAttributes string, fileCreationTime string, fileLastWriteTime string, options *DirectoryClientCreateOptions) (DirectoryClientCreateResponse, error) { + req, err := client.createCreateRequest(ctx, fileAttributes, fileCreationTime, fileLastWriteTime, options) + if err != nil { + return DirectoryClientCreateResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return DirectoryClientCreateResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return DirectoryClientCreateResponse{}, runtime.NewResponseError(resp) + } + return client.createHandleResponse(resp) +} + +// createCreateRequest creates the Create request. +func (client *DirectoryClient) createCreateRequest(ctx context.Context, fileAttributes string, fileCreationTime string, fileLastWriteTime string, options *DirectoryClientCreateOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "directory") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.FilePermission != nil { + req.Raw().Header["x-ms-file-permission"] = []string{*options.FilePermission} + } + if options != nil && options.FilePermissionKey != nil { + req.Raw().Header["x-ms-file-permission-key"] = []string{*options.FilePermissionKey} + } + req.Raw().Header["x-ms-file-attributes"] = []string{fileAttributes} + req.Raw().Header["x-ms-file-creation-time"] = []string{fileCreationTime} + req.Raw().Header["x-ms-file-last-write-time"] = 
[]string{fileLastWriteTime} + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// createHandleResponse handles the Create response. +func (client *DirectoryClient) createHandleResponse(resp *http.Response) (DirectoryClientCreateResponse, error) { + result := DirectoryClientCreateResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return DirectoryClientCreateResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return DirectoryClientCreateResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return DirectoryClientCreateResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-file-permission-key"); val != "" { + result.FilePermissionKey = &val + } + if val := resp.Header.Get("x-ms-file-attributes"); val != "" { + result.FileAttributes = &val + } + if val := resp.Header.Get("x-ms-file-creation-time"); val != "" { + fileCreationTime, err := time.Parse(ISO8601, val) + if err != nil { + return DirectoryClientCreateResponse{}, err + } + result.FileCreationTime = &fileCreationTime + } + if val := resp.Header.Get("x-ms-file-last-write-time"); val != "" { + fileLastWriteTime, err := time.Parse(ISO8601, val) + if err != nil { + return DirectoryClientCreateResponse{}, err + } + result.FileLastWriteTime = &fileLastWriteTime + } + if val := resp.Header.Get("x-ms-file-change-time"); val != "" { + 
fileChangeTime, err := time.Parse(ISO8601, val) + if err != nil { + return DirectoryClientCreateResponse{}, err + } + result.FileChangeTime = &fileChangeTime + } + if val := resp.Header.Get("x-ms-file-id"); val != "" { + result.ID = &val + } + if val := resp.Header.Get("x-ms-file-parent-id"); val != "" { + result.ParentID = &val + } + return result, nil +} + +// Delete - Removes the specified empty directory. Note that the directory must be empty before it can be deleted. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - options - DirectoryClientDeleteOptions contains the optional parameters for the DirectoryClient.Delete method. +func (client *DirectoryClient) Delete(ctx context.Context, options *DirectoryClientDeleteOptions) (DirectoryClientDeleteResponse, error) { + req, err := client.deleteCreateRequest(ctx, options) + if err != nil { + return DirectoryClientDeleteResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return DirectoryClientDeleteResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusAccepted) { + return DirectoryClientDeleteResponse{}, runtime.NewResponseError(resp) + } + return client.deleteHandleResponse(resp) +} + +// deleteCreateRequest creates the Delete request. +func (client *DirectoryClient) deleteCreateRequest(ctx context.Context, options *DirectoryClientDeleteOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodDelete, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "directory") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// deleteHandleResponse handles the Delete response. 
+func (client *DirectoryClient) deleteHandleResponse(resp *http.Response) (DirectoryClientDeleteResponse, error) { + result := DirectoryClientDeleteResponse{} + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return DirectoryClientDeleteResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// ForceCloseHandles - Closes all handles open for given directory. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - handleID - Specifies handle ID opened on the file or directory to be closed. Asterisk (‘*’) is a wildcard that specifies +// all handles. +// - options - DirectoryClientForceCloseHandlesOptions contains the optional parameters for the DirectoryClient.ForceCloseHandles +// method. +func (client *DirectoryClient) ForceCloseHandles(ctx context.Context, handleID string, options *DirectoryClientForceCloseHandlesOptions) (DirectoryClientForceCloseHandlesResponse, error) { + req, err := client.forceCloseHandlesCreateRequest(ctx, handleID, options) + if err != nil { + return DirectoryClientForceCloseHandlesResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return DirectoryClientForceCloseHandlesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return DirectoryClientForceCloseHandlesResponse{}, runtime.NewResponseError(resp) + } + return client.forceCloseHandlesHandleResponse(resp) +} + +// forceCloseHandlesCreateRequest creates the ForceCloseHandles request. 
+func (client *DirectoryClient) forceCloseHandlesCreateRequest(ctx context.Context, handleID string, options *DirectoryClientForceCloseHandlesOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "forceclosehandles") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + if options != nil && options.Marker != nil { + reqQP.Set("marker", *options.Marker) + } + if options != nil && options.Sharesnapshot != nil { + reqQP.Set("sharesnapshot", *options.Sharesnapshot) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-handle-id"] = []string{handleID} + if options != nil && options.Recursive != nil { + req.Raw().Header["x-ms-recursive"] = []string{strconv.FormatBool(*options.Recursive)} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// forceCloseHandlesHandleResponse handles the ForceCloseHandles response. 
+func (client *DirectoryClient) forceCloseHandlesHandleResponse(resp *http.Response) (DirectoryClientForceCloseHandlesResponse, error) { + result := DirectoryClientForceCloseHandlesResponse{} + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return DirectoryClientForceCloseHandlesResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-marker"); val != "" { + result.Marker = &val + } + if val := resp.Header.Get("x-ms-number-of-handles-closed"); val != "" { + numberOfHandlesClosed32, err := strconv.ParseInt(val, 10, 32) + numberOfHandlesClosed := int32(numberOfHandlesClosed32) + if err != nil { + return DirectoryClientForceCloseHandlesResponse{}, err + } + result.NumberOfHandlesClosed = &numberOfHandlesClosed + } + if val := resp.Header.Get("x-ms-number-of-handles-failed"); val != "" { + numberOfHandlesFailedToClose32, err := strconv.ParseInt(val, 10, 32) + numberOfHandlesFailedToClose := int32(numberOfHandlesFailedToClose32) + if err != nil { + return DirectoryClientForceCloseHandlesResponse{}, err + } + result.NumberOfHandlesFailedToClose = &numberOfHandlesFailedToClose + } + return result, nil +} + +// GetProperties - Returns all system properties for the specified directory, and can also be used to check the existence +// of a directory. The data returned does not include the files in the directory or any +// subdirectories. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - options - DirectoryClientGetPropertiesOptions contains the optional parameters for the DirectoryClient.GetProperties method. 
+func (client *DirectoryClient) GetProperties(ctx context.Context, options *DirectoryClientGetPropertiesOptions) (DirectoryClientGetPropertiesResponse, error) { + req, err := client.getPropertiesCreateRequest(ctx, options) + if err != nil { + return DirectoryClientGetPropertiesResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return DirectoryClientGetPropertiesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return DirectoryClientGetPropertiesResponse{}, runtime.NewResponseError(resp) + } + return client.getPropertiesHandleResponse(resp) +} + +// getPropertiesCreateRequest creates the GetProperties request. +func (client *DirectoryClient) getPropertiesCreateRequest(ctx context.Context, options *DirectoryClientGetPropertiesOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "directory") + if options != nil && options.Sharesnapshot != nil { + reqQP.Set("sharesnapshot", *options.Sharesnapshot) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// getPropertiesHandleResponse handles the GetProperties response. 
func (client *DirectoryClient) getPropertiesHandleResponse(resp *http.Response) (DirectoryClientGetPropertiesResponse, error) {
	result := DirectoryClientGetPropertiesResponse{}
	// Collect user-defined metadata: every header prefixed with x-ms-meta-
	// (compared case-insensitively) becomes a Metadata entry keyed by the
	// portion of the header name after the prefix.
	for hh := range resp.Header {
		if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") {
			if result.Metadata == nil {
				result.Metadata = map[string]*string{}
			}
			result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh))
		}
	}
	if val := resp.Header.Get("ETag"); val != "" {
		result.ETag = (*azcore.ETag)(&val)
	}
	if val := resp.Header.Get("Last-Modified"); val != "" {
		// HTTP date headers are RFC 1123 formatted.
		lastModified, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return DirectoryClientGetPropertiesResponse{}, err
		}
		result.LastModified = &lastModified
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return DirectoryClientGetPropertiesResponse{}, err
		}
		result.Date = &date
	}
	if val := resp.Header.Get("x-ms-server-encrypted"); val != "" {
		isServerEncrypted, err := strconv.ParseBool(val)
		if err != nil {
			return DirectoryClientGetPropertiesResponse{}, err
		}
		result.IsServerEncrypted = &isServerEncrypted
	}
	if val := resp.Header.Get("x-ms-file-attributes"); val != "" {
		result.FileAttributes = &val
	}
	// The x-ms-file-* timestamps use the service's ISO8601 layout constant.
	if val := resp.Header.Get("x-ms-file-creation-time"); val != "" {
		fileCreationTime, err := time.Parse(ISO8601, val)
		if err != nil {
			return DirectoryClientGetPropertiesResponse{}, err
		}
		result.FileCreationTime = &fileCreationTime
	}
	if val := resp.Header.Get("x-ms-file-last-write-time"); val != "" {
		fileLastWriteTime, err := time.Parse(ISO8601, val)
		if err != nil {
			return DirectoryClientGetPropertiesResponse{}, err
		}
		result.FileLastWriteTime = &fileLastWriteTime
	}
	if val := resp.Header.Get("x-ms-file-change-time"); val != "" {
		fileChangeTime, err := time.Parse(ISO8601, val)
		if err != nil {
			return DirectoryClientGetPropertiesResponse{}, err
		}
		result.FileChangeTime = &fileChangeTime
	}
	if val := resp.Header.Get("x-ms-file-permission-key"); val != "" {
		result.FilePermissionKey = &val
	}
	if val := resp.Header.Get("x-ms-file-id"); val != "" {
		result.ID = &val
	}
	if val := resp.Header.Get("x-ms-file-parent-id"); val != "" {
		result.ParentID = &val
	}
	return result, nil
}

// NewListFilesAndDirectoriesSegmentPager - Returns a list of files or directories under the specified share or directory.
// It lists the contents only for a single level of the directory hierarchy.
//
// Generated from API version 2020-10-02
// - options - DirectoryClientListFilesAndDirectoriesSegmentOptions contains the optional parameters for the DirectoryClient.NewListFilesAndDirectoriesSegmentPager
// method.
//
// ListFilesAndDirectoriesSegmentCreateRequest creates the ListFilesAndDirectoriesSegment request.
+func (client *DirectoryClient) ListFilesAndDirectoriesSegmentCreateRequest(ctx context.Context, options *DirectoryClientListFilesAndDirectoriesSegmentOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "directory") + reqQP.Set("comp", "list") + if options != nil && options.Prefix != nil { + reqQP.Set("prefix", *options.Prefix) + } + if options != nil && options.Sharesnapshot != nil { + reqQP.Set("sharesnapshot", *options.Sharesnapshot) + } + if options != nil && options.Marker != nil { + reqQP.Set("marker", *options.Marker) + } + if options != nil && options.Maxresults != nil { + reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + if options != nil && options.Include != nil { + reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.IncludeExtendedInfo != nil { + req.Raw().Header["x-ms-file-extended-info"] = []string{strconv.FormatBool(*options.IncludeExtendedInfo)} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// listFilesAndDirectoriesSegmentHandleResponse handles the ListFilesAndDirectoriesSegment response. 
+func (client *DirectoryClient) ListFilesAndDirectoriesSegmentHandleResponse(resp *http.Response) (DirectoryClientListFilesAndDirectoriesSegmentResponse, error) { + result := DirectoryClientListFilesAndDirectoriesSegmentResponse{} + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return DirectoryClientListFilesAndDirectoriesSegmentResponse{}, err + } + result.Date = &date + } + if err := runtime.UnmarshalAsXML(resp, &result.ListFilesAndDirectoriesSegmentResponse); err != nil { + return DirectoryClientListFilesAndDirectoriesSegmentResponse{}, err + } + return result, nil +} + +// ListHandles - Lists handles for directory. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - options - DirectoryClientListHandlesOptions contains the optional parameters for the DirectoryClient.ListHandles method. +func (client *DirectoryClient) ListHandles(ctx context.Context, options *DirectoryClientListHandlesOptions) (DirectoryClientListHandlesResponse, error) { + req, err := client.listHandlesCreateRequest(ctx, options) + if err != nil { + return DirectoryClientListHandlesResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return DirectoryClientListHandlesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return DirectoryClientListHandlesResponse{}, runtime.NewResponseError(resp) + } + return client.listHandlesHandleResponse(resp) +} + +// listHandlesCreateRequest creates the ListHandles request. 
+func (client *DirectoryClient) listHandlesCreateRequest(ctx context.Context, options *DirectoryClientListHandlesOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "listhandles") + if options != nil && options.Marker != nil { + reqQP.Set("marker", *options.Marker) + } + if options != nil && options.Maxresults != nil { + reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + if options != nil && options.Sharesnapshot != nil { + reqQP.Set("sharesnapshot", *options.Sharesnapshot) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if options != nil && options.Recursive != nil { + req.Raw().Header["x-ms-recursive"] = []string{strconv.FormatBool(*options.Recursive)} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// listHandlesHandleResponse handles the ListHandles response. 
+func (client *DirectoryClient) listHandlesHandleResponse(resp *http.Response) (DirectoryClientListHandlesResponse, error) { + result := DirectoryClientListHandlesResponse{} + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return DirectoryClientListHandlesResponse{}, err + } + result.Date = &date + } + if err := runtime.UnmarshalAsXML(resp, &result.ListHandlesResponse); err != nil { + return DirectoryClientListHandlesResponse{}, err + } + return result, nil +} + +// SetMetadata - Updates user defined metadata for the specified directory. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - options - DirectoryClientSetMetadataOptions contains the optional parameters for the DirectoryClient.SetMetadata method. +func (client *DirectoryClient) SetMetadata(ctx context.Context, options *DirectoryClientSetMetadataOptions) (DirectoryClientSetMetadataResponse, error) { + req, err := client.setMetadataCreateRequest(ctx, options) + if err != nil { + return DirectoryClientSetMetadataResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return DirectoryClientSetMetadataResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return DirectoryClientSetMetadataResponse{}, runtime.NewResponseError(resp) + } + return client.setMetadataHandleResponse(resp) +} + +// setMetadataCreateRequest creates the SetMetadata request. 
func (client *DirectoryClient) setMetadataCreateRequest(ctx context.Context, options *DirectoryClientSetMetadataOptions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("restype", "directory")
	reqQP.Set("comp", "metadata")
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	// Each metadata pair is sent as an x-ms-meta-<name> header; nil values are skipped.
	if options != nil && options.Metadata != nil {
		for k, v := range options.Metadata {
			if v != nil {
				req.Raw().Header["x-ms-meta-"+k] = []string{*v}
			}
		}
	}
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}

// setMetadataHandleResponse handles the SetMetadata response.
func (client *DirectoryClient) setMetadataHandleResponse(resp *http.Response) (DirectoryClientSetMetadataResponse, error) {
	result := DirectoryClientSetMetadataResponse{}
	if val := resp.Header.Get("ETag"); val != "" {
		result.ETag = (*azcore.ETag)(&val)
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		// HTTP Date headers are RFC 1123 formatted.
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return DirectoryClientSetMetadataResponse{}, err
		}
		result.Date = &date
	}
	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
		isServerEncrypted, err := strconv.ParseBool(val)
		if err != nil {
			return DirectoryClientSetMetadataResponse{}, err
		}
		result.IsServerEncrypted = &isServerEncrypted
	}
	return result, nil
}

// SetProperties - Sets properties on the directory.
// If the operation fails it returns an *azcore.ResponseError type.
+// +// Generated from API version 2020-10-02 +// - fileAttributes - If specified, the provided file attributes shall be set. Default value: ‘Archive’ for file and ‘Directory’ +// for directory. ‘None’ can also be specified as default. +// - fileCreationTime - Creation time for the file/directory. Default value: Now. +// - fileLastWriteTime - Last write time for the file/directory. Default value: Now. +// - options - DirectoryClientSetPropertiesOptions contains the optional parameters for the DirectoryClient.SetProperties method. +func (client *DirectoryClient) SetProperties(ctx context.Context, fileAttributes string, fileCreationTime string, fileLastWriteTime string, options *DirectoryClientSetPropertiesOptions) (DirectoryClientSetPropertiesResponse, error) { + req, err := client.setPropertiesCreateRequest(ctx, fileAttributes, fileCreationTime, fileLastWriteTime, options) + if err != nil { + return DirectoryClientSetPropertiesResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return DirectoryClientSetPropertiesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return DirectoryClientSetPropertiesResponse{}, runtime.NewResponseError(resp) + } + return client.setPropertiesHandleResponse(resp) +} + +// setPropertiesCreateRequest creates the SetProperties request. 
+func (client *DirectoryClient) setPropertiesCreateRequest(ctx context.Context, fileAttributes string, fileCreationTime string, fileLastWriteTime string, options *DirectoryClientSetPropertiesOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "directory") + reqQP.Set("comp", "properties") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.FilePermission != nil { + req.Raw().Header["x-ms-file-permission"] = []string{*options.FilePermission} + } + if options != nil && options.FilePermissionKey != nil { + req.Raw().Header["x-ms-file-permission-key"] = []string{*options.FilePermissionKey} + } + req.Raw().Header["x-ms-file-attributes"] = []string{fileAttributes} + req.Raw().Header["x-ms-file-creation-time"] = []string{fileCreationTime} + req.Raw().Header["x-ms-file-last-write-time"] = []string{fileLastWriteTime} + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// setPropertiesHandleResponse handles the SetProperties response. 
func (client *DirectoryClient) setPropertiesHandleResponse(resp *http.Response) (DirectoryClientSetPropertiesResponse, error) {
	result := DirectoryClientSetPropertiesResponse{}
	if val := resp.Header.Get("ETag"); val != "" {
		result.ETag = (*azcore.ETag)(&val)
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("Last-Modified"); val != "" {
		// HTTP date headers are RFC 1123 formatted.
		lastModified, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return DirectoryClientSetPropertiesResponse{}, err
		}
		result.LastModified = &lastModified
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return DirectoryClientSetPropertiesResponse{}, err
		}
		result.Date = &date
	}
	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
		isServerEncrypted, err := strconv.ParseBool(val)
		if err != nil {
			return DirectoryClientSetPropertiesResponse{}, err
		}
		result.IsServerEncrypted = &isServerEncrypted
	}
	if val := resp.Header.Get("x-ms-file-permission-key"); val != "" {
		result.FilePermissionKey = &val
	}
	if val := resp.Header.Get("x-ms-file-attributes"); val != "" {
		result.FileAttributes = &val
	}
	// The x-ms-file-* timestamps use the service's ISO8601 layout constant.
	if val := resp.Header.Get("x-ms-file-creation-time"); val != "" {
		fileCreationTime, err := time.Parse(ISO8601, val)
		if err != nil {
			return DirectoryClientSetPropertiesResponse{}, err
		}
		result.FileCreationTime = &fileCreationTime
	}
	if val := resp.Header.Get("x-ms-file-last-write-time"); val != "" {
		fileLastWriteTime, err := time.Parse(ISO8601, val)
		if err != nil {
			return DirectoryClientSetPropertiesResponse{}, err
		}
		result.FileLastWriteTime = &fileLastWriteTime
	}
	if val := resp.Header.Get("x-ms-file-change-time"); val != "" {
		fileChangeTime, err := time.Parse(ISO8601, val)
		if err != nil {
			return DirectoryClientSetPropertiesResponse{}, err
		}
		result.FileChangeTime = &fileChangeTime
	}
	if val := resp.Header.Get("x-ms-file-id"); val != "" {
		result.ID = &val
	}
	if val := resp.Header.Get("x-ms-file-parent-id"); val != "" {
		result.ParentID = &val
	}
	return result, nil
}
diff --git a/sdk/storage/azfile/internal/generated/zz_file_client.go b/sdk/storage/azfile/internal/generated/zz_file_client.go
new file mode 100644
index 000000000000..cfe2ea780a3b
--- /dev/null
+++ b/sdk/storage/azfile/internal/generated/zz_file_client.go
@@ -0,0 +1,1826 @@
//go:build go1.18
// +build go1.18

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
// DO NOT EDIT.

package generated

import (
	"context"
	"encoding/base64"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"io"
	"net/http"
	"strconv"
	"strings"
	"time"
)

// FileClient contains the methods for the File group.
// Don't use this type directly, use NewFileClient() instead.
type FileClient struct {
	// endpoint is the URL of the service account, share, directory or file targeted by requests.
	endpoint string
	// pl is the pipeline used for sending requests and handling responses.
	pl runtime.Pipeline
}

// NewFileClient creates a new instance of FileClient with the specified values.
// - endpoint - The URL of the service account, share, directory or file that is the target of the desired operation.
// - pl - the pipeline used for sending requests and handling responses.
+func NewFileClient(endpoint string, pl runtime.Pipeline) *FileClient { + client := &FileClient{ + endpoint: endpoint, + pl: pl, + } + return client +} + +// AbortCopy - Aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - copyID - The copy identifier provided in the x-ms-copy-id header of the original Copy File operation. +// - options - FileClientAbortCopyOptions contains the optional parameters for the FileClient.AbortCopy method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ShareClient.GetProperties method. +func (client *FileClient) AbortCopy(ctx context.Context, copyID string, options *FileClientAbortCopyOptions, leaseAccessConditions *LeaseAccessConditions) (FileClientAbortCopyResponse, error) { + req, err := client.abortCopyCreateRequest(ctx, copyID, options, leaseAccessConditions) + if err != nil { + return FileClientAbortCopyResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return FileClientAbortCopyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusNoContent) { + return FileClientAbortCopyResponse{}, runtime.NewResponseError(resp) + } + return client.abortCopyHandleResponse(resp) +} + +// abortCopyCreateRequest creates the AbortCopy request. 
func (client *FileClient) abortCopyCreateRequest(ctx context.Context, copyID string, options *FileClientAbortCopyOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("comp", "copy")
	reqQP.Set("copyid", copyID)
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	// The abort action is selected via the x-ms-copy-action header.
	req.Raw().Header["x-ms-copy-action"] = []string{"abort"}
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}

// abortCopyHandleResponse handles the AbortCopy response.
func (client *FileClient) abortCopyHandleResponse(resp *http.Response) (FileClientAbortCopyResponse, error) {
	result := FileClientAbortCopyResponse{}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		// HTTP Date headers are RFC 1123 formatted.
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return FileClientAbortCopyResponse{}, err
		}
		result.Date = &date
	}
	return result, nil
}

// AcquireLease - [Update] The Lease File operation establishes and manages a lock on a file for write and delete operations
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// - duration - Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite
// lease can be between 15 and 60 seconds. A lease duration cannot be changed using
// renew or change.
// - options - FileClientAcquireLeaseOptions contains the optional parameters for the FileClient.AcquireLease method.
func (client *FileClient) AcquireLease(ctx context.Context, duration int32, options *FileClientAcquireLeaseOptions) (FileClientAcquireLeaseResponse, error) {
	req, err := client.acquireLeaseCreateRequest(ctx, duration, options)
	if err != nil {
		return FileClientAcquireLeaseResponse{}, err
	}
	resp, err := client.pl.Do(req)
	if err != nil {
		return FileClientAcquireLeaseResponse{}, err
	}
	// A successful acquire returns 201 Created.
	if !runtime.HasStatusCode(resp, http.StatusCreated) {
		return FileClientAcquireLeaseResponse{}, runtime.NewResponseError(resp)
	}
	return client.acquireLeaseHandleResponse(resp)
}

// acquireLeaseCreateRequest creates the AcquireLease request.
func (client *FileClient) acquireLeaseCreateRequest(ctx context.Context, duration int32, options *FileClientAcquireLeaseOptions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("comp", "lease")
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	req.Raw().Header["x-ms-lease-action"] = []string{"acquire"}
	req.Raw().Header["x-ms-lease-duration"] = []string{strconv.FormatInt(int64(duration), 10)}
	if options != nil && options.ProposedLeaseID != nil {
		req.Raw().Header["x-ms-proposed-lease-id"] = []string{*options.ProposedLeaseID}
	}
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}

// acquireLeaseHandleResponse handles the AcquireLease response.
+func (client *FileClient) acquireLeaseHandleResponse(resp *http.Response) (FileClientAcquireLeaseResponse, error) { + result := FileClientAcquireLeaseResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return FileClientAcquireLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-id"); val != "" { + result.LeaseID = &val + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return FileClientAcquireLeaseResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// BreakLease - [Update] The Lease File operation establishes and manages a lock on a file for write and delete operations +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - options - FileClientBreakLeaseOptions contains the optional parameters for the FileClient.BreakLease method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ShareClient.GetProperties method. 
+func (client *FileClient) BreakLease(ctx context.Context, options *FileClientBreakLeaseOptions, leaseAccessConditions *LeaseAccessConditions) (FileClientBreakLeaseResponse, error) { + req, err := client.breakLeaseCreateRequest(ctx, options, leaseAccessConditions) + if err != nil { + return FileClientBreakLeaseResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return FileClientBreakLeaseResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusAccepted) { + return FileClientBreakLeaseResponse{}, runtime.NewResponseError(resp) + } + return client.breakLeaseHandleResponse(resp) +} + +// breakLeaseCreateRequest creates the BreakLease request. +func (client *FileClient) breakLeaseCreateRequest(ctx context.Context, options *FileClientBreakLeaseOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-lease-action"] = []string{"break"} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// breakLeaseHandleResponse handles the BreakLease response. 
+func (client *FileClient) breakLeaseHandleResponse(resp *http.Response) (FileClientBreakLeaseResponse, error) { + result := FileClientBreakLeaseResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return FileClientBreakLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-id"); val != "" { + result.LeaseID = &val + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return FileClientBreakLeaseResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// ChangeLease - [Update] The Lease File operation establishes and manages a lock on a file for write and delete operations +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - leaseID - Specifies the current lease ID on the resource. +// - options - FileClientChangeLeaseOptions contains the optional parameters for the FileClient.ChangeLease method. 
+func (client *FileClient) ChangeLease(ctx context.Context, leaseID string, options *FileClientChangeLeaseOptions) (FileClientChangeLeaseResponse, error) { + req, err := client.changeLeaseCreateRequest(ctx, leaseID, options) + if err != nil { + return FileClientChangeLeaseResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return FileClientChangeLeaseResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return FileClientChangeLeaseResponse{}, runtime.NewResponseError(resp) + } + return client.changeLeaseHandleResponse(resp) +} + +// changeLeaseCreateRequest creates the ChangeLease request. +func (client *FileClient) changeLeaseCreateRequest(ctx context.Context, leaseID string, options *FileClientChangeLeaseOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-lease-action"] = []string{"change"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + if options != nil && options.ProposedLeaseID != nil { + req.Raw().Header["x-ms-proposed-lease-id"] = []string{*options.ProposedLeaseID} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// changeLeaseHandleResponse handles the ChangeLease response. 
+func (client *FileClient) changeLeaseHandleResponse(resp *http.Response) (FileClientChangeLeaseResponse, error) { + result := FileClientChangeLeaseResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return FileClientChangeLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-id"); val != "" { + result.LeaseID = &val + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return FileClientChangeLeaseResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// Create - Creates a new file or replaces a file. Note it only initializes the file with no content. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - fileContentLength - Specifies the maximum size for the file, up to 4 TB. +// - fileAttributes - If specified, the provided file attributes shall be set. Default value: ‘Archive’ for file and ‘Directory’ +// for directory. ‘None’ can also be specified as default. +// - fileCreationTime - Creation time for the file/directory. Default value: Now. +// - fileLastWriteTime - Last write time for the file/directory. Default value: Now. +// - options - FileClientCreateOptions contains the optional parameters for the FileClient.Create method. +// - ShareFileHTTPHeaders - ShareFileHTTPHeaders contains a group of parameters for the FileClient.Create method. 
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ShareClient.GetProperties method.
func (client *FileClient) Create(ctx context.Context, fileContentLength int64, fileAttributes string, fileCreationTime string, fileLastWriteTime string, options *FileClientCreateOptions, shareFileHTTPHeaders *ShareFileHTTPHeaders, leaseAccessConditions *LeaseAccessConditions) (FileClientCreateResponse, error) {
	req, err := client.createCreateRequest(ctx, fileContentLength, fileAttributes, fileCreationTime, fileLastWriteTime, options, shareFileHTTPHeaders, leaseAccessConditions)
	if err != nil {
		return FileClientCreateResponse{}, err
	}
	resp, err := client.pl.Do(req)
	if err != nil {
		return FileClientCreateResponse{}, err
	}
	// A successful create returns 201 Created.
	if !runtime.HasStatusCode(resp, http.StatusCreated) {
		return FileClientCreateResponse{}, runtime.NewResponseError(resp)
	}
	return client.createHandleResponse(resp)
}

// createCreateRequest creates the Create request.
func (client *FileClient) createCreateRequest(ctx context.Context, fileContentLength int64, fileAttributes string, fileCreationTime string, fileLastWriteTime string, options *FileClientCreateOptions, shareFileHTTPHeaders *ShareFileHTTPHeaders, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	req.Raw().Header["x-ms-content-length"] = []string{strconv.FormatInt(fileContentLength, 10)}
	req.Raw().Header["x-ms-type"] = []string{"file"}
	// Standard HTTP content headers are mapped onto their x-ms-* equivalents.
	if shareFileHTTPHeaders != nil && shareFileHTTPHeaders.ContentType != nil {
		req.Raw().Header["x-ms-content-type"] = []string{*shareFileHTTPHeaders.ContentType}
	}
	if shareFileHTTPHeaders != nil && shareFileHTTPHeaders.ContentEncoding != nil {
		req.Raw().Header["x-ms-content-encoding"] = []string{*shareFileHTTPHeaders.ContentEncoding}
	}
	if shareFileHTTPHeaders != nil && shareFileHTTPHeaders.ContentLanguage != nil {
		req.Raw().Header["x-ms-content-language"] = []string{*shareFileHTTPHeaders.ContentLanguage}
	}
	if shareFileHTTPHeaders != nil && shareFileHTTPHeaders.CacheControl != nil {
		req.Raw().Header["x-ms-cache-control"] = []string{*shareFileHTTPHeaders.CacheControl}
	}
	if shareFileHTTPHeaders != nil && shareFileHTTPHeaders.ContentMD5 != nil {
		// The MD5 hash is transmitted base64-encoded.
		req.Raw().Header["x-ms-content-md5"] = []string{base64.StdEncoding.EncodeToString(shareFileHTTPHeaders.ContentMD5)}
	}
	if shareFileHTTPHeaders != nil && shareFileHTTPHeaders.ContentDisposition != nil {
		req.Raw().Header["x-ms-content-disposition"] = []string{*shareFileHTTPHeaders.ContentDisposition}
	}
	// Each metadata pair is sent as an x-ms-meta-<name> header; nil values are skipped.
	if options != nil && options.Metadata != nil {
		for k, v := range options.Metadata {
			if v != nil {
				req.Raw().Header["x-ms-meta-"+k] = []string{*v}
			}
		}
	}
	if options != nil && options.FilePermission != nil {
		req.Raw().Header["x-ms-file-permission"] = []string{*options.FilePermission}
	}
	if options != nil && options.FilePermissionKey != nil {
		req.Raw().Header["x-ms-file-permission-key"] = []string{*options.FilePermissionKey}
	}
	req.Raw().Header["x-ms-file-attributes"] = []string{fileAttributes}
	req.Raw().Header["x-ms-file-creation-time"] = []string{fileCreationTime}
	req.Raw().Header["x-ms-file-last-write-time"] = []string{fileLastWriteTime}
	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}

// createHandleResponse handles the Create response.
func (client *FileClient) createHandleResponse(resp *http.Response) (FileClientCreateResponse, error) {
	// Copies the service's response headers into the strongly-typed result.
	// Any malformed timestamp/bool header aborts with the parse error.
	result := FileClientCreateResponse{}
	if val := resp.Header.Get("ETag"); val != "" {
		result.ETag = (*azcore.ETag)(&val)
	}
	if val := resp.Header.Get("Last-Modified"); val != "" {
		lastModified, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return FileClientCreateResponse{}, err
		}
		result.LastModified = &lastModified
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return FileClientCreateResponse{}, err
		}
		result.Date = &date
	}
	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
		isServerEncrypted, err := strconv.ParseBool(val)
		if err != nil {
			return FileClientCreateResponse{}, err
		}
		result.IsServerEncrypted = &isServerEncrypted
	}
	if val := resp.Header.Get("x-ms-file-permission-key"); val != "" {
		result.FilePermissionKey = &val
	}
	if val := resp.Header.Get("x-ms-file-attributes"); val != "" {
		result.FileAttributes = &val
	}
	// SMB file timestamps use the ISO8601 layout constant, not RFC1123.
	if val := resp.Header.Get("x-ms-file-creation-time"); val != "" {
		fileCreationTime, err := time.Parse(ISO8601, val)
		if err != nil {
			return FileClientCreateResponse{}, err
		}
		result.FileCreationTime = &fileCreationTime
	}
	if val := resp.Header.Get("x-ms-file-last-write-time"); val != "" {
		fileLastWriteTime, err := time.Parse(ISO8601, val)
		if err != nil {
			return FileClientCreateResponse{}, err
		}
		result.FileLastWriteTime = &fileLastWriteTime
	}
	if val := resp.Header.Get("x-ms-file-change-time"); val != "" {
		fileChangeTime, err := time.Parse(ISO8601, val)
		if err != nil {
			return FileClientCreateResponse{}, err
		}
		result.FileChangeTime = &fileChangeTime
	}
	if val := resp.Header.Get("x-ms-file-id"); val != "" {
		result.ID = &val
	}
	if val := resp.Header.Get("x-ms-file-parent-id"); val != "" {
		result.ParentID = &val
	}
	return result, nil
}

// Delete - removes the file from the storage account.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// - options - FileClientDeleteOptions contains the optional parameters for the FileClient.Delete method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ShareClient.GetProperties method.
func (client *FileClient) Delete(ctx context.Context, options *FileClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions) (FileClientDeleteResponse, error) {
	req, err := client.deleteCreateRequest(ctx, options, leaseAccessConditions)
	if err != nil {
		return FileClientDeleteResponse{}, err
	}
	resp, err := client.pl.Do(req)
	if err != nil {
		return FileClientDeleteResponse{}, err
	}
	// 202 Accepted is the only success status for Delete.
	if !runtime.HasStatusCode(resp, http.StatusAccepted) {
		return FileClientDeleteResponse{}, runtime.NewResponseError(resp)
	}
	return client.deleteHandleResponse(resp)
}

// deleteCreateRequest creates the Delete request.
+func (client *FileClient) deleteCreateRequest(ctx context.Context, options *FileClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodDelete, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// deleteHandleResponse handles the Delete response. +func (client *FileClient) deleteHandleResponse(resp *http.Response) (FileClientDeleteResponse, error) { + result := FileClientDeleteResponse{} + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return FileClientDeleteResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// Download - Reads or downloads a file from the system, including its metadata and properties. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - options - FileClientDownloadOptions contains the optional parameters for the FileClient.Download method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ShareClient.GetProperties method. 
+func (client *FileClient) Download(ctx context.Context, options *FileClientDownloadOptions, leaseAccessConditions *LeaseAccessConditions) (FileClientDownloadResponse, error) { + req, err := client.downloadCreateRequest(ctx, options, leaseAccessConditions) + if err != nil { + return FileClientDownloadResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return FileClientDownloadResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusPartialContent) { + return FileClientDownloadResponse{}, runtime.NewResponseError(resp) + } + return client.downloadHandleResponse(resp) +} + +// downloadCreateRequest creates the Download request. +func (client *FileClient) downloadCreateRequest(ctx context.Context, options *FileClientDownloadOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + runtime.SkipBodyDownload(req) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.Range != nil { + req.Raw().Header["x-ms-range"] = []string{*options.Range} + } + if options != nil && options.RangeGetContentMD5 != nil { + req.Raw().Header["x-ms-range-get-content-md5"] = []string{strconv.FormatBool(*options.RangeGetContentMD5)} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// downloadHandleResponse handles the Download response. 
+func (client *FileClient) downloadHandleResponse(resp *http.Response) (FileClientDownloadResponse, error) { + result := FileClientDownloadResponse{Body: resp.Body} + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return FileClientDownloadResponse{}, err + } + result.LastModified = &lastModified + } + for hh := range resp.Header { + if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { + if result.Metadata == nil { + result.Metadata = map[string]*string{} + } + result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) + } + } + if val := resp.Header.Get("Content-Length"); val != "" { + contentLength, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return FileClientDownloadResponse{}, err + } + result.ContentLength = &contentLength + } + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("Content-Range"); val != "" { + result.ContentRange = &val + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return FileClientDownloadResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("Content-Encoding"); val != "" { + result.ContentEncoding = &val + } + if val := resp.Header.Get("Cache-Control"); val != "" { + result.CacheControl = &val + } + if val := resp.Header.Get("Content-Disposition"); val != "" { + result.ContentDisposition = &val + } + if val := resp.Header.Get("Content-Language"); val != "" { + result.ContentLanguage = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Accept-Ranges"); val != "" { + 
result.AcceptRanges = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return FileClientDownloadResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" { + copyCompletionTime, err := time.Parse(time.RFC1123, val) + if err != nil { + return FileClientDownloadResponse{}, err + } + result.CopyCompletionTime = ©CompletionTime + } + if val := resp.Header.Get("x-ms-copy-status-description"); val != "" { + result.CopyStatusDescription = &val + } + if val := resp.Header.Get("x-ms-copy-id"); val != "" { + result.CopyID = &val + } + if val := resp.Header.Get("x-ms-copy-progress"); val != "" { + result.CopyProgress = &val + } + if val := resp.Header.Get("x-ms-copy-source"); val != "" { + result.CopySource = &val + } + if val := resp.Header.Get("x-ms-copy-status"); val != "" { + result.CopyStatus = (*CopyStatusType)(&val) + } + if val := resp.Header.Get("x-ms-content-md5"); val != "" { + fileContentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return FileClientDownloadResponse{}, err + } + result.FileContentMD5 = fileContentMD5 + } + if val := resp.Header.Get("x-ms-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return FileClientDownloadResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-file-attributes"); val != "" { + result.FileAttributes = &val + } + if val := resp.Header.Get("x-ms-file-creation-time"); val != "" { + fileCreationTime, err := time.Parse(ISO8601, val) + if err != nil { + return FileClientDownloadResponse{}, err + } + result.FileCreationTime = &fileCreationTime + } + if val := resp.Header.Get("x-ms-file-last-write-time"); val != "" { + fileLastWriteTime, err := time.Parse(ISO8601, val) + if err != nil { + return FileClientDownloadResponse{}, err + } + result.FileLastWriteTime = &fileLastWriteTime + } 
+ if val := resp.Header.Get("x-ms-file-change-time"); val != "" { + fileChangeTime, err := time.Parse(ISO8601, val) + if err != nil { + return FileClientDownloadResponse{}, err + } + result.FileChangeTime = &fileChangeTime + } + if val := resp.Header.Get("x-ms-file-permission-key"); val != "" { + result.FilePermissionKey = &val + } + if val := resp.Header.Get("x-ms-file-id"); val != "" { + result.ID = &val + } + if val := resp.Header.Get("x-ms-file-parent-id"); val != "" { + result.ParentID = &val + } + if val := resp.Header.Get("x-ms-lease-duration"); val != "" { + result.LeaseDuration = (*LeaseDurationType)(&val) + } + if val := resp.Header.Get("x-ms-lease-state"); val != "" { + result.LeaseState = (*LeaseStateType)(&val) + } + if val := resp.Header.Get("x-ms-lease-status"); val != "" { + result.LeaseStatus = (*LeaseStatusType)(&val) + } + return result, nil +} + +// ForceCloseHandles - Closes all handles open for given file +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - handleID - Specifies handle ID opened on the file or directory to be closed. Asterisk (‘*’) is a wildcard that specifies +// all handles. +// - options - FileClientForceCloseHandlesOptions contains the optional parameters for the FileClient.ForceCloseHandles method. 
+func (client *FileClient) ForceCloseHandles(ctx context.Context, handleID string, options *FileClientForceCloseHandlesOptions) (FileClientForceCloseHandlesResponse, error) { + req, err := client.forceCloseHandlesCreateRequest(ctx, handleID, options) + if err != nil { + return FileClientForceCloseHandlesResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return FileClientForceCloseHandlesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return FileClientForceCloseHandlesResponse{}, runtime.NewResponseError(resp) + } + return client.forceCloseHandlesHandleResponse(resp) +} + +// forceCloseHandlesCreateRequest creates the ForceCloseHandles request. +func (client *FileClient) forceCloseHandlesCreateRequest(ctx context.Context, handleID string, options *FileClientForceCloseHandlesOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "forceclosehandles") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + if options != nil && options.Marker != nil { + reqQP.Set("marker", *options.Marker) + } + if options != nil && options.Sharesnapshot != nil { + reqQP.Set("sharesnapshot", *options.Sharesnapshot) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-handle-id"] = []string{handleID} + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// forceCloseHandlesHandleResponse handles the ForceCloseHandles response. 
+func (client *FileClient) forceCloseHandlesHandleResponse(resp *http.Response) (FileClientForceCloseHandlesResponse, error) { + result := FileClientForceCloseHandlesResponse{} + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return FileClientForceCloseHandlesResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-marker"); val != "" { + result.Marker = &val + } + if val := resp.Header.Get("x-ms-number-of-handles-closed"); val != "" { + numberOfHandlesClosed32, err := strconv.ParseInt(val, 10, 32) + numberOfHandlesClosed := int32(numberOfHandlesClosed32) + if err != nil { + return FileClientForceCloseHandlesResponse{}, err + } + result.NumberOfHandlesClosed = &numberOfHandlesClosed + } + if val := resp.Header.Get("x-ms-number-of-handles-failed"); val != "" { + numberOfHandlesFailedToClose32, err := strconv.ParseInt(val, 10, 32) + numberOfHandlesFailedToClose := int32(numberOfHandlesFailedToClose32) + if err != nil { + return FileClientForceCloseHandlesResponse{}, err + } + result.NumberOfHandlesFailedToClose = &numberOfHandlesFailedToClose + } + return result, nil +} + +// GetProperties - Returns all user-defined metadata, standard HTTP properties, and system properties for the file. It does +// not return the content of the file. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - options - FileClientGetPropertiesOptions contains the optional parameters for the FileClient.GetProperties method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ShareClient.GetProperties method. 
+func (client *FileClient) GetProperties(ctx context.Context, options *FileClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions) (FileClientGetPropertiesResponse, error) { + req, err := client.getPropertiesCreateRequest(ctx, options, leaseAccessConditions) + if err != nil { + return FileClientGetPropertiesResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return FileClientGetPropertiesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return FileClientGetPropertiesResponse{}, runtime.NewResponseError(resp) + } + return client.getPropertiesHandleResponse(resp) +} + +// getPropertiesCreateRequest creates the GetProperties request. +func (client *FileClient) getPropertiesCreateRequest(ctx context.Context, options *FileClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodHead, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Sharesnapshot != nil { + reqQP.Set("sharesnapshot", *options.Sharesnapshot) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// getPropertiesHandleResponse handles the GetProperties response. 
+func (client *FileClient) getPropertiesHandleResponse(resp *http.Response) (FileClientGetPropertiesResponse, error) { + result := FileClientGetPropertiesResponse{} + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return FileClientGetPropertiesResponse{}, err + } + result.LastModified = &lastModified + } + for hh := range resp.Header { + if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { + if result.Metadata == nil { + result.Metadata = map[string]*string{} + } + result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) + } + } + if val := resp.Header.Get("x-ms-type"); val != "" { + result.FileType = &val + } + if val := resp.Header.Get("Content-Length"); val != "" { + contentLength, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return FileClientGetPropertiesResponse{}, err + } + result.ContentLength = &contentLength + } + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return FileClientGetPropertiesResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("Content-Encoding"); val != "" { + result.ContentEncoding = &val + } + if val := resp.Header.Get("Cache-Control"); val != "" { + result.CacheControl = &val + } + if val := resp.Header.Get("Content-Disposition"); val != "" { + result.ContentDisposition = &val + } + if val := resp.Header.Get("Content-Language"); val != "" { + result.ContentLanguage = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, 
err := time.Parse(time.RFC1123, val) + if err != nil { + return FileClientGetPropertiesResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" { + copyCompletionTime, err := time.Parse(time.RFC1123, val) + if err != nil { + return FileClientGetPropertiesResponse{}, err + } + result.CopyCompletionTime = ©CompletionTime + } + if val := resp.Header.Get("x-ms-copy-status-description"); val != "" { + result.CopyStatusDescription = &val + } + if val := resp.Header.Get("x-ms-copy-id"); val != "" { + result.CopyID = &val + } + if val := resp.Header.Get("x-ms-copy-progress"); val != "" { + result.CopyProgress = &val + } + if val := resp.Header.Get("x-ms-copy-source"); val != "" { + result.CopySource = &val + } + if val := resp.Header.Get("x-ms-copy-status"); val != "" { + result.CopyStatus = (*CopyStatusType)(&val) + } + if val := resp.Header.Get("x-ms-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return FileClientGetPropertiesResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-file-attributes"); val != "" { + result.FileAttributes = &val + } + if val := resp.Header.Get("x-ms-file-creation-time"); val != "" { + fileCreationTime, err := time.Parse(ISO8601, val) + if err != nil { + return FileClientGetPropertiesResponse{}, err + } + result.FileCreationTime = &fileCreationTime + } + if val := resp.Header.Get("x-ms-file-last-write-time"); val != "" { + fileLastWriteTime, err := time.Parse(ISO8601, val) + if err != nil { + return FileClientGetPropertiesResponse{}, err + } + result.FileLastWriteTime = &fileLastWriteTime + } + if val := resp.Header.Get("x-ms-file-change-time"); val != "" { + fileChangeTime, err := time.Parse(ISO8601, val) + if err != nil { + return FileClientGetPropertiesResponse{}, err + } + result.FileChangeTime = &fileChangeTime + } + if val := resp.Header.Get("x-ms-file-permission-key"); val != 
"" { + result.FilePermissionKey = &val + } + if val := resp.Header.Get("x-ms-file-id"); val != "" { + result.ID = &val + } + if val := resp.Header.Get("x-ms-file-parent-id"); val != "" { + result.ParentID = &val + } + if val := resp.Header.Get("x-ms-lease-duration"); val != "" { + result.LeaseDuration = (*LeaseDurationType)(&val) + } + if val := resp.Header.Get("x-ms-lease-state"); val != "" { + result.LeaseState = (*LeaseStateType)(&val) + } + if val := resp.Header.Get("x-ms-lease-status"); val != "" { + result.LeaseStatus = (*LeaseStatusType)(&val) + } + return result, nil +} + +// GetRangeList - Returns the list of valid ranges for a file. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - options - FileClientGetRangeListOptions contains the optional parameters for the FileClient.GetRangeList method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ShareClient.GetProperties method. +func (client *FileClient) GetRangeList(ctx context.Context, options *FileClientGetRangeListOptions, leaseAccessConditions *LeaseAccessConditions) (FileClientGetRangeListResponse, error) { + req, err := client.getRangeListCreateRequest(ctx, options, leaseAccessConditions) + if err != nil { + return FileClientGetRangeListResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return FileClientGetRangeListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return FileClientGetRangeListResponse{}, runtime.NewResponseError(resp) + } + return client.getRangeListHandleResponse(resp) +} + +// getRangeListCreateRequest creates the GetRangeList request. 
+func (client *FileClient) getRangeListCreateRequest(ctx context.Context, options *FileClientGetRangeListOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "rangelist") + if options != nil && options.Sharesnapshot != nil { + reqQP.Set("sharesnapshot", *options.Sharesnapshot) + } + if options != nil && options.Prevsharesnapshot != nil { + reqQP.Set("prevsharesnapshot", *options.Prevsharesnapshot) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.Range != nil { + req.Raw().Header["x-ms-range"] = []string{*options.Range} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// getRangeListHandleResponse handles the GetRangeList response. 
+func (client *FileClient) getRangeListHandleResponse(resp *http.Response) (FileClientGetRangeListResponse, error) { + result := FileClientGetRangeListResponse{} + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return FileClientGetRangeListResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-content-length"); val != "" { + fileContentLength, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return FileClientGetRangeListResponse{}, err + } + result.FileContentLength = &fileContentLength + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return FileClientGetRangeListResponse{}, err + } + result.Date = &date + } + if err := runtime.UnmarshalAsXML(resp, &result.ShareFileRangeList); err != nil { + return FileClientGetRangeListResponse{}, err + } + return result, nil +} + +// ListHandles - Lists handles for file +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - options - FileClientListHandlesOptions contains the optional parameters for the FileClient.ListHandles method. 
+func (client *FileClient) ListHandles(ctx context.Context, options *FileClientListHandlesOptions) (FileClientListHandlesResponse, error) { + req, err := client.listHandlesCreateRequest(ctx, options) + if err != nil { + return FileClientListHandlesResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return FileClientListHandlesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return FileClientListHandlesResponse{}, runtime.NewResponseError(resp) + } + return client.listHandlesHandleResponse(resp) +} + +// listHandlesCreateRequest creates the ListHandles request. +func (client *FileClient) listHandlesCreateRequest(ctx context.Context, options *FileClientListHandlesOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "listhandles") + if options != nil && options.Marker != nil { + reqQP.Set("marker", *options.Marker) + } + if options != nil && options.Maxresults != nil { + reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + if options != nil && options.Sharesnapshot != nil { + reqQP.Set("sharesnapshot", *options.Sharesnapshot) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// listHandlesHandleResponse handles the ListHandles response. 
+func (client *FileClient) listHandlesHandleResponse(resp *http.Response) (FileClientListHandlesResponse, error) { + result := FileClientListHandlesResponse{} + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return FileClientListHandlesResponse{}, err + } + result.Date = &date + } + if err := runtime.UnmarshalAsXML(resp, &result.ListHandlesResponse); err != nil { + return FileClientListHandlesResponse{}, err + } + return result, nil +} + +// ReleaseLease - [Update] The Lease File operation establishes and manages a lock on a file for write and delete operations +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - leaseID - Specifies the current lease ID on the resource. +// - options - FileClientReleaseLeaseOptions contains the optional parameters for the FileClient.ReleaseLease method. +func (client *FileClient) ReleaseLease(ctx context.Context, leaseID string, options *FileClientReleaseLeaseOptions) (FileClientReleaseLeaseResponse, error) { + req, err := client.releaseLeaseCreateRequest(ctx, leaseID, options) + if err != nil { + return FileClientReleaseLeaseResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return FileClientReleaseLeaseResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return FileClientReleaseLeaseResponse{}, runtime.NewResponseError(resp) + } + return client.releaseLeaseHandleResponse(resp) +} + +// releaseLeaseCreateRequest creates the ReleaseLease request. 
+func (client *FileClient) releaseLeaseCreateRequest(ctx context.Context, leaseID string, options *FileClientReleaseLeaseOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-lease-action"] = []string{"release"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// releaseLeaseHandleResponse handles the ReleaseLease response. +func (client *FileClient) releaseLeaseHandleResponse(resp *http.Response) (FileClientReleaseLeaseResponse, error) { + result := FileClientReleaseLeaseResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return FileClientReleaseLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return FileClientReleaseLeaseResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// SetHTTPHeaders - Sets HTTP headers on the file. 
+// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - fileAttributes - If specified, the provided file attributes shall be set. Default value: ‘Archive’ for file and ‘Directory’ +// for directory. ‘None’ can also be specified as default. +// - fileCreationTime - Creation time for the file/directory. Default value: Now. +// - fileLastWriteTime - Last write time for the file/directory. Default value: Now. +// - options - FileClientSetHTTPHeadersOptions contains the optional parameters for the FileClient.SetHTTPHeaders method. +// - ShareFileHTTPHeaders - ShareFileHTTPHeaders contains a group of parameters for the FileClient.Create method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ShareClient.GetProperties method. +func (client *FileClient) SetHTTPHeaders(ctx context.Context, fileAttributes string, fileCreationTime string, fileLastWriteTime string, options *FileClientSetHTTPHeadersOptions, shareFileHTTPHeaders *ShareFileHTTPHeaders, leaseAccessConditions *LeaseAccessConditions) (FileClientSetHTTPHeadersResponse, error) { + req, err := client.setHTTPHeadersCreateRequest(ctx, fileAttributes, fileCreationTime, fileLastWriteTime, options, shareFileHTTPHeaders, leaseAccessConditions) + if err != nil { + return FileClientSetHTTPHeadersResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return FileClientSetHTTPHeadersResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return FileClientSetHTTPHeadersResponse{}, runtime.NewResponseError(resp) + } + return client.setHTTPHeadersHandleResponse(resp) +} + +// setHTTPHeadersCreateRequest creates the SetHTTPHeaders request. 
func (client *FileClient) setHTTPHeadersCreateRequest(ctx context.Context, fileAttributes string, fileCreationTime string, fileLastWriteTime string, options *FileClientSetHTTPHeadersOptions, shareFileHTTPHeaders *ShareFileHTTPHeaders, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
	if err != nil {
		return nil, err
	}
	// Query string: comp=properties plus the optional server-side timeout.
	reqQP := req.Raw().URL.Query()
	reqQP.Set("comp", "properties")
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	// Headers are assigned directly on the raw map so the exact x-ms-* key
	// spelling is preserved (http.Header.Set would canonicalize the casing).
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.FileContentLength != nil {
		req.Raw().Header["x-ms-content-length"] = []string{strconv.FormatInt(*options.FileContentLength, 10)}
	}
	if shareFileHTTPHeaders != nil && shareFileHTTPHeaders.ContentType != nil {
		req.Raw().Header["x-ms-content-type"] = []string{*shareFileHTTPHeaders.ContentType}
	}
	if shareFileHTTPHeaders != nil && shareFileHTTPHeaders.ContentEncoding != nil {
		req.Raw().Header["x-ms-content-encoding"] = []string{*shareFileHTTPHeaders.ContentEncoding}
	}
	if shareFileHTTPHeaders != nil && shareFileHTTPHeaders.ContentLanguage != nil {
		req.Raw().Header["x-ms-content-language"] = []string{*shareFileHTTPHeaders.ContentLanguage}
	}
	if shareFileHTTPHeaders != nil && shareFileHTTPHeaders.CacheControl != nil {
		req.Raw().Header["x-ms-cache-control"] = []string{*shareFileHTTPHeaders.CacheControl}
	}
	if shareFileHTTPHeaders != nil && shareFileHTTPHeaders.ContentMD5 != nil {
		// MD5 is transmitted base64-encoded, matching the decode on the response side.
		req.Raw().Header["x-ms-content-md5"] = []string{base64.StdEncoding.EncodeToString(shareFileHTTPHeaders.ContentMD5)}
	}
	if shareFileHTTPHeaders != nil && shareFileHTTPHeaders.ContentDisposition != nil {
		req.Raw().Header["x-ms-content-disposition"] = []string{*shareFileHTTPHeaders.ContentDisposition}
	}
	if options != nil && options.FilePermission != nil {
		req.Raw().Header["x-ms-file-permission"] = []string{*options.FilePermission}
	}
	if options != nil && options.FilePermissionKey != nil {
		req.Raw().Header["x-ms-file-permission-key"] = []string{*options.FilePermissionKey}
	}
	req.Raw().Header["x-ms-file-attributes"] = []string{fileAttributes}
	req.Raw().Header["x-ms-file-creation-time"] = []string{fileCreationTime}
	req.Raw().Header["x-ms-file-last-write-time"] = []string{fileLastWriteTime}
	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}

// setHTTPHeadersHandleResponse handles the SetHTTPHeaders response.
// Each present header is copied/parsed into the result; a malformed header
// value aborts with the parse error and a zero-value response.
func (client *FileClient) setHTTPHeadersHandleResponse(resp *http.Response) (FileClientSetHTTPHeadersResponse, error) {
	result := FileClientSetHTTPHeadersResponse{}
	if val := resp.Header.Get("ETag"); val != "" {
		result.ETag = (*azcore.ETag)(&val)
	}
	if val := resp.Header.Get("Last-Modified"); val != "" {
		lastModified, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return FileClientSetHTTPHeadersResponse{}, err
		}
		result.LastModified = &lastModified
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return FileClientSetHTTPHeadersResponse{}, err
		}
		result.Date = &date
	}
	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
		isServerEncrypted, err := strconv.ParseBool(val)
		if err != nil {
			return FileClientSetHTTPHeadersResponse{}, err
		}
		result.IsServerEncrypted = &isServerEncrypted
	}
	if val := resp.Header.Get("x-ms-file-permission-key"); val != "" {
		result.FilePermissionKey = &val
	}
	if val := resp.Header.Get("x-ms-file-attributes"); val != "" {
		result.FileAttributes = &val
	}
	// File timestamps use the ISO8601 layout constant (presumably declared
	// elsewhere in this package), unlike the RFC1123 HTTP-date headers above.
	if val := resp.Header.Get("x-ms-file-creation-time"); val != "" {
		fileCreationTime, err := time.Parse(ISO8601, val)
		if err != nil {
			return FileClientSetHTTPHeadersResponse{}, err
		}
		result.FileCreationTime = &fileCreationTime
	}
	if val := resp.Header.Get("x-ms-file-last-write-time"); val != "" {
		fileLastWriteTime, err := time.Parse(ISO8601, val)
		if err != nil {
			return FileClientSetHTTPHeadersResponse{}, err
		}
		result.FileLastWriteTime = &fileLastWriteTime
	}
	if val := resp.Header.Get("x-ms-file-change-time"); val != "" {
		fileChangeTime, err := time.Parse(ISO8601, val)
		if err != nil {
			return FileClientSetHTTPHeadersResponse{}, err
		}
		result.FileChangeTime = &fileChangeTime
	}
	if val := resp.Header.Get("x-ms-file-id"); val != "" {
		result.ID = &val
	}
	if val := resp.Header.Get("x-ms-file-parent-id"); val != "" {
		result.ParentID = &val
	}
	return result, nil
}

// SetMetadata - Updates user-defined metadata for the specified file.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// - options - FileClientSetMetadataOptions contains the optional parameters for the FileClient.SetMetadata method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ShareClient.GetProperties method.
+func (client *FileClient) SetMetadata(ctx context.Context, options *FileClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions) (FileClientSetMetadataResponse, error) { + req, err := client.setMetadataCreateRequest(ctx, options, leaseAccessConditions) + if err != nil { + return FileClientSetMetadataResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return FileClientSetMetadataResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return FileClientSetMetadataResponse{}, runtime.NewResponseError(resp) + } + return client.setMetadataHandleResponse(resp) +} + +// setMetadataCreateRequest creates the SetMetadata request. +func (client *FileClient) setMetadataCreateRequest(ctx context.Context, options *FileClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "metadata") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// setMetadataHandleResponse handles the SetMetadata response. 
+func (client *FileClient) setMetadataHandleResponse(resp *http.Response) (FileClientSetMetadataResponse, error) { + result := FileClientSetMetadataResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return FileClientSetMetadataResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return FileClientSetMetadataResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return FileClientSetMetadataResponse{}, err + } + result.LastModified = &lastModified + } + return result, nil +} + +// StartCopy - Copies a blob or file to a destination file within the storage account. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - copySource - Specifies the URL of the source file or blob, up to 2 KB in length. To copy a file to another file within +// the same storage account, you may use Shared Key to authenticate the source file. If you are +// copying a file from another storage account, or if you are copying a blob from the same storage account or another storage +// account, then you must authenticate the source file or blob using a shared +// access signature. If the source is a public blob, no authentication is required to perform the copy operation. A file in +// a share snapshot can also be specified as a copy source. 
+// - options - FileClientStartCopyOptions contains the optional parameters for the FileClient.StartCopy method. +// - CopyFileSMBInfo - CopyFileSMBInfo contains a group of parameters for the FileClient.StartCopy method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ShareClient.GetProperties method. +func (client *FileClient) StartCopy(ctx context.Context, copySource string, options *FileClientStartCopyOptions, copyFileSMBInfo *CopyFileSMBInfo, leaseAccessConditions *LeaseAccessConditions) (FileClientStartCopyResponse, error) { + req, err := client.startCopyCreateRequest(ctx, copySource, options, copyFileSMBInfo, leaseAccessConditions) + if err != nil { + return FileClientStartCopyResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return FileClientStartCopyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusAccepted) { + return FileClientStartCopyResponse{}, runtime.NewResponseError(resp) + } + return client.startCopyHandleResponse(resp) +} + +// startCopyCreateRequest creates the StartCopy request. 
func (client *FileClient) startCopyCreateRequest(ctx context.Context, copySource string, options *FileClientStartCopyOptions, copyFileSMBInfo *CopyFileSMBInfo, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
	if err != nil {
		return nil, err
	}
	// Note: unlike the other operations in this client, StartCopy sets no
	// "comp" query parameter; only the optional timeout is appended.
	reqQP := req.Raw().URL.Query()
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.Metadata != nil {
		// nil-valued metadata entries are skipped rather than sent empty.
		for k, v := range options.Metadata {
			if v != nil {
				req.Raw().Header["x-ms-meta-"+k] = []string{*v}
			}
		}
	}
	req.Raw().Header["x-ms-copy-source"] = []string{copySource}
	if options != nil && options.FilePermission != nil {
		req.Raw().Header["x-ms-file-permission"] = []string{*options.FilePermission}
	}
	if options != nil && options.FilePermissionKey != nil {
		req.Raw().Header["x-ms-file-permission-key"] = []string{*options.FilePermissionKey}
	}
	if copyFileSMBInfo != nil && copyFileSMBInfo.FilePermissionCopyMode != nil {
		req.Raw().Header["x-ms-file-permission-copy-mode"] = []string{string(*copyFileSMBInfo.FilePermissionCopyMode)}
	}
	if copyFileSMBInfo != nil && copyFileSMBInfo.IgnoreReadOnly != nil {
		req.Raw().Header["x-ms-file-copy-ignore-readonly"] = []string{strconv.FormatBool(*copyFileSMBInfo.IgnoreReadOnly)}
	}
	if copyFileSMBInfo != nil && copyFileSMBInfo.FileAttributes != nil {
		req.Raw().Header["x-ms-file-attributes"] = []string{*copyFileSMBInfo.FileAttributes}
	}
	if copyFileSMBInfo != nil && copyFileSMBInfo.FileCreationTime != nil {
		req.Raw().Header["x-ms-file-creation-time"] = []string{*copyFileSMBInfo.FileCreationTime}
	}
	if copyFileSMBInfo != nil && copyFileSMBInfo.FileLastWriteTime != nil {
		req.Raw().Header["x-ms-file-last-write-time"] = []string{*copyFileSMBInfo.FileLastWriteTime}
	}
	if copyFileSMBInfo != nil && copyFileSMBInfo.SetArchiveAttribute != nil {
		req.Raw().Header["x-ms-file-copy-set-archive"] = []string{strconv.FormatBool(*copyFileSMBInfo.SetArchiveAttribute)}
	}
	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}

// startCopyHandleResponse handles the StartCopy response.
// Copies the copy-tracking headers (x-ms-copy-id / x-ms-copy-status) and the
// standard response headers into the result.
func (client *FileClient) startCopyHandleResponse(resp *http.Response) (FileClientStartCopyResponse, error) {
	result := FileClientStartCopyResponse{}
	if val := resp.Header.Get("ETag"); val != "" {
		result.ETag = (*azcore.ETag)(&val)
	}
	if val := resp.Header.Get("Last-Modified"); val != "" {
		lastModified, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return FileClientStartCopyResponse{}, err
		}
		result.LastModified = &lastModified
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return FileClientStartCopyResponse{}, err
		}
		result.Date = &date
	}
	if val := resp.Header.Get("x-ms-copy-id"); val != "" {
		result.CopyID = &val
	}
	if val := resp.Header.Get("x-ms-copy-status"); val != "" {
		result.CopyStatus = (*CopyStatusType)(&val)
	}
	return result, nil
}

// UploadRange - Upload a range of bytes to a file.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// - rangeParam - Specifies the range of bytes to be written. Both the start and end of the range must be specified. For an
// update operation, the range can be up to 4 MB in size. For a clear operation, the range can be
// up to the value of the file's full size. The File service accepts only a single byte range for the Range and 'x-ms-range'
// headers, and the byte range must be specified in the following format:
// bytes=startByte-endByte.
// - fileRangeWrite - Specify one of the following options: - Update: Writes the bytes specified by the request body into the
// specified range. The Range and Content-Length headers must match to perform the update. - Clear:
// Clears the specified range and releases the space used in storage for that range. To clear a range, set the Content-Length
// header to zero, and set the Range header to a value that indicates the range
// to clear, up to maximum file size.
// - contentLength - Specifies the number of bytes being transmitted in the request body. When the x-ms-write header is set
// to clear, the value of this header must be set to zero.
// - optionalbody - Initial data.
// - options - FileClientUploadRangeOptions contains the optional parameters for the FileClient.UploadRange method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ShareClient.GetProperties method.
func (client *FileClient) UploadRange(ctx context.Context, rangeParam string, fileRangeWrite FileRangeWriteType, contentLength int64, optionalbody io.ReadSeekCloser, options *FileClientUploadRangeOptions, leaseAccessConditions *LeaseAccessConditions) (FileClientUploadRangeResponse, error) {
	req, err := client.uploadRangeCreateRequest(ctx, rangeParam, fileRangeWrite, contentLength, optionalbody, options, leaseAccessConditions)
	if err != nil {
		return FileClientUploadRangeResponse{}, err
	}
	resp, err := client.pl.Do(req)
	if err != nil {
		return FileClientUploadRangeResponse{}, err
	}
	// 201 Created is the only success status for a range upload.
	if !runtime.HasStatusCode(resp, http.StatusCreated) {
		return FileClientUploadRangeResponse{}, runtime.NewResponseError(resp)
	}
	return client.uploadRangeHandleResponse(resp)
}

// uploadRangeCreateRequest creates the UploadRange request.
func (client *FileClient) uploadRangeCreateRequest(ctx context.Context, rangeParam string, fileRangeWrite FileRangeWriteType, contentLength int64, optionalbody io.ReadSeekCloser, options *FileClientUploadRangeOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
	if err != nil {
		return nil, err
	}
	// Query string: comp=range plus the optional server-side timeout.
	reqQP := req.Raw().URL.Query()
	reqQP.Set("comp", "range")
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	req.Raw().Header["x-ms-range"] = []string{rangeParam}
	req.Raw().Header["x-ms-write"] = []string{string(fileRangeWrite)}
	// Content-Length is set explicitly from the caller-supplied value; per the
	// operation docs it must be zero for a clear-range write.
	req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)}
	if options != nil && options.ContentMD5 != nil {
		req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.ContentMD5)}
	}
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	// SetBody attaches the (possibly nil for clear) payload; its error, if any,
	// is returned alongside the request.
	return req, req.SetBody(optionalbody, "application/octet-stream")
}

// uploadRangeHandleResponse handles the UploadRange response.
func (client *FileClient) uploadRangeHandleResponse(resp *http.Response) (FileClientUploadRangeResponse, error) {
	result := FileClientUploadRangeResponse{}
	if val := resp.Header.Get("ETag"); val != "" {
		result.ETag = (*azcore.ETag)(&val)
	}
	if val := resp.Header.Get("Last-Modified"); val != "" {
		lastModified, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return FileClientUploadRangeResponse{}, err
		}
		result.LastModified = &lastModified
	}
	if val := resp.Header.Get("Content-MD5"); val != "" {
		contentMD5, err := base64.StdEncoding.DecodeString(val)
		if err != nil {
			return FileClientUploadRangeResponse{}, err
		}
		result.ContentMD5 = contentMD5
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return FileClientUploadRangeResponse{}, err
		}
		result.Date = &date
	}
	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
		isServerEncrypted, err := strconv.ParseBool(val)
		if err != nil {
			return FileClientUploadRangeResponse{}, err
		}
		result.IsServerEncrypted = &isServerEncrypted
	}
	return result, nil
}

// UploadRangeFromURL - Upload a range of bytes to a file where the contents are read from a URL.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// - rangeParam - Writes data to the specified byte range in the file.
// - copySource - Specifies the URL of the source file or blob, up to 2 KB in length. To copy a file to another file within
// the same storage account, you may use Shared Key to authenticate the source file. If you are
// copying a file from another storage account, or if you are copying a blob from the same storage account or another storage
// account, then you must authenticate the source file or blob using a shared
// access signature. If the source is a public blob, no authentication is required to perform the copy operation. A file in
// a share snapshot can also be specified as a copy source.
// - contentLength - Specifies the number of bytes being transmitted in the request body. When the x-ms-write header is set
// to clear, the value of this header must be set to zero.
// - options - FileClientUploadRangeFromURLOptions contains the optional parameters for the FileClient.UploadRangeFromURL method.
// - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the FileClient.UploadRangeFromURL
// method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ShareClient.GetProperties method.
func (client *FileClient) UploadRangeFromURL(ctx context.Context, rangeParam string, copySource string, contentLength int64, options *FileClientUploadRangeFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (FileClientUploadRangeFromURLResponse, error) {
	req, err := client.uploadRangeFromURLCreateRequest(ctx, rangeParam, copySource, contentLength, options, sourceModifiedAccessConditions, leaseAccessConditions)
	if err != nil {
		return FileClientUploadRangeFromURLResponse{}, err
	}
	resp, err := client.pl.Do(req)
	if err != nil {
		return FileClientUploadRangeFromURLResponse{}, err
	}
	// 201 Created is the only success status for a range upload.
	if !runtime.HasStatusCode(resp, http.StatusCreated) {
		return FileClientUploadRangeFromURLResponse{}, runtime.NewResponseError(resp)
	}
	return client.uploadRangeFromURLHandleResponse(resp)
}

// uploadRangeFromURLCreateRequest creates the UploadRangeFromURL request.
func (client *FileClient) uploadRangeFromURLCreateRequest(ctx context.Context, rangeParam string, copySource string, contentLength int64, options *FileClientUploadRangeFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
	if err != nil {
		return nil, err
	}
	// Query string: comp=range plus the optional server-side timeout.
	reqQP := req.Raw().URL.Query()
	reqQP.Set("comp", "range")
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	req.Raw().Header["x-ms-range"] = []string{rangeParam}
	req.Raw().Header["x-ms-copy-source"] = []string{copySource}
	if options != nil && options.SourceRange != nil {
		req.Raw().Header["x-ms-source-range"] = []string{*options.SourceRange}
	}
	// This operation always performs an update write; there is no clear mode.
	req.Raw().Header["x-ms-write"] = []string{"update"}
	req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)}
	// CRC64 values are transmitted base64-encoded.
	if options != nil && options.SourceContentCRC64 != nil {
		req.Raw().Header["x-ms-source-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentCRC64)}
	}
	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatchCRC64 != nil {
		req.Raw().Header["x-ms-source-if-match-crc64"] = []string{base64.StdEncoding.EncodeToString(sourceModifiedAccessConditions.SourceIfMatchCRC64)}
	}
	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatchCRC64 != nil {
		req.Raw().Header["x-ms-source-if-none-match-crc64"] = []string{base64.StdEncoding.EncodeToString(sourceModifiedAccessConditions.SourceIfNoneMatchCRC64)}
	}
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
	}
	if options != nil && options.CopySourceAuthorization != nil {
		req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}

// uploadRangeFromURLHandleResponse handles the UploadRangeFromURL response.
// Copies the standard response headers plus the x-ms-content-crc64 /
// Content-MD5 transactional checksums (both base64-decoded) into the result.
func (client *FileClient) uploadRangeFromURLHandleResponse(resp *http.Response) (FileClientUploadRangeFromURLResponse, error) {
	result := FileClientUploadRangeFromURLResponse{}
	if val := resp.Header.Get("ETag"); val != "" {
		result.ETag = (*azcore.ETag)(&val)
	}
	if val := resp.Header.Get("Last-Modified"); val != "" {
		lastModified, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return FileClientUploadRangeFromURLResponse{}, err
		}
		result.LastModified = &lastModified
	}
	if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
		xMSContentCRC64, err := base64.StdEncoding.DecodeString(val)
		if err != nil {
			return FileClientUploadRangeFromURLResponse{}, err
		}
		result.XMSContentCRC64 = xMSContentCRC64
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return FileClientUploadRangeFromURLResponse{}, err
		}
		result.Date = &date
	}
	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
		isServerEncrypted, err := strconv.ParseBool(val)
		if err != nil {
			return FileClientUploadRangeFromURLResponse{}, err
		}
		result.IsServerEncrypted = &isServerEncrypted
	}
	if val := resp.Header.Get("Content-MD5"); val != "" {
		contentMD5, err := base64.StdEncoding.DecodeString(val)
		if err != nil {
			return FileClientUploadRangeFromURLResponse{}, err
		}
		result.ContentMD5 = contentMD5
	}
	return result, nil
}
diff --git a/sdk/storage/azfile/internal/generated/zz_models.go b/sdk/storage/azfile/internal/generated/zz_models.go
new file mode 100644
index 000000000000..95443aea430f
--- /dev/null
+++ b/sdk/storage/azfile/internal/generated/zz_models.go
@@ -0,0 +1,932 @@
//go:build go1.18
// +build go1.18

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
// DO NOT EDIT.

package generated

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"time"
)

// AccessPolicy - An Access policy.
type AccessPolicy struct {
	// The date-time the policy expires.
	Expiry *time.Time `xml:"Expiry"`

	// The permissions for the ACL policy.
	Permission *string `xml:"Permission"`

	// The date-time the policy is active.
	Start *time.Time `xml:"Start"`
}

// ClearRange - An XML-serialized byte range with required Start and End offsets.
type ClearRange struct {
	// REQUIRED
	End *int64 `xml:"End"`

	// REQUIRED
	Start *int64 `xml:"Start"`
}

// CopyFileSMBInfo contains a group of parameters for the FileClient.StartCopy method.
type CopyFileSMBInfo struct {
	// Specifies either the option to copy file attributes from a source file(source) to a target file or a list of attributes
	// to set on a target file.
	FileAttributes *string
	// Specifies either the option to copy file creation time from a source file(source) to a target file or a time value in ISO
	// 8601 format to set as creation time on a target file.
	FileCreationTime *string
	// Specifies either the option to copy file last write time from a source file(source) to a target file or a time value in
	// ISO 8601 format to set as last write time on a target file.
	FileLastWriteTime *string
	// Specifies the option to copy file security descriptor from source file or to set it using the value which is defined by
	// the header value of x-ms-file-permission or x-ms-file-permission-key.
	FilePermissionCopyMode *PermissionCopyModeType
	// Specifies the option to overwrite the target file if it already exists and has read-only attribute set.
	IgnoreReadOnly *bool
	// Specifies the option to set archive attribute on a target file. True means archive attribute will be set on a target file
	// despite attribute overrides or a source file state.
	SetArchiveAttribute *bool
}

// CORSRule - CORS is an HTTP feature that enables a web application running under one domain to access resources in another
// domain. Web browsers implement a security restriction known as same-origin policy that
// prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin
// domain) to call APIs in another domain.
type CORSRule struct {
	// REQUIRED; The request headers that the origin domain may specify on the CORS request.
	AllowedHeaders *string `xml:"AllowedHeaders"`

	// REQUIRED; The methods (HTTP request verbs) that the origin domain may use for a CORS request. (comma separated)
	AllowedMethods *string `xml:"AllowedMethods"`

	// REQUIRED; The origin domains that are permitted to make a request against the storage service via CORS. The origin domain
	// is the domain from which the request originates. Note that the origin must be an exact
	// case-sensitive match with the origin that the user agent sends to the service. You can also use the wildcard character '*'
	// to allow all origin domains to make requests via CORS.
	AllowedOrigins *string `xml:"AllowedOrigins"`

	// REQUIRED; The response headers that may be sent in the response to the CORS request and exposed by the browser to the request
	// issuer.
	ExposedHeaders *string `xml:"ExposedHeaders"`

	// REQUIRED; The maximum amount of time that a browser should cache the preflight OPTIONS request.
	MaxAgeInSeconds *int32 `xml:"MaxAgeInSeconds"`
}

// Directory - A listed directory item.
type Directory struct {
	// REQUIRED
	Name *string `xml:"Name"`
	Attributes *string `xml:"Attributes"`
	ID *string `xml:"FileId"`
	PermissionKey *string `xml:"PermissionKey"`

	// File properties.
	Properties *FileProperty `xml:"Properties"`
}

// DirectoryClientCreateOptions contains the optional parameters for the DirectoryClient.Create method.
type DirectoryClientCreateOptions struct {
	// If specified the permission (security descriptor) shall be set for the directory/file. This header can be used if Permission
	// size is <= 8KB, else x-ms-file-permission-key header shall be used. Default
	// value: Inherit. If SDDL is specified as input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission
	// or x-ms-file-permission-key should be specified.
	FilePermission *string
	// Key of the permission to be set for the directory/file. Note: Only one of the x-ms-file-permission or x-ms-file-permission-key
	// should be specified.
	FilePermissionKey *string
	// A name-value pair to associate with a file storage object.
	Metadata map[string]*string
	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations.
	// [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN]
	Timeout *int32
}

// DirectoryClientDeleteOptions contains the optional parameters for the DirectoryClient.Delete method.
type DirectoryClientDeleteOptions struct {
	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations.
	// [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN]
	Timeout *int32
}

// DirectoryClientForceCloseHandlesOptions contains the optional parameters for the DirectoryClient.ForceCloseHandles method.
type DirectoryClientForceCloseHandlesOptions struct {
	// A string value that identifies the portion of the list to be returned with the next list operation. The operation returns
	// a marker value within the response body if the list returned was not complete.
	// The marker value may then be used in a subsequent call to request the next set of list items. The marker value is opaque
	// to the client.
	Marker *string
	// Specifies operation should apply to the directory specified in the URI, its files, its subdirectories and their files.
	Recursive *bool
	// The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query.
	Sharesnapshot *string
	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations.
	// [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN]
	Timeout *int32
}

// DirectoryClientGetPropertiesOptions contains the optional parameters for the DirectoryClient.GetProperties method.
type DirectoryClientGetPropertiesOptions struct {
	// The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query.
	Sharesnapshot *string
	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations.
	// [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN]
	Timeout *int32
}

// DirectoryClientListFilesAndDirectoriesSegmentOptions contains the optional parameters for the DirectoryClient.NewListFilesAndDirectoriesSegmentPager
// method.
type DirectoryClientListFilesAndDirectoriesSegmentOptions struct {
	// Include this parameter to specify one or more datasets to include in the response.
	Include []ListFilesIncludeType
	// Include extended information.
	IncludeExtendedInfo *bool
	// A string value that identifies the portion of the list to be returned with the next list operation. The operation returns
	// a marker value within the response body if the list returned was not complete.
+ // The marker value may then be used in a subsequent call to request the next set of list items. The marker value is opaque + // to the client. + Marker *string + // Specifies the maximum number of entries to return. If the request does not specify maxresults, or specifies a value greater + // than 5,000, the server will return up to 5,000 items. + Maxresults *int32 + // Filters the results to return only entries whose name begins with the specified prefix. + Prefix *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query. + Sharesnapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// DirectoryClientListHandlesOptions contains the optional parameters for the DirectoryClient.ListHandles method. +type DirectoryClientListHandlesOptions struct { + // A string value that identifies the portion of the list to be returned with the next list operation. The operation returns + // a marker value within the response body if the list returned was not complete. + // The marker value may then be used in a subsequent call to request the next set of list items. The marker value is opaque + // to the client. + Marker *string + // Specifies the maximum number of entries to return. If the request does not specify maxresults, or specifies a value greater + // than 5,000, the server will return up to 5,000 items. + Maxresults *int32 + // Specifies operation should apply to the directory specified in the URI, its files, its subdirectories and their files. + Recursive *bool + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query. + Sharesnapshot *string + // The timeout parameter is expressed in seconds. 
For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// DirectoryClientSetMetadataOptions contains the optional parameters for the DirectoryClient.SetMetadata method. +type DirectoryClientSetMetadataOptions struct { + // A name-value pair to associate with a file storage object. + Metadata map[string]*string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// DirectoryClientSetPropertiesOptions contains the optional parameters for the DirectoryClient.SetProperties method. +type DirectoryClientSetPropertiesOptions struct { + // If specified the permission (security descriptor) shall be set for the directory/file. This header can be used if Permission + // size is <= 8KB, else x-ms-file-permission-key header shall be used. Default + // value: Inherit. If SDDL is specified as input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission + // or x-ms-file-permission-key should be specified. + FilePermission *string + // Key of the permission to be set for the directory/file. Note: Only one of the x-ms-file-permission or x-ms-file-permission-key + // should be specified. + FilePermissionKey *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// File - A listed file item. +type File struct { + // REQUIRED + Name *string `xml:"Name"` + + // REQUIRED; File properties. 
+ Properties *FileProperty `xml:"Properties"` + Attributes *string `xml:"Attributes"` + ID *string `xml:"FileId"` + PermissionKey *string `xml:"PermissionKey"` +} + +// FileClientAbortCopyOptions contains the optional parameters for the FileClient.AbortCopy method. +type FileClientAbortCopyOptions struct { + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// FileClientAcquireLeaseOptions contains the optional parameters for the FileClient.AcquireLease method. +type FileClientAcquireLeaseOptions struct { + // Proposed lease ID, in a GUID string format. The File service returns 400 (Invalid request) if the proposed lease ID is + // not in the correct format. See Guid Constructor (String) for a list of valid GUID + // string formats. + ProposedLeaseID *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// FileClientBreakLeaseOptions contains the optional parameters for the FileClient.BreakLease method. +type FileClientBreakLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// FileClientChangeLeaseOptions contains the optional parameters for the FileClient.ChangeLease method. +type FileClientChangeLeaseOptions struct { + // Proposed lease ID, in a GUID string format. The File service returns 400 (Invalid request) if the proposed lease ID is + // not in the correct format. See Guid Constructor (String) for a list of valid GUID + // string formats. + ProposedLeaseID *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// FileClientCreateOptions contains the optional parameters for the FileClient.Create method. +type FileClientCreateOptions struct { + // If specified the permission (security descriptor) shall be set for the directory/file. This header can be used if Permission + // size is <= 8KB, else x-ms-file-permission-key header shall be used. Default + // value: Inherit. If SDDL is specified as input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission + // or x-ms-file-permission-key should be specified. + FilePermission *string + // Key of the permission to be set for the directory/file. Note: Only one of the x-ms-file-permission or x-ms-file-permission-key + // should be specified. + FilePermissionKey *string + // A name-value pair to associate with a file storage object. + Metadata map[string]*string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// FileClientDeleteOptions contains the optional parameters for the FileClient.Delete method. +type FileClientDeleteOptions struct { + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// FileClientDownloadOptions contains the optional parameters for the FileClient.Download method. +type FileClientDownloadOptions struct { + // Return file data only from the specified byte range. + Range *string + // When this header is set to true and specified together with the Range header, the service returns the MD5 hash for the + // range, as long as the range is less than or equal to 4 MB in size. + RangeGetContentMD5 *bool + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// FileClientForceCloseHandlesOptions contains the optional parameters for the FileClient.ForceCloseHandles method. +type FileClientForceCloseHandlesOptions struct { + // A string value that identifies the portion of the list to be returned with the next list operation. The operation returns + // a marker value within the response body if the list returned was not complete. + // The marker value may then be used in a subsequent call to request the next set of list items. The marker value is opaque + // to the client. + Marker *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query. 
+ Sharesnapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// FileClientGetPropertiesOptions contains the optional parameters for the FileClient.GetProperties method. +type FileClientGetPropertiesOptions struct { + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query. + Sharesnapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// FileClientGetRangeListOptions contains the optional parameters for the FileClient.GetRangeList method. +type FileClientGetRangeListOptions struct { + // The previous snapshot parameter is an opaque DateTime value that, when present, specifies the previous snapshot. + Prevsharesnapshot *string + // Specifies the range of bytes over which to list ranges, inclusively. + Range *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query. + Sharesnapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// FileClientListHandlesOptions contains the optional parameters for the FileClient.ListHandles method. +type FileClientListHandlesOptions struct { + // A string value that identifies the portion of the list to be returned with the next list operation. 
The operation returns + // a marker value within the response body if the list returned was not complete. + // The marker value may then be used in a subsequent call to request the next set of list items. The marker value is opaque + // to the client. + Marker *string + // Specifies the maximum number of entries to return. If the request does not specify maxresults, or specifies a value greater + // than 5,000, the server will return up to 5,000 items. + Maxresults *int32 + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query. + Sharesnapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// FileClientReleaseLeaseOptions contains the optional parameters for the FileClient.ReleaseLease method. +type FileClientReleaseLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// FileClientSetHTTPHeadersOptions contains the optional parameters for the FileClient.SetHTTPHeaders method. +type FileClientSetHTTPHeadersOptions struct { + // Resizes a file to the specified size. If the specified byte value is less than the current size of the file, then all ranges + // above the specified byte value are cleared. + FileContentLength *int64 + // If specified the permission (security descriptor) shall be set for the directory/file. 
This header can be used if Permission + // size is <= 8KB, else x-ms-file-permission-key header shall be used. Default + // value: Inherit. If SDDL is specified as input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission + // or x-ms-file-permission-key should be specified. + FilePermission *string + // Key of the permission to be set for the directory/file. Note: Only one of the x-ms-file-permission or x-ms-file-permission-key + // should be specified. + FilePermissionKey *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// FileClientSetMetadataOptions contains the optional parameters for the FileClient.SetMetadata method. +type FileClientSetMetadataOptions struct { + // A name-value pair to associate with a file storage object. + Metadata map[string]*string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// FileClientStartCopyOptions contains the optional parameters for the FileClient.StartCopy method. +type FileClientStartCopyOptions struct { + // If specified the permission (security descriptor) shall be set for the directory/file. This header can be used if Permission + // size is <= 8KB, else x-ms-file-permission-key header shall be used. Default + // value: Inherit. If SDDL is specified as input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission + // or x-ms-file-permission-key should be specified. + FilePermission *string + // Key of the permission to be set for the directory/file. 
Note: Only one of the x-ms-file-permission or x-ms-file-permission-key + // should be specified. + FilePermissionKey *string + // A name-value pair to associate with a file storage object. + Metadata map[string]*string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// FileClientUploadRangeFromURLOptions contains the optional parameters for the FileClient.UploadRangeFromURL method. +type FileClientUploadRangeFromURLOptions struct { + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + // Specify the crc64 calculated for the range of bytes that must be read from the copy source. + SourceContentCRC64 []byte + // Bytes of source data in the specified range. + SourceRange *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// FileClientUploadRangeOptions contains the optional parameters for the FileClient.UploadRange method. +type FileClientUploadRangeOptions struct { + // An MD5 hash of the content. This hash is used to verify the integrity of the data during transport. When the Content-MD5 + // header is specified, the File service compares the hash of the content that has + // arrived with the header value that was sent. If the two hashes do not match, the operation will fail with error code 400 + // (Bad Request). + ContentMD5 []byte + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// FileProperty - File properties. +type FileProperty struct { + // REQUIRED; Content length of the file. This value may not be up-to-date since an SMB client may have modified the file locally. + // The value of Content-Length may not reflect that fact until the handle is closed or + // the op-lock is broken. To retrieve current property values, call Get File Properties. + ContentLength *int64 `xml:"Content-Length"` + ChangeTime *time.Time `xml:"ChangeTime"` + CreationTime *time.Time `xml:"CreationTime"` + ETag *azcore.ETag `xml:"Etag"` + LastAccessTime *time.Time `xml:"LastAccessTime"` + LastModified *time.Time `xml:"Last-Modified"` + LastWriteTime *time.Time `xml:"LastWriteTime"` +} + +// FileRange - An Azure Storage file range. +type FileRange struct { + // REQUIRED; End of the range. + End *int64 `xml:"End"` + + // REQUIRED; Start of the range. + Start *int64 `xml:"Start"` +} + +// FilesAndDirectoriesListSegment - Abstract for entries that can be listed from Directory. +type FilesAndDirectoriesListSegment struct { + // REQUIRED + Directories []*Directory `xml:"Directory"` + + // REQUIRED + Files []*File `xml:"File"` +} + +// Handle - A listed Azure Storage handle item. +type Handle struct { + // REQUIRED; Client IP that opened the handle + ClientIP *string `xml:"ClientIp"` + + // REQUIRED; FileId uniquely identifies the file or directory. + FileID *string `xml:"FileId"` + + // REQUIRED; XSMB service handle ID + ID *string `xml:"HandleId"` + + // REQUIRED; Time when the session that previously opened the handle has last been reconnected. 
(UTC) + OpenTime *time.Time `xml:"OpenTime"` + + // REQUIRED; File or directory name including full path starting from share root + Path *string `xml:"Path"` + + // REQUIRED; SMB session ID in context of which the file handle was opened + SessionID *string `xml:"SessionId"` + + // Time handle was last connected to (UTC) + LastReconnectTime *time.Time `xml:"LastReconnectTime"` + + // ParentId uniquely identifies the parent directory of the object. + ParentID *string `xml:"ParentId"` +} + +// LeaseAccessConditions contains a group of parameters for the ShareClient.GetProperties method. +type LeaseAccessConditions struct { + // If specified, the operation only succeeds if the resource's lease is active and matches this ID. + LeaseID *string +} + +// ListFilesAndDirectoriesSegmentResponse - An enumeration of directories and files. +type ListFilesAndDirectoriesSegmentResponse struct { + // REQUIRED + DirectoryPath *string `xml:"DirectoryPath,attr"` + + // REQUIRED + NextMarker *string `xml:"NextMarker"` + + // REQUIRED + Prefix *string `xml:"Prefix"` + + // REQUIRED; Abstract for entries that can be listed from Directory. + Segment *FilesAndDirectoriesListSegment `xml:"Entries"` + + // REQUIRED + ServiceEndpoint *string `xml:"ServiceEndpoint,attr"` + + // REQUIRED + ShareName *string `xml:"ShareName,attr"` + DirectoryID *string `xml:"DirectoryId"` + Marker *string `xml:"Marker"` + MaxResults *int32 `xml:"MaxResults"` + ShareSnapshot *string `xml:"ShareSnapshot,attr"` +} + +// ListHandlesResponse - An enumeration of handles. +type ListHandlesResponse struct { + // REQUIRED + NextMarker *string `xml:"NextMarker"` + Handles []*Handle `xml:"Entries>Handle"` +} + +// ListSharesResponse - An enumeration of shares. 
+type ListSharesResponse struct { + // REQUIRED + NextMarker *string `xml:"NextMarker"` + + // REQUIRED + ServiceEndpoint *string `xml:"ServiceEndpoint,attr"` + Marker *string `xml:"Marker"` + MaxResults *int32 `xml:"MaxResults"` + Prefix *string `xml:"Prefix"` + Shares []*Share `xml:"Shares>Share"` +} + +// Metrics - Storage Analytics metrics for file service. +type Metrics struct { + // REQUIRED; Indicates whether metrics are enabled for the File service. + Enabled *bool `xml:"Enabled"` + + // REQUIRED; The version of Storage Analytics to configure. + Version *string `xml:"Version"` + + // Indicates whether metrics should generate summary statistics for called API operations. + IncludeAPIs *bool `xml:"IncludeAPIs"` + + // The retention policy. + RetentionPolicy *RetentionPolicy `xml:"RetentionPolicy"` +} + +// ProtocolSettings - Protocol settings +type ProtocolSettings struct { + // Settings for SMB protocol. + Smb *SMBSettings `xml:"SMB"` +} + +// RetentionPolicy - The retention policy. +type RetentionPolicy struct { + // REQUIRED; Indicates whether a retention policy is enabled for the File service. If false, metrics data is retained, and + // the user is responsible for deleting it. + Enabled *bool `xml:"Enabled"` + + // Indicates the number of days that metrics data should be retained. All data older than this value will be deleted. Metrics + // data is deleted on a best-effort basis after the retention period expires. + Days *int32 `xml:"Days"` +} + +// SMBSettings - Settings for SMB protocol. +type SMBSettings struct { + // Settings for SMB Multichannel. + Multichannel *SMBMultichannel `xml:"Multichannel"` +} + +// ServiceClientGetPropertiesOptions contains the optional parameters for the ServiceClient.GetProperties method. +type ServiceClientGetPropertiesOptions struct { + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// ServiceClientListSharesSegmentOptions contains the optional parameters for the ServiceClient.NewListSharesSegmentPager +// method. +type ServiceClientListSharesSegmentOptions struct { + // Include this parameter to specify one or more datasets to include in the response. + Include []ListSharesIncludeType + // A string value that identifies the portion of the list to be returned with the next list operation. The operation returns + // a marker value within the response body if the list returned was not complete. + // The marker value may then be used in a subsequent call to request the next set of list items. The marker value is opaque + // to the client. + Marker *string + // Specifies the maximum number of entries to return. If the request does not specify maxresults, or specifies a value greater + // than 5,000, the server will return up to 5,000 items. + Maxresults *int32 + // Filters the results to return only entries whose name begins with the specified prefix. + Prefix *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// ServiceClientSetPropertiesOptions contains the optional parameters for the ServiceClient.SetProperties method. +type ServiceClientSetPropertiesOptions struct { + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// Share - A listed Azure Storage share item. 
+type Share struct {
+ // REQUIRED
+ Name *string `xml:"Name"`
+
+ // REQUIRED; Properties of a share.
+ Properties *ShareProperties `xml:"Properties"`
+ Deleted *bool `xml:"Deleted"`
+
+ // Dictionary of name-value pairs associated with the share as metadata.
+ Metadata map[string]*string `xml:"Metadata"`
+ Snapshot *string `xml:"Snapshot"`
+ Version *string `xml:"Version"`
+}
+
+// ShareClientAcquireLeaseOptions contains the optional parameters for the ShareClient.AcquireLease method.
+type ShareClientAcquireLeaseOptions struct {
+ // Proposed lease ID, in a GUID string format. The File service returns 400 (Invalid request) if the proposed lease ID is
+ // not in the correct format. See Guid Constructor (String) for a list of valid GUID
+ // string formats.
+ ProposedLeaseID *string
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query.
+ Sharesnapshot *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN]
+ Timeout *int32
+}
+
+// ShareClientBreakLeaseOptions contains the optional parameters for the ShareClient.BreakLease method.
+type ShareClientBreakLeaseOptions struct {
+ // For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This
+ // break period is only used if it is shorter than the time remaining on the
+ // lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has
+ // expired, but the lease may be held for longer than the break period.
If this + // header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, + // and an infinite lease breaks immediately. + BreakPeriod *int32 + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query. + Sharesnapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// ShareClientChangeLeaseOptions contains the optional parameters for the ShareClient.ChangeLease method. +type ShareClientChangeLeaseOptions struct { + // Proposed lease ID, in a GUID string format. The File service returns 400 (Invalid request) if the proposed lease ID is + // not in the correct format. See Guid Constructor (String) for a list of valid GUID + // string formats. + ProposedLeaseID *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query. + Sharesnapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// ShareClientCreateOptions contains the optional parameters for the ShareClient.Create method. +type ShareClientCreateOptions struct { + // Specifies the access tier of the share. 
+ AccessTier *ShareAccessTier + // Protocols to enable on the share. + EnabledProtocols *string + // A name-value pair to associate with a file storage object. + Metadata map[string]*string + // Specifies the maximum size of the share, in gigabytes. + Quota *int32 + // Root squash to set on the share. Only valid for NFS shares. + RootSquash *ShareRootSquash + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// ShareClientCreatePermissionOptions contains the optional parameters for the ShareClient.CreatePermission method. +type ShareClientCreatePermissionOptions struct { + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// ShareClientCreateSnapshotOptions contains the optional parameters for the ShareClient.CreateSnapshot method. +type ShareClientCreateSnapshotOptions struct { + // A name-value pair to associate with a file storage object. + Metadata map[string]*string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// ShareClientDeleteOptions contains the optional parameters for the ShareClient.Delete method. +type ShareClientDeleteOptions struct { + // Specifies the option include to delete the base share and all of its snapshots. + DeleteSnapshots *DeleteSnapshotsOptionType + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query. 
+ Sharesnapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// ShareClientGetAccessPolicyOptions contains the optional parameters for the ShareClient.GetAccessPolicy method. +type ShareClientGetAccessPolicyOptions struct { + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// ShareClientGetPermissionOptions contains the optional parameters for the ShareClient.GetPermission method. +type ShareClientGetPermissionOptions struct { + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// ShareClientGetPropertiesOptions contains the optional parameters for the ShareClient.GetProperties method. +type ShareClientGetPropertiesOptions struct { + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query. + Sharesnapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// ShareClientGetStatisticsOptions contains the optional parameters for the ShareClient.GetStatistics method. +type ShareClientGetStatisticsOptions struct { + // The timeout parameter is expressed in seconds. 
For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// ShareClientReleaseLeaseOptions contains the optional parameters for the ShareClient.ReleaseLease method. +type ShareClientReleaseLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query. + Sharesnapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// ShareClientRenewLeaseOptions contains the optional parameters for the ShareClient.RenewLease method. +type ShareClientRenewLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query. + Sharesnapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// ShareClientRestoreOptions contains the optional parameters for the ShareClient.Restore method. +type ShareClientRestoreOptions struct { + // Specifies the name of the previously-deleted share. 
+ DeletedShareName *string + // Specifies the version of the previously-deleted share. + DeletedShareVersion *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// ShareClientSetAccessPolicyOptions contains the optional parameters for the ShareClient.SetAccessPolicy method. +type ShareClientSetAccessPolicyOptions struct { + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// ShareClientSetMetadataOptions contains the optional parameters for the ShareClient.SetMetadata method. +type ShareClientSetMetadataOptions struct { + // A name-value pair to associate with a file storage object. + Metadata map[string]*string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// ShareClientSetPropertiesOptions contains the optional parameters for the ShareClient.SetProperties method. +type ShareClientSetPropertiesOptions struct { + // Specifies the access tier of the share. + AccessTier *ShareAccessTier + // Specifies the maximum size of the share, in gigabytes. + Quota *int32 + // Root squash to set on the share. Only valid for NFS shares. + RootSquash *ShareRootSquash + // The timeout parameter is expressed in seconds. 
For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// ShareFileHTTPHeaders contains a group of parameters for the FileClient.Create method. +type ShareFileHTTPHeaders struct { + // Sets the file's cache control. The File service stores this value but does not use or modify it. + CacheControl *string + // Sets the file's Content-Disposition header. + ContentDisposition *string + // Specifies which content encodings have been applied to the file. + ContentEncoding *string + // Specifies the natural languages used by this resource. + ContentLanguage *string + // Sets the file's MD5 hash. + ContentMD5 []byte + // Sets the MIME content type of the file. The default type is 'application/octet-stream'. + ContentType *string +} + +// ShareFileRangeList - The list of file ranges +type ShareFileRangeList struct { + ClearRanges []*ClearRange `xml:"ClearRange"` + Ranges []*FileRange `xml:"Range"` +} + +// SharePermission - A permission (a security descriptor) at the share level. +type SharePermission struct { + // REQUIRED; The permission in the Security Descriptor Definition Language (SDDL). + Permission *string `json:"permission,omitempty"` +} + +// ShareProperties - Properties of a share. +type ShareProperties struct { + // REQUIRED + ETag *azcore.ETag `xml:"Etag"` + + // REQUIRED + LastModified *time.Time `xml:"Last-Modified"` + + // REQUIRED + Quota *int32 `xml:"Quota"` + AccessTier *string `xml:"AccessTier"` + AccessTierChangeTime *time.Time `xml:"AccessTierChangeTime"` + AccessTierTransitionState *string `xml:"AccessTierTransitionState"` + DeletedTime *time.Time `xml:"DeletedTime"` + EnabledProtocols *string `xml:"EnabledProtocols"` + + // When a share is leased, specifies whether the lease is of infinite or fixed duration. 
+ LeaseDuration *LeaseDurationType `xml:"LeaseDuration"` + + // Lease state of the share. + LeaseState *LeaseStateType `xml:"LeaseState"` + + // The current lease status of the share. + LeaseStatus *LeaseStatusType `xml:"LeaseStatus"` + NextAllowedQuotaDowngradeTime *time.Time `xml:"NextAllowedQuotaDowngradeTime"` + ProvisionedEgressMBps *int32 `xml:"ProvisionedEgressMBps"` + ProvisionedIngressMBps *int32 `xml:"ProvisionedIngressMBps"` + ProvisionedIops *int32 `xml:"ProvisionedIops"` + RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"` + RootSquash *ShareRootSquash `xml:"RootSquash"` +} + +// ShareStats - Stats for the share. +type ShareStats struct { + // REQUIRED; The approximate size of the data stored in bytes. Note that this value may not include all recently created or + // recently resized files. + ShareUsageBytes *int64 `xml:"ShareUsageBytes"` +} + +// SignedIdentifier - Signed identifier. +type SignedIdentifier struct { + // REQUIRED; A unique id. + ID *string `xml:"Id"` + + // The access policy. + AccessPolicy *AccessPolicy `xml:"AccessPolicy"` +} + +// SMBMultichannel - Settings for SMB multichannel +type SMBMultichannel struct { + // If SMB multichannel is enabled. + Enabled *bool `xml:"Enabled"` +} + +// SourceModifiedAccessConditions contains a group of parameters for the FileClient.UploadRangeFromURL method. +type SourceModifiedAccessConditions struct { + // Specify the crc64 value to operate only on range with a matching crc64 checksum. + SourceIfMatchCRC64 []byte + // Specify the crc64 value to operate only on range without a matching crc64 checksum. + SourceIfNoneMatchCRC64 []byte +} + +type StorageError struct { + Message *string `json:"Message,omitempty"` +} + +// StorageServiceProperties - Storage service properties. +type StorageServiceProperties struct { + // The set of CORS rules. + CORS []*CORSRule `xml:"Cors>CorsRule"` + + // A summary of request statistics grouped by API in hourly aggregates for files. 
+ HourMetrics *Metrics `xml:"HourMetrics"` + + // A summary of request statistics grouped by API in minute aggregates for files. + MinuteMetrics *Metrics `xml:"MinuteMetrics"` + + // Protocol settings + Protocol *ProtocolSettings `xml:"ProtocolSettings"` +} diff --git a/sdk/storage/azfile/internal/generated/zz_models_serde.go b/sdk/storage/azfile/internal/generated/zz_models_serde.go new file mode 100644 index 000000000000..7f837baac65c --- /dev/null +++ b/sdk/storage/azfile/internal/generated/zz_models_serde.go @@ -0,0 +1,344 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package generated + +import ( + "encoding/json" + "encoding/xml" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "reflect" + "time" +) + +// MarshalXML implements the xml.Marshaller interface for type AccessPolicy. +func (a AccessPolicy) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias AccessPolicy + aux := &struct { + *alias + Expiry *timeRFC3339 `xml:"Expiry"` + Start *timeRFC3339 `xml:"Start"` + }{ + alias: (*alias)(&a), + Expiry: (*timeRFC3339)(a.Expiry), + Start: (*timeRFC3339)(a.Start), + } + return enc.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type AccessPolicy. 
+func (a *AccessPolicy) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { + type alias AccessPolicy + aux := &struct { + *alias + Expiry *timeRFC3339 `xml:"Expiry"` + Start *timeRFC3339 `xml:"Start"` + }{ + alias: (*alias)(a), + } + if err := dec.DecodeElement(aux, &start); err != nil { + return err + } + a.Expiry = (*time.Time)(aux.Expiry) + a.Start = (*time.Time)(aux.Start) + return nil +} + +// MarshalXML implements the xml.Marshaller interface for type FileProperty. +func (f FileProperty) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias FileProperty + aux := &struct { + *alias + ChangeTime *timeRFC3339 `xml:"ChangeTime"` + CreationTime *timeRFC3339 `xml:"CreationTime"` + LastAccessTime *timeRFC3339 `xml:"LastAccessTime"` + LastModified *timeRFC1123 `xml:"Last-Modified"` + LastWriteTime *timeRFC3339 `xml:"LastWriteTime"` + }{ + alias: (*alias)(&f), + ChangeTime: (*timeRFC3339)(f.ChangeTime), + CreationTime: (*timeRFC3339)(f.CreationTime), + LastAccessTime: (*timeRFC3339)(f.LastAccessTime), + LastModified: (*timeRFC1123)(f.LastModified), + LastWriteTime: (*timeRFC3339)(f.LastWriteTime), + } + return enc.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type FileProperty. 
+func (f *FileProperty) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { + type alias FileProperty + aux := &struct { + *alias + ChangeTime *timeRFC3339 `xml:"ChangeTime"` + CreationTime *timeRFC3339 `xml:"CreationTime"` + LastAccessTime *timeRFC3339 `xml:"LastAccessTime"` + LastModified *timeRFC1123 `xml:"Last-Modified"` + LastWriteTime *timeRFC3339 `xml:"LastWriteTime"` + }{ + alias: (*alias)(f), + } + if err := dec.DecodeElement(aux, &start); err != nil { + return err + } + f.ChangeTime = (*time.Time)(aux.ChangeTime) + f.CreationTime = (*time.Time)(aux.CreationTime) + f.LastAccessTime = (*time.Time)(aux.LastAccessTime) + f.LastModified = (*time.Time)(aux.LastModified) + f.LastWriteTime = (*time.Time)(aux.LastWriteTime) + return nil +} + +// MarshalXML implements the xml.Marshaller interface for type FilesAndDirectoriesListSegment. +func (f FilesAndDirectoriesListSegment) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias FilesAndDirectoriesListSegment + aux := &struct { + *alias + Directories *[]*Directory `xml:"Directory"` + Files *[]*File `xml:"File"` + }{ + alias: (*alias)(&f), + } + if f.Directories != nil { + aux.Directories = &f.Directories + } + if f.Files != nil { + aux.Files = &f.Files + } + return enc.EncodeElement(aux, start) +} + +// MarshalXML implements the xml.Marshaller interface for type Handle. +func (h Handle) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias Handle + aux := &struct { + *alias + LastReconnectTime *timeRFC1123 `xml:"LastReconnectTime"` + OpenTime *timeRFC1123 `xml:"OpenTime"` + }{ + alias: (*alias)(&h), + LastReconnectTime: (*timeRFC1123)(h.LastReconnectTime), + OpenTime: (*timeRFC1123)(h.OpenTime), + } + return enc.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type Handle. 
+func (h *Handle) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { + type alias Handle + aux := &struct { + *alias + LastReconnectTime *timeRFC1123 `xml:"LastReconnectTime"` + OpenTime *timeRFC1123 `xml:"OpenTime"` + }{ + alias: (*alias)(h), + } + if err := dec.DecodeElement(aux, &start); err != nil { + return err + } + h.LastReconnectTime = (*time.Time)(aux.LastReconnectTime) + h.OpenTime = (*time.Time)(aux.OpenTime) + return nil +} + +// MarshalXML implements the xml.Marshaller interface for type ListHandlesResponse. +func (l ListHandlesResponse) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias ListHandlesResponse + aux := &struct { + *alias + Handles *[]*Handle `xml:"Entries>Handle"` + }{ + alias: (*alias)(&l), + } + if l.Handles != nil { + aux.Handles = &l.Handles + } + return enc.EncodeElement(aux, start) +} + +// MarshalXML implements the xml.Marshaller interface for type ListSharesResponse. +func (l ListSharesResponse) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias ListSharesResponse + aux := &struct { + *alias + Shares *[]*Share `xml:"Shares>Share"` + }{ + alias: (*alias)(&l), + } + if l.Shares != nil { + aux.Shares = &l.Shares + } + return enc.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type Share. +func (s *Share) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { + type alias Share + aux := &struct { + *alias + Metadata additionalProperties `xml:"Metadata"` + }{ + alias: (*alias)(s), + } + if err := dec.DecodeElement(aux, &start); err != nil { + return err + } + s.Metadata = (map[string]*string)(aux.Metadata) + return nil +} + +// MarshalXML implements the xml.Marshaller interface for type ShareFileRangeList. 
+func (s ShareFileRangeList) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias ShareFileRangeList + aux := &struct { + *alias + ClearRanges *[]*ClearRange `xml:"ClearRange"` + Ranges *[]*FileRange `xml:"Range"` + }{ + alias: (*alias)(&s), + } + if s.ClearRanges != nil { + aux.ClearRanges = &s.ClearRanges + } + if s.Ranges != nil { + aux.Ranges = &s.Ranges + } + return enc.EncodeElement(aux, start) +} + +// MarshalJSON implements the json.Marshaller interface for type SharePermission. +func (s SharePermission) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "permission", s.Permission) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type SharePermission. +func (s *SharePermission) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "permission": + err = unpopulate(val, "Permission", &s.Permission) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + } + return nil +} + +// MarshalXML implements the xml.Marshaller interface for type ShareProperties. 
+func (s ShareProperties) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias ShareProperties + aux := &struct { + *alias + AccessTierChangeTime *timeRFC1123 `xml:"AccessTierChangeTime"` + DeletedTime *timeRFC1123 `xml:"DeletedTime"` + LastModified *timeRFC1123 `xml:"Last-Modified"` + NextAllowedQuotaDowngradeTime *timeRFC1123 `xml:"NextAllowedQuotaDowngradeTime"` + }{ + alias: (*alias)(&s), + AccessTierChangeTime: (*timeRFC1123)(s.AccessTierChangeTime), + DeletedTime: (*timeRFC1123)(s.DeletedTime), + LastModified: (*timeRFC1123)(s.LastModified), + NextAllowedQuotaDowngradeTime: (*timeRFC1123)(s.NextAllowedQuotaDowngradeTime), + } + return enc.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type ShareProperties. +func (s *ShareProperties) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { + type alias ShareProperties + aux := &struct { + *alias + AccessTierChangeTime *timeRFC1123 `xml:"AccessTierChangeTime"` + DeletedTime *timeRFC1123 `xml:"DeletedTime"` + LastModified *timeRFC1123 `xml:"Last-Modified"` + NextAllowedQuotaDowngradeTime *timeRFC1123 `xml:"NextAllowedQuotaDowngradeTime"` + }{ + alias: (*alias)(s), + } + if err := dec.DecodeElement(aux, &start); err != nil { + return err + } + s.AccessTierChangeTime = (*time.Time)(aux.AccessTierChangeTime) + s.DeletedTime = (*time.Time)(aux.DeletedTime) + s.LastModified = (*time.Time)(aux.LastModified) + s.NextAllowedQuotaDowngradeTime = (*time.Time)(aux.NextAllowedQuotaDowngradeTime) + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type StorageError. +func (s StorageError) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "Message", s.Message) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type StorageError. 
+func (s *StorageError) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "Message": + err = unpopulate(val, "Message", &s.Message) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + } + return nil +} + +// MarshalXML implements the xml.Marshaller interface for type StorageServiceProperties. +func (s StorageServiceProperties) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias StorageServiceProperties + aux := &struct { + *alias + CORS *[]*CORSRule `xml:"Cors>CorsRule"` + }{ + alias: (*alias)(&s), + } + if s.CORS != nil { + aux.CORS = &s.CORS + } + return enc.EncodeElement(aux, start) +} + +func populate(m map[string]any, k string, v any) { + if v == nil { + return + } else if azcore.IsNullValue(v) { + m[k] = nil + } else if !reflect.ValueOf(v).IsNil() { + m[k] = v + } +} + +func unpopulate(data json.RawMessage, fn string, v any) error { + if data == nil { + return nil + } + if err := json.Unmarshal(data, v); err != nil { + return fmt.Errorf("struct field %s: %v", fn, err) + } + return nil +} diff --git a/sdk/storage/azfile/internal/generated/zz_response_types.go b/sdk/storage/azfile/internal/generated/zz_response_types.go new file mode 100644 index 000000000000..be6bf1f60562 --- /dev/null +++ b/sdk/storage/azfile/internal/generated/zz_response_types.go @@ -0,0 +1,1189 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. 
+ +package generated + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "io" + "time" +) + +// DirectoryClientCreateResponse contains the response from method DirectoryClient.Create. +type DirectoryClientCreateResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // FileAttributes contains the information returned from the x-ms-file-attributes header response. + FileAttributes *string + + // FileChangeTime contains the information returned from the x-ms-file-change-time header response. + FileChangeTime *time.Time + + // FileCreationTime contains the information returned from the x-ms-file-creation-time header response. + FileCreationTime *time.Time + + // ID contains the information returned from the x-ms-file-id header response. + ID *string + + // FileLastWriteTime contains the information returned from the x-ms-file-last-write-time header response. + FileLastWriteTime *time.Time + + // ParentID contains the information returned from the x-ms-file-parent-id header response. + ParentID *string + + // FilePermissionKey contains the information returned from the x-ms-file-permission-key header response. + FilePermissionKey *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// DirectoryClientDeleteResponse contains the response from method DirectoryClient.Delete. 
+type DirectoryClientDeleteResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// DirectoryClientForceCloseHandlesResponse contains the response from method DirectoryClient.ForceCloseHandles. +type DirectoryClientForceCloseHandlesResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time + + // Marker contains the information returned from the x-ms-marker header response. + Marker *string + + // NumberOfHandlesClosed contains the information returned from the x-ms-number-of-handles-closed header response. + NumberOfHandlesClosed *int32 + + // NumberOfHandlesFailedToClose contains the information returned from the x-ms-number-of-handles-failed header response. + NumberOfHandlesFailedToClose *int32 + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// DirectoryClientGetPropertiesResponse contains the response from method DirectoryClient.GetProperties. +type DirectoryClientGetPropertiesResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // FileAttributes contains the information returned from the x-ms-file-attributes header response. + FileAttributes *string + + // FileChangeTime contains the information returned from the x-ms-file-change-time header response. + FileChangeTime *time.Time + + // FileCreationTime contains the information returned from the x-ms-file-creation-time header response. 
+ FileCreationTime *time.Time + + // ID contains the information returned from the x-ms-file-id header response. + ID *string + + // FileLastWriteTime contains the information returned from the x-ms-file-last-write-time header response. + FileLastWriteTime *time.Time + + // ParentID contains the information returned from the x-ms-file-parent-id header response. + ParentID *string + + // FilePermissionKey contains the information returned from the x-ms-file-permission-key header response. + FilePermissionKey *string + + // IsServerEncrypted contains the information returned from the x-ms-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // Metadata contains the information returned from the x-ms-meta header response. + Metadata map[string]*string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// DirectoryClientListFilesAndDirectoriesSegmentResponse contains the response from method DirectoryClient.NewListFilesAndDirectoriesSegmentPager. +type DirectoryClientListFilesAndDirectoriesSegmentResponse struct { + ListFilesAndDirectoriesSegmentResponse + // ContentType contains the information returned from the Content-Type header response. + ContentType *string `xml:"ContentType"` + + // Date contains the information returned from the Date header response. + Date *time.Time `xml:"Date"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// DirectoryClientListHandlesResponse contains the response from method DirectoryClient.ListHandles. 
+type DirectoryClientListHandlesResponse struct { + ListHandlesResponse + // ContentType contains the information returned from the Content-Type header response. + ContentType *string `xml:"ContentType"` + + // Date contains the information returned from the Date header response. + Date *time.Time `xml:"Date"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// DirectoryClientSetMetadataResponse contains the response from method DirectoryClient.SetMetadata. +type DirectoryClientSetMetadataResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// DirectoryClientSetPropertiesResponse contains the response from method DirectoryClient.SetProperties. +type DirectoryClientSetPropertiesResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // FileAttributes contains the information returned from the x-ms-file-attributes header response. + FileAttributes *string + + // FileChangeTime contains the information returned from the x-ms-file-change-time header response. + FileChangeTime *time.Time + + // FileCreationTime contains the information returned from the x-ms-file-creation-time header response. 
+ FileCreationTime *time.Time + + // ID contains the information returned from the x-ms-file-id header response. + ID *string + + // FileLastWriteTime contains the information returned from the x-ms-file-last-write-time header response. + FileLastWriteTime *time.Time + + // ParentID contains the information returned from the x-ms-file-parent-id header response. + ParentID *string + + // FilePermissionKey contains the information returned from the x-ms-file-permission-key header response. + FilePermissionKey *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// FileClientAbortCopyResponse contains the response from method FileClient.AbortCopy. +type FileClientAbortCopyResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// FileClientAcquireLeaseResponse contains the response from method FileClient.AcquireLease. +type FileClientAcquireLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. 
+ ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// FileClientBreakLeaseResponse contains the response from method FileClient.BreakLease. +type FileClientBreakLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// FileClientChangeLeaseResponse contains the response from method FileClient.ChangeLease. +type FileClientChangeLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. 
+ LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// FileClientCreateResponse contains the response from method FileClient.Create. +type FileClientCreateResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // FileAttributes contains the information returned from the x-ms-file-attributes header response. + FileAttributes *string + + // FileChangeTime contains the information returned from the x-ms-file-change-time header response. + FileChangeTime *time.Time + + // FileCreationTime contains the information returned from the x-ms-file-creation-time header response. + FileCreationTime *time.Time + + // ID contains the information returned from the x-ms-file-id header response. + ID *string + + // FileLastWriteTime contains the information returned from the x-ms-file-last-write-time header response. + FileLastWriteTime *time.Time + + // ParentID contains the information returned from the x-ms-file-parent-id header response. + ParentID *string + + // FilePermissionKey contains the information returned from the x-ms-file-permission-key header response. + FilePermissionKey *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. 
+ RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// FileClientDeleteResponse contains the response from method FileClient.Delete. +type FileClientDeleteResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// FileClientDownloadResponse contains the response from method FileClient.Download. +type FileClientDownloadResponse struct { + // AcceptRanges contains the information returned from the Accept-Ranges header response. + AcceptRanges *string + + // Body contains the streaming response. + Body io.ReadCloser + + // CacheControl contains the information returned from the Cache-Control header response. + CacheControl *string + + // ContentDisposition contains the information returned from the Content-Disposition header response. + ContentDisposition *string + + // ContentEncoding contains the information returned from the Content-Encoding header response. + ContentEncoding *string + + // ContentLanguage contains the information returned from the Content-Language header response. + ContentLanguage *string + + // ContentLength contains the information returned from the Content-Length header response. + ContentLength *int64 + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // ContentRange contains the information returned from the Content-Range header response. + ContentRange *string + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // CopyCompletionTime contains the information returned from the x-ms-copy-completion-time header response. 
+ CopyCompletionTime *time.Time + + // CopyID contains the information returned from the x-ms-copy-id header response. + CopyID *string + + // CopyProgress contains the information returned from the x-ms-copy-progress header response. + CopyProgress *string + + // CopySource contains the information returned from the x-ms-copy-source header response. + CopySource *string + + // CopyStatus contains the information returned from the x-ms-copy-status header response. + CopyStatus *CopyStatusType + + // CopyStatusDescription contains the information returned from the x-ms-copy-status-description header response. + CopyStatusDescription *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // FileAttributes contains the information returned from the x-ms-file-attributes header response. + FileAttributes *string + + // FileChangeTime contains the information returned from the x-ms-file-change-time header response. + FileChangeTime *time.Time + + // FileContentMD5 contains the information returned from the x-ms-content-md5 header response. + FileContentMD5 []byte + + // FileCreationTime contains the information returned from the x-ms-file-creation-time header response. + FileCreationTime *time.Time + + // ID contains the information returned from the x-ms-file-id header response. + ID *string + + // FileLastWriteTime contains the information returned from the x-ms-file-last-write-time header response. + FileLastWriteTime *time.Time + + // ParentID contains the information returned from the x-ms-file-parent-id header response. + ParentID *string + + // FilePermissionKey contains the information returned from the x-ms-file-permission-key header response. + FilePermissionKey *string + + // IsServerEncrypted contains the information returned from the x-ms-server-encrypted header response. 
+ IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseDuration contains the information returned from the x-ms-lease-duration header response. + LeaseDuration *LeaseDurationType + + // LeaseState contains the information returned from the x-ms-lease-state header response. + LeaseState *LeaseStateType + + // LeaseStatus contains the information returned from the x-ms-lease-status header response. + LeaseStatus *LeaseStatusType + + // Metadata contains the information returned from the x-ms-meta header response. + Metadata map[string]*string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// FileClientForceCloseHandlesResponse contains the response from method FileClient.ForceCloseHandles. +type FileClientForceCloseHandlesResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time + + // Marker contains the information returned from the x-ms-marker header response. + Marker *string + + // NumberOfHandlesClosed contains the information returned from the x-ms-number-of-handles-closed header response. + NumberOfHandlesClosed *int32 + + // NumberOfHandlesFailedToClose contains the information returned from the x-ms-number-of-handles-failed header response. + NumberOfHandlesFailedToClose *int32 + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// FileClientGetPropertiesResponse contains the response from method FileClient.GetProperties. +type FileClientGetPropertiesResponse struct { + // CacheControl contains the information returned from the Cache-Control header response. 
+ CacheControl *string + + // ContentDisposition contains the information returned from the Content-Disposition header response. + ContentDisposition *string + + // ContentEncoding contains the information returned from the Content-Encoding header response. + ContentEncoding *string + + // ContentLanguage contains the information returned from the Content-Language header response. + ContentLanguage *string + + // ContentLength contains the information returned from the Content-Length header response. + ContentLength *int64 + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // CopyCompletionTime contains the information returned from the x-ms-copy-completion-time header response. + CopyCompletionTime *time.Time + + // CopyID contains the information returned from the x-ms-copy-id header response. + CopyID *string + + // CopyProgress contains the information returned from the x-ms-copy-progress header response. + CopyProgress *string + + // CopySource contains the information returned from the x-ms-copy-source header response. + CopySource *string + + // CopyStatus contains the information returned from the x-ms-copy-status header response. + CopyStatus *CopyStatusType + + // CopyStatusDescription contains the information returned from the x-ms-copy-status-description header response. + CopyStatusDescription *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // FileAttributes contains the information returned from the x-ms-file-attributes header response. + FileAttributes *string + + // FileChangeTime contains the information returned from the x-ms-file-change-time header response. 
+ FileChangeTime *time.Time + + // FileCreationTime contains the information returned from the x-ms-file-creation-time header response. + FileCreationTime *time.Time + + // ID contains the information returned from the x-ms-file-id header response. + ID *string + + // FileLastWriteTime contains the information returned from the x-ms-file-last-write-time header response. + FileLastWriteTime *time.Time + + // ParentID contains the information returned from the x-ms-file-parent-id header response. + ParentID *string + + // FilePermissionKey contains the information returned from the x-ms-file-permission-key header response. + FilePermissionKey *string + + // FileType contains the information returned from the x-ms-type header response. + FileType *string + + // IsServerEncrypted contains the information returned from the x-ms-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseDuration contains the information returned from the x-ms-lease-duration header response. + LeaseDuration *LeaseDurationType + + // LeaseState contains the information returned from the x-ms-lease-state header response. + LeaseState *LeaseStateType + + // LeaseStatus contains the information returned from the x-ms-lease-status header response. + LeaseStatus *LeaseStatusType + + // Metadata contains the information returned from the x-ms-meta header response. + Metadata map[string]*string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// FileClientGetRangeListResponse contains the response from method FileClient.GetRangeList. +type FileClientGetRangeListResponse struct { + ShareFileRangeList + // Date contains the information returned from the Date header response. 
+ Date *time.Time `xml:"Date"` + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag `xml:"ETag"` + + // FileContentLength contains the information returned from the x-ms-content-length header response. + FileContentLength *int64 `xml:"FileContentLength"` + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time `xml:"LastModified"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// FileClientListHandlesResponse contains the response from method FileClient.ListHandles. +type FileClientListHandlesResponse struct { + ListHandlesResponse + // ContentType contains the information returned from the Content-Type header response. + ContentType *string `xml:"ContentType"` + + // Date contains the information returned from the Date header response. + Date *time.Time `xml:"Date"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// FileClientReleaseLeaseResponse contains the response from method FileClient.ReleaseLease. +type FileClientReleaseLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. 
+ LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// FileClientSetHTTPHeadersResponse contains the response from method FileClient.SetHTTPHeaders. +type FileClientSetHTTPHeadersResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // FileAttributes contains the information returned from the x-ms-file-attributes header response. + FileAttributes *string + + // FileChangeTime contains the information returned from the x-ms-file-change-time header response. + FileChangeTime *time.Time + + // FileCreationTime contains the information returned from the x-ms-file-creation-time header response. + FileCreationTime *time.Time + + // ID contains the information returned from the x-ms-file-id header response. + ID *string + + // FileLastWriteTime contains the information returned from the x-ms-file-last-write-time header response. + FileLastWriteTime *time.Time + + // ParentID contains the information returned from the x-ms-file-parent-id header response. + ParentID *string + + // FilePermissionKey contains the information returned from the x-ms-file-permission-key header response. + FilePermissionKey *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. 
+ Version *string +} + +// FileClientSetMetadataResponse contains the response from method FileClient.SetMetadata. +type FileClientSetMetadataResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// FileClientStartCopyResponse contains the response from method FileClient.StartCopy. +type FileClientStartCopyResponse struct { + // CopyID contains the information returned from the x-ms-copy-id header response. + CopyID *string + + // CopyStatus contains the information returned from the x-ms-copy-status header response. + CopyStatus *CopyStatusType + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// FileClientUploadRangeFromURLResponse contains the response from method FileClient.UploadRangeFromURL. +type FileClientUploadRangeFromURLResponse struct { + // ContentMD5 contains the information returned from the Content-MD5 header response. 
+ ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // XMSContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + XMSContentCRC64 []byte +} + +// FileClientUploadRangeResponse contains the response from method FileClient.UploadRange. +type FileClientUploadRangeResponse struct { + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ServiceClientGetPropertiesResponse contains the response from method ServiceClient.GetProperties. 
+type ServiceClientGetPropertiesResponse struct { + StorageServiceProperties + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// ServiceClientListSharesSegmentResponse contains the response from method ServiceClient.NewListSharesSegmentPager. +type ServiceClientListSharesSegmentResponse struct { + ListSharesResponse + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// ServiceClientSetPropertiesResponse contains the response from method ServiceClient.SetProperties. +type ServiceClientSetPropertiesResponse struct { + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ShareClientAcquireLeaseResponse contains the response from method ShareClient.AcquireLease. +type ShareClientAcquireLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. 
+ RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ShareClientBreakLeaseResponse contains the response from method ShareClient.BreakLease. +type ShareClientBreakLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // LeaseTime contains the information returned from the x-ms-lease-time header response. + LeaseTime *int32 + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ShareClientChangeLeaseResponse contains the response from method ShareClient.ChangeLease. +type ShareClientChangeLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. 
+ RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ShareClientCreatePermissionResponse contains the response from method ShareClient.CreatePermission. +type ShareClientCreatePermissionResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time + + // FilePermissionKey contains the information returned from the x-ms-file-permission-key header response. + FilePermissionKey *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ShareClientCreateResponse contains the response from method ShareClient.Create. +type ShareClientCreateResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ShareClientCreateSnapshotResponse contains the response from method ShareClient.CreateSnapshot. +type ShareClientCreateSnapshotResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. 
+ RequestID *string + + // Snapshot contains the information returned from the x-ms-snapshot header response. + Snapshot *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ShareClientDeleteResponse contains the response from method ShareClient.Delete. +type ShareClientDeleteResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ShareClientGetAccessPolicyResponse contains the response from method ShareClient.GetAccessPolicy. +type ShareClientGetAccessPolicyResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time `xml:"Date"` + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag `xml:"ETag"` + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time `xml:"LastModified"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // A collection of signed identifiers. + SignedIdentifiers []*SignedIdentifier `xml:"SignedIdentifier"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// ShareClientGetPermissionResponse contains the response from method ShareClient.GetPermission. +type ShareClientGetPermissionResponse struct { + SharePermission + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. 
+ RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ShareClientGetPropertiesResponse contains the response from method ShareClient.GetProperties. +type ShareClientGetPropertiesResponse struct { + // AccessTier contains the information returned from the x-ms-access-tier header response. + AccessTier *string + + // AccessTierChangeTime contains the information returned from the x-ms-access-tier-change-time header response. + AccessTierChangeTime *time.Time + + // AccessTierTransitionState contains the information returned from the x-ms-access-tier-transition-state header response. + AccessTierTransitionState *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EnabledProtocols contains the information returned from the x-ms-enabled-protocols header response. + EnabledProtocols *string + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseDuration contains the information returned from the x-ms-lease-duration header response. + LeaseDuration *LeaseDurationType + + // LeaseState contains the information returned from the x-ms-lease-state header response. + LeaseState *LeaseStateType + + // LeaseStatus contains the information returned from the x-ms-lease-status header response. + LeaseStatus *LeaseStatusType + + // Metadata contains the information returned from the x-ms-meta header response. + Metadata map[string]*string + + // NextAllowedQuotaDowngradeTime contains the information returned from the x-ms-share-next-allowed-quota-downgrade-time header + // response. + NextAllowedQuotaDowngradeTime *time.Time + + // ProvisionedEgressMBps contains the information returned from the x-ms-share-provisioned-egress-mbps header response. 
+ ProvisionedEgressMBps *int32 + + // ProvisionedIngressMBps contains the information returned from the x-ms-share-provisioned-ingress-mbps header response. + ProvisionedIngressMBps *int32 + + // ProvisionedIops contains the information returned from the x-ms-share-provisioned-iops header response. + ProvisionedIops *int32 + + // Quota contains the information returned from the x-ms-share-quota header response. + Quota *int32 + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // RootSquash contains the information returned from the x-ms-root-squash header response. + RootSquash *ShareRootSquash + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ShareClientGetStatisticsResponse contains the response from method ShareClient.GetStatistics. +type ShareClientGetStatisticsResponse struct { + ShareStats + // Date contains the information returned from the Date header response. + Date *time.Time `xml:"Date"` + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag `xml:"ETag"` + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time `xml:"LastModified"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// ShareClientReleaseLeaseResponse contains the response from method ShareClient.ReleaseLease. +type ShareClientReleaseLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. 
+ ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ShareClientRenewLeaseResponse contains the response from method ShareClient.RenewLease. +type ShareClientRenewLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ShareClientRestoreResponse contains the response from method ShareClient.Restore. +type ShareClientRestoreResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. 
+ RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ShareClientSetAccessPolicyResponse contains the response from method ShareClient.SetAccessPolicy. +type ShareClientSetAccessPolicyResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ShareClientSetMetadataResponse contains the response from method ShareClient.SetMetadata. +type ShareClientSetMetadataResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ShareClientSetPropertiesResponse contains the response from method ShareClient.SetProperties. +type ShareClientSetPropertiesResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. 
+ LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} diff --git a/sdk/storage/azfile/internal/generated/zz_service_client.go b/sdk/storage/azfile/internal/generated/zz_service_client.go new file mode 100644 index 000000000000..efd5f4708912 --- /dev/null +++ b/sdk/storage/azfile/internal/generated/zz_service_client.go @@ -0,0 +1,195 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package generated + +import ( + "context" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "strconv" + "strings" +) + +// ServiceClient contains the methods for the Service group. +// Don't use this type directly, use NewServiceClient() instead. +type ServiceClient struct { + endpoint string + pl runtime.Pipeline +} + +// NewServiceClient creates a new instance of ServiceClient with the specified values. +// - endpoint - The URL of the service account, share, directory or file that is the target of the desired operation. +// - pl - the pipeline used for sending requests and handling responses. +func NewServiceClient(endpoint string, pl runtime.Pipeline) *ServiceClient { + client := &ServiceClient{ + endpoint: endpoint, + pl: pl, + } + return client +} + +// GetProperties - Gets the properties of a storage account's File service, including properties for Storage Analytics metrics +// and CORS (Cross-Origin Resource Sharing) rules. +// If the operation fails it returns an *azcore.ResponseError type. 
+// +// Generated from API version 2020-10-02 +// - options - ServiceClientGetPropertiesOptions contains the optional parameters for the ServiceClient.GetProperties method. +func (client *ServiceClient) GetProperties(ctx context.Context, options *ServiceClientGetPropertiesOptions) (ServiceClientGetPropertiesResponse, error) { + req, err := client.getPropertiesCreateRequest(ctx, options) + if err != nil { + return ServiceClientGetPropertiesResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ServiceClientGetPropertiesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ServiceClientGetPropertiesResponse{}, runtime.NewResponseError(resp) + } + return client.getPropertiesHandleResponse(resp) +} + +// getPropertiesCreateRequest creates the GetProperties request. +func (client *ServiceClient) getPropertiesCreateRequest(ctx context.Context, options *ServiceClientGetPropertiesOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "service") + reqQP.Set("comp", "properties") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// getPropertiesHandleResponse handles the GetProperties response. 
+func (client *ServiceClient) getPropertiesHandleResponse(resp *http.Response) (ServiceClientGetPropertiesResponse, error) { + result := ServiceClientGetPropertiesResponse{} + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if err := runtime.UnmarshalAsXML(resp, &result.StorageServiceProperties); err != nil { + return ServiceClientGetPropertiesResponse{}, err + } + return result, nil +} + +// NewListSharesSegmentPager - The List Shares Segment operation returns a list of the shares and share snapshots under the +// specified account. +// +// Generated from API version 2020-10-02 +// - options - ServiceClientListSharesSegmentOptions contains the optional parameters for the ServiceClient.NewListSharesSegmentPager +// method. +// +// listSharesSegmentCreateRequest creates the ListSharesSegment request. +func (client *ServiceClient) ListSharesSegmentCreateRequest(ctx context.Context, options *ServiceClientListSharesSegmentOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "list") + if options != nil && options.Prefix != nil { + reqQP.Set("prefix", *options.Prefix) + } + if options != nil && options.Marker != nil { + reqQP.Set("marker", *options.Marker) + } + if options != nil && options.Maxresults != nil { + reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) + } + if options != nil && options.Include != nil { + reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["Accept"] = 
[]string{"application/xml"} + return req, nil +} + +// listSharesSegmentHandleResponse handles the ListSharesSegment response. +func (client *ServiceClient) ListSharesSegmentHandleResponse(resp *http.Response) (ServiceClientListSharesSegmentResponse, error) { + result := ServiceClientListSharesSegmentResponse{} + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if err := runtime.UnmarshalAsXML(resp, &result.ListSharesResponse); err != nil { + return ServiceClientListSharesSegmentResponse{}, err + } + return result, nil +} + +// SetProperties - Sets properties for a storage account's File service endpoint, including properties for Storage Analytics +// metrics and CORS (Cross-Origin Resource Sharing) rules. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - storageServiceProperties - The StorageService properties. +// - options - ServiceClientSetPropertiesOptions contains the optional parameters for the ServiceClient.SetProperties method. +func (client *ServiceClient) SetProperties(ctx context.Context, storageServiceProperties StorageServiceProperties, options *ServiceClientSetPropertiesOptions) (ServiceClientSetPropertiesResponse, error) { + req, err := client.setPropertiesCreateRequest(ctx, storageServiceProperties, options) + if err != nil { + return ServiceClientSetPropertiesResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ServiceClientSetPropertiesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusAccepted) { + return ServiceClientSetPropertiesResponse{}, runtime.NewResponseError(resp) + } + return client.setPropertiesHandleResponse(resp) +} + +// setPropertiesCreateRequest creates the SetProperties request. 
+func (client *ServiceClient) setPropertiesCreateRequest(ctx context.Context, storageServiceProperties StorageServiceProperties, options *ServiceClientSetPropertiesOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "service") + reqQP.Set("comp", "properties") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, runtime.MarshalAsXML(req, storageServiceProperties) +} + +// setPropertiesHandleResponse handles the SetProperties response. +func (client *ServiceClient) setPropertiesHandleResponse(resp *http.Response) (ServiceClientSetPropertiesResponse, error) { + result := ServiceClientSetPropertiesResponse{} + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} diff --git a/sdk/storage/azfile/internal/generated/zz_share_client.go b/sdk/storage/azfile/internal/generated/zz_share_client.go new file mode 100644 index 000000000000..1ba2fda44963 --- /dev/null +++ b/sdk/storage/azfile/internal/generated/zz_share_client.go @@ -0,0 +1,1437 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. 
+ +package generated + +import ( + "context" + "encoding/xml" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "net/http" + "strconv" + "strings" + "time" +) + +// ShareClient contains the methods for the Share group. +// Don't use this type directly, use NewShareClient() instead. +type ShareClient struct { + endpoint string + pl runtime.Pipeline +} + +// NewShareClient creates a new instance of ShareClient with the specified values. +// - endpoint - The URL of the service account, share, directory or file that is the target of the desired operation. +// - pl - the pipeline used for sending requests and handling responses. +func NewShareClient(endpoint string, pl runtime.Pipeline) *ShareClient { + client := &ShareClient{ + endpoint: endpoint, + pl: pl, + } + return client +} + +// AcquireLease - The Lease Share operation establishes and manages a lock on a share, or the specified snapshot for set and +// delete share operations. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - duration - Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite +// lease can be between 15 and 60 seconds. A lease duration cannot be changed using +// renew or change. +// - options - ShareClientAcquireLeaseOptions contains the optional parameters for the ShareClient.AcquireLease method. 
+func (client *ShareClient) AcquireLease(ctx context.Context, duration int32, options *ShareClientAcquireLeaseOptions) (ShareClientAcquireLeaseResponse, error) { + req, err := client.acquireLeaseCreateRequest(ctx, duration, options) + if err != nil { + return ShareClientAcquireLeaseResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ShareClientAcquireLeaseResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return ShareClientAcquireLeaseResponse{}, runtime.NewResponseError(resp) + } + return client.acquireLeaseHandleResponse(resp) +} + +// acquireLeaseCreateRequest creates the AcquireLease request. +func (client *ShareClient) acquireLeaseCreateRequest(ctx context.Context, duration int32, options *ShareClientAcquireLeaseOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + reqQP.Set("restype", "share") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + if options != nil && options.Sharesnapshot != nil { + reqQP.Set("sharesnapshot", *options.Sharesnapshot) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-lease-action"] = []string{"acquire"} + req.Raw().Header["x-ms-lease-duration"] = []string{strconv.FormatInt(int64(duration), 10)} + if options != nil && options.ProposedLeaseID != nil { + req.Raw().Header["x-ms-proposed-lease-id"] = []string{*options.ProposedLeaseID} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// acquireLeaseHandleResponse handles the AcquireLease response. 
+func (client *ShareClient) acquireLeaseHandleResponse(resp *http.Response) (ShareClientAcquireLeaseResponse, error) { + result := ShareClientAcquireLeaseResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ShareClientAcquireLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-id"); val != "" { + result.LeaseID = &val + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ShareClientAcquireLeaseResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// BreakLease - The Lease Share operation establishes and manages a lock on a share, or the specified snapshot for set and +// delete share operations. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - options - ShareClientBreakLeaseOptions contains the optional parameters for the ShareClient.BreakLease method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ShareClient.GetProperties method. 
+func (client *ShareClient) BreakLease(ctx context.Context, options *ShareClientBreakLeaseOptions, leaseAccessConditions *LeaseAccessConditions) (ShareClientBreakLeaseResponse, error) { + req, err := client.breakLeaseCreateRequest(ctx, options, leaseAccessConditions) + if err != nil { + return ShareClientBreakLeaseResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ShareClientBreakLeaseResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusAccepted) { + return ShareClientBreakLeaseResponse{}, runtime.NewResponseError(resp) + } + return client.breakLeaseHandleResponse(resp) +} + +// breakLeaseCreateRequest creates the BreakLease request. +func (client *ShareClient) breakLeaseCreateRequest(ctx context.Context, options *ShareClientBreakLeaseOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + reqQP.Set("restype", "share") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + if options != nil && options.Sharesnapshot != nil { + reqQP.Set("sharesnapshot", *options.Sharesnapshot) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-lease-action"] = []string{"break"} + if options != nil && options.BreakPeriod != nil { + req.Raw().Header["x-ms-lease-break-period"] = []string{strconv.FormatInt(int64(*options.BreakPeriod), 10)} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil 
+} + +// breakLeaseHandleResponse handles the BreakLease response. +func (client *ShareClient) breakLeaseHandleResponse(resp *http.Response) (ShareClientBreakLeaseResponse, error) { + result := ShareClientBreakLeaseResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ShareClientBreakLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-time"); val != "" { + leaseTime32, err := strconv.ParseInt(val, 10, 32) + leaseTime := int32(leaseTime32) + if err != nil { + return ShareClientBreakLeaseResponse{}, err + } + result.LeaseTime = &leaseTime + } + if val := resp.Header.Get("x-ms-lease-id"); val != "" { + result.LeaseID = &val + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ShareClientBreakLeaseResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// ChangeLease - The Lease Share operation establishes and manages a lock on a share, or the specified snapshot for set and +// delete share operations. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - leaseID - Specifies the current lease ID on the resource. +// - options - ShareClientChangeLeaseOptions contains the optional parameters for the ShareClient.ChangeLease method. 
+func (client *ShareClient) ChangeLease(ctx context.Context, leaseID string, options *ShareClientChangeLeaseOptions) (ShareClientChangeLeaseResponse, error) { + req, err := client.changeLeaseCreateRequest(ctx, leaseID, options) + if err != nil { + return ShareClientChangeLeaseResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ShareClientChangeLeaseResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ShareClientChangeLeaseResponse{}, runtime.NewResponseError(resp) + } + return client.changeLeaseHandleResponse(resp) +} + +// changeLeaseCreateRequest creates the ChangeLease request. +func (client *ShareClient) changeLeaseCreateRequest(ctx context.Context, leaseID string, options *ShareClientChangeLeaseOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + reqQP.Set("restype", "share") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + if options != nil && options.Sharesnapshot != nil { + reqQP.Set("sharesnapshot", *options.Sharesnapshot) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-lease-action"] = []string{"change"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + if options != nil && options.ProposedLeaseID != nil { + req.Raw().Header["x-ms-proposed-lease-id"] = []string{*options.ProposedLeaseID} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// changeLeaseHandleResponse handles the ChangeLease response. 
+func (client *ShareClient) changeLeaseHandleResponse(resp *http.Response) (ShareClientChangeLeaseResponse, error) { + result := ShareClientChangeLeaseResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ShareClientChangeLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-id"); val != "" { + result.LeaseID = &val + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ShareClientChangeLeaseResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// Create - Creates a new share under the specified account. If the share with the same name already exists, the operation +// fails. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - options - ShareClientCreateOptions contains the optional parameters for the ShareClient.Create method. +func (client *ShareClient) Create(ctx context.Context, options *ShareClientCreateOptions) (ShareClientCreateResponse, error) { + req, err := client.createCreateRequest(ctx, options) + if err != nil { + return ShareClientCreateResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ShareClientCreateResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return ShareClientCreateResponse{}, runtime.NewResponseError(resp) + } + return client.createHandleResponse(resp) +} + +// createCreateRequest creates the Create request. 
+func (client *ShareClient) createCreateRequest(ctx context.Context, options *ShareClientCreateOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "share") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } + } + if options != nil && options.Quota != nil { + req.Raw().Header["x-ms-share-quota"] = []string{strconv.FormatInt(int64(*options.Quota), 10)} + } + if options != nil && options.AccessTier != nil { + req.Raw().Header["x-ms-access-tier"] = []string{string(*options.AccessTier)} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.EnabledProtocols != nil { + req.Raw().Header["x-ms-enabled-protocols"] = []string{*options.EnabledProtocols} + } + if options != nil && options.RootSquash != nil { + req.Raw().Header["x-ms-root-squash"] = []string{string(*options.RootSquash)} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// createHandleResponse handles the Create response. 
+func (client *ShareClient) createHandleResponse(resp *http.Response) (ShareClientCreateResponse, error) { + result := ShareClientCreateResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ShareClientCreateResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ShareClientCreateResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// CreatePermission - Create a permission (a security descriptor). +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - sharePermission - A permission (a security descriptor) at the share level. +// - options - ShareClientCreatePermissionOptions contains the optional parameters for the ShareClient.CreatePermission method. +func (client *ShareClient) CreatePermission(ctx context.Context, sharePermission SharePermission, options *ShareClientCreatePermissionOptions) (ShareClientCreatePermissionResponse, error) { + req, err := client.createPermissionCreateRequest(ctx, sharePermission, options) + if err != nil { + return ShareClientCreatePermissionResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ShareClientCreatePermissionResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return ShareClientCreatePermissionResponse{}, runtime.NewResponseError(resp) + } + return client.createPermissionHandleResponse(resp) +} + +// createPermissionCreateRequest creates the CreatePermission request. 
+func (client *ShareClient) createPermissionCreateRequest(ctx context.Context, sharePermission SharePermission, options *ShareClientCreatePermissionOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "share") + reqQP.Set("comp", "filepermission") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, runtime.MarshalAsJSON(req, sharePermission) +} + +// createPermissionHandleResponse handles the CreatePermission response. +func (client *ShareClient) createPermissionHandleResponse(resp *http.Response) (ShareClientCreatePermissionResponse, error) { + result := ShareClientCreatePermissionResponse{} + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ShareClientCreatePermissionResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-file-permission-key"); val != "" { + result.FilePermissionKey = &val + } + return result, nil +} + +// CreateSnapshot - Creates a read-only snapshot of a share. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - options - ShareClientCreateSnapshotOptions contains the optional parameters for the ShareClient.CreateSnapshot method. 
+func (client *ShareClient) CreateSnapshot(ctx context.Context, options *ShareClientCreateSnapshotOptions) (ShareClientCreateSnapshotResponse, error) { + req, err := client.createSnapshotCreateRequest(ctx, options) + if err != nil { + return ShareClientCreateSnapshotResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ShareClientCreateSnapshotResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return ShareClientCreateSnapshotResponse{}, runtime.NewResponseError(resp) + } + return client.createSnapshotHandleResponse(resp) +} + +// createSnapshotCreateRequest creates the CreateSnapshot request. +func (client *ShareClient) createSnapshotCreateRequest(ctx context.Context, options *ShareClientCreateSnapshotOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "share") + reqQP.Set("comp", "snapshot") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// createSnapshotHandleResponse handles the CreateSnapshot response. 
+func (client *ShareClient) createSnapshotHandleResponse(resp *http.Response) (ShareClientCreateSnapshotResponse, error) { + result := ShareClientCreateSnapshotResponse{} + if val := resp.Header.Get("x-ms-snapshot"); val != "" { + result.Snapshot = &val + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ShareClientCreateSnapshotResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ShareClientCreateSnapshotResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// Delete - Operation marks the specified share or share snapshot for deletion. The share or share snapshot and any files +// contained within it are later deleted during garbage collection. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - options - ShareClientDeleteOptions contains the optional parameters for the ShareClient.Delete method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ShareClient.GetProperties method. 
+func (client *ShareClient) Delete(ctx context.Context, options *ShareClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions) (ShareClientDeleteResponse, error) { + req, err := client.deleteCreateRequest(ctx, options, leaseAccessConditions) + if err != nil { + return ShareClientDeleteResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ShareClientDeleteResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusAccepted) { + return ShareClientDeleteResponse{}, runtime.NewResponseError(resp) + } + return client.deleteHandleResponse(resp) +} + +// deleteCreateRequest creates the Delete request. +func (client *ShareClient) deleteCreateRequest(ctx context.Context, options *ShareClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodDelete, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "share") + if options != nil && options.Sharesnapshot != nil { + reqQP.Set("sharesnapshot", *options.Sharesnapshot) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.DeleteSnapshots != nil { + req.Raw().Header["x-ms-delete-snapshots"] = []string{string(*options.DeleteSnapshots)} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// deleteHandleResponse handles the Delete response. 
+func (client *ShareClient) deleteHandleResponse(resp *http.Response) (ShareClientDeleteResponse, error) { + result := ShareClientDeleteResponse{} + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ShareClientDeleteResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// GetAccessPolicy - Returns information about stored access policies specified on the share. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - options - ShareClientGetAccessPolicyOptions contains the optional parameters for the ShareClient.GetAccessPolicy method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ShareClient.GetProperties method. +func (client *ShareClient) GetAccessPolicy(ctx context.Context, options *ShareClientGetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions) (ShareClientGetAccessPolicyResponse, error) { + req, err := client.getAccessPolicyCreateRequest(ctx, options, leaseAccessConditions) + if err != nil { + return ShareClientGetAccessPolicyResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ShareClientGetAccessPolicyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ShareClientGetAccessPolicyResponse{}, runtime.NewResponseError(resp) + } + return client.getAccessPolicyHandleResponse(resp) +} + +// getAccessPolicyCreateRequest creates the GetAccessPolicy request. 
+func (client *ShareClient) getAccessPolicyCreateRequest(ctx context.Context, options *ShareClientGetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) {
+	req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	reqQP.Set("restype", "share")
+	reqQP.Set("comp", "acl")
+	if options != nil && options.Timeout != nil {
+		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
+	}
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
+	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
+		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
+	}
+	req.Raw().Header["Accept"] = []string{"application/xml"}
+	return req, nil
+}
+
+// getAccessPolicyHandleResponse handles the GetAccessPolicy response.
+func (client *ShareClient) getAccessPolicyHandleResponse(resp *http.Response) (ShareClientGetAccessPolicyResponse, error) {
+	result := ShareClientGetAccessPolicyResponse{}
+	if val := resp.Header.Get("ETag"); val != "" {
+		result.ETag = (*azcore.ETag)(&val)
+	}
+	if val := resp.Header.Get("Last-Modified"); val != "" {
+		lastModified, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return ShareClientGetAccessPolicyResponse{}, err
+		}
+		result.LastModified = &lastModified
+	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return ShareClientGetAccessPolicyResponse{}, err
+		}
+		result.Date = &date
+	}
+	if err := runtime.UnmarshalAsXML(resp, &result); err != nil {
+		return ShareClientGetAccessPolicyResponse{}, err
+	}
+	return result, nil
+}
+
+// GetPermission - Returns the permission (security 
descriptor) for a given key +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - filePermissionKey - Key of the permission to be set for the directory/file. +// - options - ShareClientGetPermissionOptions contains the optional parameters for the ShareClient.GetPermission method. +func (client *ShareClient) GetPermission(ctx context.Context, filePermissionKey string, options *ShareClientGetPermissionOptions) (ShareClientGetPermissionResponse, error) { + req, err := client.getPermissionCreateRequest(ctx, filePermissionKey, options) + if err != nil { + return ShareClientGetPermissionResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ShareClientGetPermissionResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ShareClientGetPermissionResponse{}, runtime.NewResponseError(resp) + } + return client.getPermissionHandleResponse(resp) +} + +// getPermissionCreateRequest creates the GetPermission request. +func (client *ShareClient) getPermissionCreateRequest(ctx context.Context, filePermissionKey string, options *ShareClientGetPermissionOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "share") + reqQP.Set("comp", "filepermission") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-file-permission-key"] = []string{filePermissionKey} + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getPermissionHandleResponse handles the GetPermission response. 
+func (client *ShareClient) getPermissionHandleResponse(resp *http.Response) (ShareClientGetPermissionResponse, error) { + result := ShareClientGetPermissionResponse{} + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ShareClientGetPermissionResponse{}, err + } + result.Date = &date + } + if err := runtime.UnmarshalAsJSON(resp, &result.SharePermission); err != nil { + return ShareClientGetPermissionResponse{}, err + } + return result, nil +} + +// GetProperties - Returns all user-defined metadata and system properties for the specified share or share snapshot. The +// data returned does not include the share's list of files. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - options - ShareClientGetPropertiesOptions contains the optional parameters for the ShareClient.GetProperties method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ShareClient.GetProperties method. +func (client *ShareClient) GetProperties(ctx context.Context, options *ShareClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions) (ShareClientGetPropertiesResponse, error) { + req, err := client.getPropertiesCreateRequest(ctx, options, leaseAccessConditions) + if err != nil { + return ShareClientGetPropertiesResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ShareClientGetPropertiesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ShareClientGetPropertiesResponse{}, runtime.NewResponseError(resp) + } + return client.getPropertiesHandleResponse(resp) +} + +// getPropertiesCreateRequest creates the GetProperties request. 
+func (client *ShareClient) getPropertiesCreateRequest(ctx context.Context, options *ShareClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "share") + if options != nil && options.Sharesnapshot != nil { + reqQP.Set("sharesnapshot", *options.Sharesnapshot) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// getPropertiesHandleResponse handles the GetProperties response. 
+func (client *ShareClient) getPropertiesHandleResponse(resp *http.Response) (ShareClientGetPropertiesResponse, error) {
+	result := ShareClientGetPropertiesResponse{}
+	for hh := range resp.Header {
+		if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") {
+			if result.Metadata == nil {
+				result.Metadata = map[string]*string{}
+			}
+			result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh))
+		}
+	}
+	if val := resp.Header.Get("ETag"); val != "" {
+		result.ETag = (*azcore.ETag)(&val)
+	}
+	if val := resp.Header.Get("Last-Modified"); val != "" {
+		lastModified, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return ShareClientGetPropertiesResponse{}, err
+		}
+		result.LastModified = &lastModified
+	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return ShareClientGetPropertiesResponse{}, err
+		}
+		result.Date = &date
+	}
+	if val := resp.Header.Get("x-ms-share-quota"); val != "" {
+		quota32, err := strconv.ParseInt(val, 10, 32)
+		quota := int32(quota32)
+		if err != nil {
+			return ShareClientGetPropertiesResponse{}, err
+		}
+		result.Quota = &quota
+	}
+	if val := resp.Header.Get("x-ms-share-provisioned-iops"); val != "" {
+		provisionedIops32, err := strconv.ParseInt(val, 10, 32)
+		provisionedIops := int32(provisionedIops32)
+		if err != nil {
+			return ShareClientGetPropertiesResponse{}, err
+		}
+		result.ProvisionedIops = &provisionedIops
+	}
+	if val := resp.Header.Get("x-ms-share-provisioned-ingress-mbps"); val != "" {
+		provisionedIngressMBps32, err := strconv.ParseInt(val, 10, 32)
+		provisionedIngressMBps := int32(provisionedIngressMBps32)
+		if err != nil {
+			return ShareClientGetPropertiesResponse{}, err
+		}
+		result.ProvisionedIngressMBps = &provisionedIngressMBps
+	}
+	if val := resp.Header.Get("x-ms-share-provisioned-egress-mbps"); val != "" {
+		provisionedEgressMBps32, err := strconv.ParseInt(val, 10, 32)
+		provisionedEgressMBps := int32(provisionedEgressMBps32)
+		if err != nil {
+			return ShareClientGetPropertiesResponse{}, err
+		}
+		result.ProvisionedEgressMBps = &provisionedEgressMBps
+	}
+	if val := resp.Header.Get("x-ms-share-next-allowed-quota-downgrade-time"); val != "" {
+		nextAllowedQuotaDowngradeTime, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return ShareClientGetPropertiesResponse{}, err
+		}
+		result.NextAllowedQuotaDowngradeTime = &nextAllowedQuotaDowngradeTime
+	}
+	if val := resp.Header.Get("x-ms-lease-duration"); val != "" {
+		result.LeaseDuration = (*LeaseDurationType)(&val)
+	}
+	if val := resp.Header.Get("x-ms-lease-state"); val != "" {
+		result.LeaseState = (*LeaseStateType)(&val)
+	}
+	if val := resp.Header.Get("x-ms-lease-status"); val != "" {
+		result.LeaseStatus = (*LeaseStatusType)(&val)
+	}
+	if val := resp.Header.Get("x-ms-access-tier"); val != "" {
+		result.AccessTier = &val
+	}
+	if val := resp.Header.Get("x-ms-access-tier-change-time"); val != "" {
+		accessTierChangeTime, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return ShareClientGetPropertiesResponse{}, err
+		}
+		result.AccessTierChangeTime = &accessTierChangeTime
+	}
+	if val := resp.Header.Get("x-ms-access-tier-transition-state"); val != "" {
+		result.AccessTierTransitionState = &val
+	}
+	if val := resp.Header.Get("x-ms-enabled-protocols"); val != "" {
+		result.EnabledProtocols = &val
+	}
+	if val := resp.Header.Get("x-ms-root-squash"); val != "" {
+		result.RootSquash = (*ShareRootSquash)(&val)
+	}
+	return result, nil
+}
+
+// GetStatistics - Retrieves statistics related to the share.
+// If the operation fails it returns an *azcore.ResponseError type.
+//
+// Generated from API version 2020-10-02
+// - options - ShareClientGetStatisticsOptions contains the optional parameters for the ShareClient.GetStatistics method.
+// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ShareClient.GetProperties method. +func (client *ShareClient) GetStatistics(ctx context.Context, options *ShareClientGetStatisticsOptions, leaseAccessConditions *LeaseAccessConditions) (ShareClientGetStatisticsResponse, error) { + req, err := client.getStatisticsCreateRequest(ctx, options, leaseAccessConditions) + if err != nil { + return ShareClientGetStatisticsResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ShareClientGetStatisticsResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ShareClientGetStatisticsResponse{}, runtime.NewResponseError(resp) + } + return client.getStatisticsHandleResponse(resp) +} + +// getStatisticsCreateRequest creates the GetStatistics request. +func (client *ShareClient) getStatisticsCreateRequest(ctx context.Context, options *ShareClientGetStatisticsOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "share") + reqQP.Set("comp", "stats") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// getStatisticsHandleResponse handles the GetStatistics response. 
+func (client *ShareClient) getStatisticsHandleResponse(resp *http.Response) (ShareClientGetStatisticsResponse, error) { + result := ShareClientGetStatisticsResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ShareClientGetStatisticsResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ShareClientGetStatisticsResponse{}, err + } + result.Date = &date + } + if err := runtime.UnmarshalAsXML(resp, &result.ShareStats); err != nil { + return ShareClientGetStatisticsResponse{}, err + } + return result, nil +} + +// ReleaseLease - The Lease Share operation establishes and manages a lock on a share, or the specified snapshot for set and +// delete share operations. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - leaseID - Specifies the current lease ID on the resource. +// - options - ShareClientReleaseLeaseOptions contains the optional parameters for the ShareClient.ReleaseLease method. 
+func (client *ShareClient) ReleaseLease(ctx context.Context, leaseID string, options *ShareClientReleaseLeaseOptions) (ShareClientReleaseLeaseResponse, error) { + req, err := client.releaseLeaseCreateRequest(ctx, leaseID, options) + if err != nil { + return ShareClientReleaseLeaseResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ShareClientReleaseLeaseResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ShareClientReleaseLeaseResponse{}, runtime.NewResponseError(resp) + } + return client.releaseLeaseHandleResponse(resp) +} + +// releaseLeaseCreateRequest creates the ReleaseLease request. +func (client *ShareClient) releaseLeaseCreateRequest(ctx context.Context, leaseID string, options *ShareClientReleaseLeaseOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + reqQP.Set("restype", "share") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + if options != nil && options.Sharesnapshot != nil { + reqQP.Set("sharesnapshot", *options.Sharesnapshot) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-lease-action"] = []string{"release"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// releaseLeaseHandleResponse handles the ReleaseLease response. 
+func (client *ShareClient) releaseLeaseHandleResponse(resp *http.Response) (ShareClientReleaseLeaseResponse, error) { + result := ShareClientReleaseLeaseResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ShareClientReleaseLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ShareClientReleaseLeaseResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// RenewLease - The Lease Share operation establishes and manages a lock on a share, or the specified snapshot for set and +// delete share operations. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - leaseID - Specifies the current lease ID on the resource. +// - options - ShareClientRenewLeaseOptions contains the optional parameters for the ShareClient.RenewLease method. 
+func (client *ShareClient) RenewLease(ctx context.Context, leaseID string, options *ShareClientRenewLeaseOptions) (ShareClientRenewLeaseResponse, error) { + req, err := client.renewLeaseCreateRequest(ctx, leaseID, options) + if err != nil { + return ShareClientRenewLeaseResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ShareClientRenewLeaseResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ShareClientRenewLeaseResponse{}, runtime.NewResponseError(resp) + } + return client.renewLeaseHandleResponse(resp) +} + +// renewLeaseCreateRequest creates the RenewLease request. +func (client *ShareClient) renewLeaseCreateRequest(ctx context.Context, leaseID string, options *ShareClientRenewLeaseOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + reqQP.Set("restype", "share") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + if options != nil && options.Sharesnapshot != nil { + reqQP.Set("sharesnapshot", *options.Sharesnapshot) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-lease-action"] = []string{"renew"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// renewLeaseHandleResponse handles the RenewLease response. 
+func (client *ShareClient) renewLeaseHandleResponse(resp *http.Response) (ShareClientRenewLeaseResponse, error) { + result := ShareClientRenewLeaseResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ShareClientRenewLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-id"); val != "" { + result.LeaseID = &val + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ShareClientRenewLeaseResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// Restore - Restores a previously deleted Share. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - options - ShareClientRestoreOptions contains the optional parameters for the ShareClient.Restore method. +func (client *ShareClient) Restore(ctx context.Context, options *ShareClientRestoreOptions) (ShareClientRestoreResponse, error) { + req, err := client.restoreCreateRequest(ctx, options) + if err != nil { + return ShareClientRestoreResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ShareClientRestoreResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return ShareClientRestoreResponse{}, runtime.NewResponseError(resp) + } + return client.restoreHandleResponse(resp) +} + +// restoreCreateRequest creates the Restore request. 
+func (client *ShareClient) restoreCreateRequest(ctx context.Context, options *ShareClientRestoreOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "share") + reqQP.Set("comp", "undelete") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.DeletedShareName != nil { + req.Raw().Header["x-ms-deleted-share-name"] = []string{*options.DeletedShareName} + } + if options != nil && options.DeletedShareVersion != nil { + req.Raw().Header["x-ms-deleted-share-version"] = []string{*options.DeletedShareVersion} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// restoreHandleResponse handles the Restore response. 
+func (client *ShareClient) restoreHandleResponse(resp *http.Response) (ShareClientRestoreResponse, error) { + result := ShareClientRestoreResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ShareClientRestoreResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ShareClientRestoreResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// SetAccessPolicy - Sets a stored access policy for use with shared access signatures. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - shareACL - The ACL for the share. +// - options - ShareClientSetAccessPolicyOptions contains the optional parameters for the ShareClient.SetAccessPolicy method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ShareClient.GetProperties method. 
+func (client *ShareClient) SetAccessPolicy(ctx context.Context, shareACL []*SignedIdentifier, options *ShareClientSetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions) (ShareClientSetAccessPolicyResponse, error) { + req, err := client.setAccessPolicyCreateRequest(ctx, shareACL, options, leaseAccessConditions) + if err != nil { + return ShareClientSetAccessPolicyResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ShareClientSetAccessPolicyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ShareClientSetAccessPolicyResponse{}, runtime.NewResponseError(resp) + } + return client.setAccessPolicyHandleResponse(resp) +} + +// setAccessPolicyCreateRequest creates the SetAccessPolicy request. +func (client *ShareClient) setAccessPolicyCreateRequest(ctx context.Context, shareACL []*SignedIdentifier, options *ShareClientSetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "share") + reqQP.Set("comp", "acl") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + type wrapper struct { + XMLName xml.Name `xml:"SignedIdentifiers"` + ShareACL *[]*SignedIdentifier `xml:"SignedIdentifier"` + } + return req, runtime.MarshalAsXML(req, wrapper{ShareACL: &shareACL}) +} + +// setAccessPolicyHandleResponse handles the SetAccessPolicy response. 
+func (client *ShareClient) setAccessPolicyHandleResponse(resp *http.Response) (ShareClientSetAccessPolicyResponse, error) { + result := ShareClientSetAccessPolicyResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ShareClientSetAccessPolicyResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ShareClientSetAccessPolicyResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// SetMetadata - Sets one or more user-defined name-value pairs for the specified share. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - options - ShareClientSetMetadataOptions contains the optional parameters for the ShareClient.SetMetadata method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ShareClient.GetProperties method. 
+func (client *ShareClient) SetMetadata(ctx context.Context, options *ShareClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions) (ShareClientSetMetadataResponse, error) { + req, err := client.setMetadataCreateRequest(ctx, options, leaseAccessConditions) + if err != nil { + return ShareClientSetMetadataResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ShareClientSetMetadataResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ShareClientSetMetadataResponse{}, runtime.NewResponseError(resp) + } + return client.setMetadataHandleResponse(resp) +} + +// setMetadataCreateRequest creates the SetMetadata request. +func (client *ShareClient) setMetadataCreateRequest(ctx context.Context, options *ShareClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "share") + reqQP.Set("comp", "metadata") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// setMetadataHandleResponse handles the SetMetadata response. 
+func (client *ShareClient) setMetadataHandleResponse(resp *http.Response) (ShareClientSetMetadataResponse, error) { + result := ShareClientSetMetadataResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ShareClientSetMetadataResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ShareClientSetMetadataResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// SetProperties - Sets properties for the specified share. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - options - ShareClientSetPropertiesOptions contains the optional parameters for the ShareClient.SetProperties method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ShareClient.GetProperties method. +func (client *ShareClient) SetProperties(ctx context.Context, options *ShareClientSetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions) (ShareClientSetPropertiesResponse, error) { + req, err := client.setPropertiesCreateRequest(ctx, options, leaseAccessConditions) + if err != nil { + return ShareClientSetPropertiesResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ShareClientSetPropertiesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ShareClientSetPropertiesResponse{}, runtime.NewResponseError(resp) + } + return client.setPropertiesHandleResponse(resp) +} + +// setPropertiesCreateRequest creates the SetProperties request. 
func (client *ShareClient) setPropertiesCreateRequest(ctx context.Context, options *ShareClientSetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	// restype=share&comp=properties addresses the share-level properties resource.
	reqQP.Set("restype", "share")
	reqQP.Set("comp", "properties")
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	// Optional share settings are sent only when the caller supplied them.
	if options != nil && options.Quota != nil {
		req.Raw().Header["x-ms-share-quota"] = []string{strconv.FormatInt(int64(*options.Quota), 10)}
	}
	if options != nil && options.AccessTier != nil {
		req.Raw().Header["x-ms-access-tier"] = []string{string(*options.AccessTier)}
	}
	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
	}
	if options != nil && options.RootSquash != nil {
		req.Raw().Header["x-ms-root-squash"] = []string{string(*options.RootSquash)}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}

// setPropertiesHandleResponse handles the SetProperties response.
func (client *ShareClient) setPropertiesHandleResponse(resp *http.Response) (ShareClientSetPropertiesResponse, error) {
	result := ShareClientSetPropertiesResponse{}
	if val := resp.Header.Get("ETag"); val != "" {
		result.ETag = (*azcore.ETag)(&val)
	}
	if val := resp.Header.Get("Last-Modified"); val != "" {
		// HTTP date headers use the RFC1123 layout.
		lastModified, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return ShareClientSetPropertiesResponse{}, err
		}
		result.LastModified = &lastModified
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return ShareClientSetPropertiesResponse{}, err
		}
		result.Date = &date
	}
	return result, nil
}
diff --git a/sdk/storage/azfile/internal/generated/zz_time_rfc1123.go b/sdk/storage/azfile/internal/generated/zz_time_rfc1123.go
new file mode 100644
index 000000000000..4b4d51aa3994
--- /dev/null
+++ b/sdk/storage/azfile/internal/generated/zz_time_rfc1123.go
@@ -0,0 +1,43 @@
//go:build go1.18
// +build go1.18

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
// DO NOT EDIT.
package generated

import (
	"strings"
	"time"
)

const (
	// rfc1123JSON is time.RFC1123 wrapped in double quotes, i.e. the layout of
	// an RFC1123 timestamp as it appears inside a JSON string.
	rfc1123JSON = `"` + time.RFC1123 + `"`
)

// timeRFC1123 wraps time.Time so it (un)marshals using the RFC1123 layout
// instead of the encoding defaults.
type timeRFC1123 time.Time

// MarshalJSON encodes the time as a quoted RFC1123 JSON string.
func (t timeRFC1123) MarshalJSON() ([]byte, error) {
	b := []byte(time.Time(t).Format(rfc1123JSON))
	return b, nil
}

// MarshalText encodes the time as an unquoted RFC1123 string.
func (t timeRFC1123) MarshalText() ([]byte, error) {
	b := []byte(time.Time(t).Format(time.RFC1123))
	return b, nil
}

// UnmarshalJSON parses a quoted RFC1123 JSON string; input is uppercased before parsing.
func (t *timeRFC1123) UnmarshalJSON(data []byte) error {
	p, err := time.Parse(rfc1123JSON, strings.ToUpper(string(data)))
	*t = timeRFC1123(p)
	return err
}

// UnmarshalText parses an unquoted RFC1123 string.
func (t *timeRFC1123) UnmarshalText(data []byte) error {
	p, err := time.Parse(time.RFC1123, string(data))
	*t = timeRFC1123(p)
	return err
}
diff --git a/sdk/storage/azfile/internal/generated/zz_time_rfc3339.go b/sdk/storage/azfile/internal/generated/zz_time_rfc3339.go
new file mode 100644
index 000000000000..1ce9d621164e
--- /dev/null
+++ b/sdk/storage/azfile/internal/generated/zz_time_rfc3339.go
@@ -0,0 +1,59 @@
//go:build go1.18
// +build go1.18

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
// DO NOT EDIT.

package generated

import (
	"regexp"
	"strings"
	"time"
)

const (
	// Layouts with (JSON) and without surrounding quotes; the utc variants have no time-zone suffix.
	utcLayoutJSON = `"2006-01-02T15:04:05.999999999"`
	utcLayout     = "2006-01-02T15:04:05.999999999"
	rfc3339JSON   = `"` + time.RFC3339Nano + `"`
)

// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases.
// tzOffsetRegex matches a trailing time-zone suffix ('Z' or a +hh:mm / -hh:mm
// offset), optionally followed by the closing JSON quote. It decides which
// layout to use when unmarshaling.
var tzOffsetRegex = regexp.MustCompile(`(Z|z|\+|-)(\d+:\d+)*"*$`)

// timeRFC3339 wraps time.Time to tolerate Azure timestamps that omit the time-zone suffix.
type timeRFC3339 time.Time

// MarshalJSON delegates to the standard time.Time JSON encoding (RFC3339).
func (t timeRFC3339) MarshalJSON() (json []byte, err error) {
	tt := time.Time(t)
	return tt.MarshalJSON()
}

// MarshalText delegates to the standard time.Time text encoding (RFC3339).
func (t timeRFC3339) MarshalText() (text []byte, err error) {
	tt := time.Time(t)
	return tt.MarshalText()
}

// UnmarshalJSON picks the RFC3339 layout when a zone suffix is present, else the zone-less layout.
func (t *timeRFC3339) UnmarshalJSON(data []byte) error {
	layout := utcLayoutJSON
	if tzOffsetRegex.Match(data) {
		layout = rfc3339JSON
	}
	return t.Parse(layout, string(data))
}

// UnmarshalText mirrors UnmarshalJSON for unquoted input.
func (t *timeRFC3339) UnmarshalText(data []byte) (err error) {
	layout := utcLayout
	if tzOffsetRegex.Match(data) {
		layout = time.RFC3339Nano
	}
	return t.Parse(layout, string(data))
}

// Parse parses value with the given layout; input is uppercased before parsing.
func (t *timeRFC3339) Parse(layout, value string) error {
	p, err := time.Parse(layout, strings.ToUpper(value))
	*t = timeRFC3339(p)
	return err
}
diff --git a/sdk/storage/azfile/internal/generated/zz_xml_helper.go b/sdk/storage/azfile/internal/generated/zz_xml_helper.go
new file mode 100644
index 000000000000..144ea18e1aba
--- /dev/null
+++ b/sdk/storage/azfile/internal/generated/zz_xml_helper.go
@@ -0,0 +1,41 @@
//go:build go1.18
// +build go1.18

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
// DO NOT EDIT.

package generated

import (
	"encoding/xml"
	"strings"
)

// additionalProperties maps XML element names (lowercased) to their text content.
type additionalProperties map[string]*string

// UnmarshalXML implements the xml.Unmarshaler interface for additionalProperties.
+func (ap *additionalProperties) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + tokName := "" + for t, err := d.Token(); err == nil; t, err = d.Token() { + switch tt := t.(type) { + case xml.StartElement: + tokName = strings.ToLower(tt.Name.Local) + break + case xml.CharData: + if tokName == "" { + continue + } + if *ap == nil { + *ap = additionalProperties{} + } + s := string(tt) + (*ap)[tokName] = &s + tokName = "" + break + } + } + return nil +} diff --git a/sdk/storage/azfile/internal/shared/batch_transfer.go b/sdk/storage/azfile/internal/shared/batch_transfer.go new file mode 100644 index 000000000000..ec5541bfbb13 --- /dev/null +++ b/sdk/storage/azfile/internal/shared/batch_transfer.go @@ -0,0 +1,77 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package shared + +import ( + "context" + "errors" +) + +// BatchTransferOptions identifies options used by doBatchTransfer. +type BatchTransferOptions struct { + TransferSize int64 + ChunkSize int64 + Concurrency uint16 + Operation func(ctx context.Context, offset int64, chunkSize int64) error + OperationName string +} + +// DoBatchTransfer helps to execute operations in a batch manner. +// Can be used by users to customize batch works (for other scenarios that the SDK does not provide) +func DoBatchTransfer(ctx context.Context, o *BatchTransferOptions) error { + if o.ChunkSize == 0 { + return errors.New("ChunkSize cannot be 0") + } + + if o.Concurrency == 0 { + o.Concurrency = 5 // default concurrency + } + + // Prepare and do parallel operations. 
+ numChunks := uint16(((o.TransferSize - 1) / o.ChunkSize) + 1) + operationChannel := make(chan func() error, o.Concurrency) // Create the channel that release 'concurrency' goroutines concurrently + operationResponseChannel := make(chan error, numChunks) // Holds each response + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + // Create the goroutines that process each operation (in parallel). + for g := uint16(0); g < o.Concurrency; g++ { + //grIndex := g + go func() { + for f := range operationChannel { + err := f() + operationResponseChannel <- err + } + }() + } + + // Add each chunk's operation to the channel. + for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ { + curChunkSize := o.ChunkSize + + if chunkNum == numChunks-1 { // Last chunk + curChunkSize = o.TransferSize - (int64(chunkNum) * o.ChunkSize) // Remove size of all transferred chunks from total + } + offset := int64(chunkNum) * o.ChunkSize + operationChannel <- func() error { + return o.Operation(ctx, offset, curChunkSize) + } + } + close(operationChannel) + + // Wait for the operations to complete. + var firstErr error = nil + for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ { + responseError := <-operationResponseChannel + // record the first error (the original error which should cause the other chunks to fail with canceled context) + if responseError != nil && firstErr == nil { + cancel() // As soon as any operation fails, cancel all remaining operation calls + firstErr = responseError + } + } + return firstErr +} diff --git a/sdk/storage/azfile/internal/shared/bytes_writer.go b/sdk/storage/azfile/internal/shared/bytes_writer.go new file mode 100644 index 000000000000..8d4d35bdeffd --- /dev/null +++ b/sdk/storage/azfile/internal/shared/bytes_writer.go @@ -0,0 +1,30 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
package shared

import (
	"errors"
)

// bytesWriter adapts a byte slice to the io.WriterAt interface.
type bytesWriter []byte

// NewBytesWriter returns a writer that copies data into b at arbitrary offsets.
// NOTE(review): exported function returning the unexported type bytesWriter;
// callers consume it via io.WriterAt.
func NewBytesWriter(b []byte) bytesWriter {
	return b
}

// WriteAt copies b into the slice at offset off. It rejects offsets outside
// the slice and reports a short write when b does not fully fit.
func (c bytesWriter) WriteAt(b []byte, off int64) (int, error) {
	if off >= int64(len(c)) || off < 0 {
		return 0, errors.New("offset value is out of range")
	}

	n := copy(c[int(off):], b)
	if n < len(b) {
		// Partial write: only the bytes that fit were copied.
		return n, errors.New("not enough space for all bytes")
	}

	return n, nil
}
diff --git a/sdk/storage/azfile/internal/shared/bytes_writer_test.go b/sdk/storage/azfile/internal/shared/bytes_writer_test.go
new file mode 100644
index 000000000000..5f1bc53c29ca
--- /dev/null
+++ b/sdk/storage/azfile/internal/shared/bytes_writer_test.go
@@ -0,0 +1,37 @@
//go:build go1.18
// +build go1.18

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.

package shared

import (
	"bytes"
	"testing"

	"github.com/stretchr/testify/require"
)

// TestBytesWriter covers out-of-range offsets, partial writes, and a full in-bounds write.
func TestBytesWriter(t *testing.T) {
	b := make([]byte, 10)
	buffer := NewBytesWriter(b)

	// Offset at/after the end of the slice is rejected outright.
	count, err := buffer.WriteAt([]byte{1, 2}, 10)
	require.Contains(t, err.Error(), "offset value is out of range")
	require.Equal(t, count, 0)

	// Negative offsets are rejected as well.
	count, err = buffer.WriteAt([]byte{1, 2}, -1)
	require.Contains(t, err.Error(), "offset value is out of range")
	require.Equal(t, count, 0)

	// A write that only partially fits copies what it can and reports a short write.
	count, err = buffer.WriteAt([]byte{1, 2}, 9)
	require.Contains(t, err.Error(), "not enough space for all bytes")
	require.Equal(t, count, 1)
	require.Equal(t, bytes.Compare(b, []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 1}), 0)

	// A write that fits completely succeeds.
	count, err = buffer.WriteAt([]byte{1, 2}, 8)
	require.NoError(t, err)
	require.Equal(t, count, 2)
	require.Equal(t, bytes.Compare(b, []byte{0, 0, 0, 0, 0, 0, 0, 0, 1, 2}), 0)
}
diff --git a/sdk/storage/azfile/internal/shared/section_writer.go b/sdk/storage/azfile/internal/shared/section_writer.go
new file mode 100644
index 000000000000..c8528a2e3ed2
--- /dev/null
+++
b/sdk/storage/azfile/internal/shared/section_writer.go @@ -0,0 +1,53 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package shared + +import ( + "errors" + "io" +) + +type SectionWriter struct { + Count int64 + Offset int64 + Position int64 + WriterAt io.WriterAt +} + +func NewSectionWriter(c io.WriterAt, off int64, count int64) *SectionWriter { + return &SectionWriter{ + Count: count, + Offset: off, + WriterAt: c, + } +} + +func (c *SectionWriter) Write(p []byte) (int, error) { + remaining := c.Count - c.Position + + if remaining <= 0 { + return 0, errors.New("end of section reached") + } + + slice := p + + if int64(len(slice)) > remaining { + slice = slice[:remaining] + } + + n, err := c.WriterAt.WriteAt(slice, c.Offset+c.Position) + c.Position += int64(n) + if err != nil { + return n, err + } + + if len(p) > n { + return n, errors.New("not enough space for all bytes") + } + + return n, nil +} diff --git a/sdk/storage/azfile/internal/shared/section_writer_test.go b/sdk/storage/azfile/internal/shared/section_writer_test.go new file mode 100644 index 000000000000..a1cf22da410a --- /dev/null +++ b/sdk/storage/azfile/internal/shared/section_writer_test.go @@ -0,0 +1,98 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
package shared

import (
	"bytes"
	"io"
	"testing"

	"github.com/stretchr/testify/require"
)

// TestSectionWriter exercises writes inside, at the edge of, and past the section window.
func TestSectionWriter(t *testing.T) {
	b := [10]byte{}
	buffer := NewBytesWriter(b[:])

	section := NewSectionWriter(buffer, 0, 5)
	require.Equal(t, section.Count, int64(5))
	require.Equal(t, section.Offset, int64(0))
	require.Equal(t, section.Position, int64(0))

	count, err := section.Write([]byte{1, 2, 3})
	require.NoError(t, err)
	require.Equal(t, count, 3)
	require.Equal(t, section.Position, int64(3))
	require.Equal(t, b, [10]byte{1, 2, 3, 0, 0, 0, 0, 0, 0, 0})

	// Only 2 of the 3 bytes fit in the remaining window.
	count, err = section.Write([]byte{4, 5, 6})
	require.Contains(t, err.Error(), "not enough space for all bytes")
	require.Equal(t, count, 2)
	require.Equal(t, section.Position, int64(5))
	require.Equal(t, b, [10]byte{1, 2, 3, 4, 5, 0, 0, 0, 0, 0})

	// The section is now full; further writes fail without touching the buffer.
	count, err = section.Write([]byte{6, 7, 8})
	require.Contains(t, err.Error(), "end of section reached")
	require.Equal(t, count, 0)
	require.Equal(t, section.Position, int64(5))
	require.Equal(t, b, [10]byte{1, 2, 3, 4, 5, 0, 0, 0, 0, 0})

	// Intentionally create a section writer which will attempt to write
	// outside the bounds of the buffer.
	section = NewSectionWriter(buffer, 5, 6)
	require.Equal(t, section.Count, int64(6))
	require.Equal(t, section.Offset, int64(5))
	require.Equal(t, section.Position, int64(0))

	count, err = section.Write([]byte{6, 7, 8})
	require.NoError(t, err)
	require.Equal(t, count, 3)
	require.Equal(t, section.Position, int64(3))
	require.Equal(t, b, [10]byte{1, 2, 3, 4, 5, 6, 7, 8, 0, 0})

	// Attempt to write past the end of the section. Since the underlying
	// buffer rejects the write it gives the same error as in the normal case.
	count, err = section.Write([]byte{9, 10, 11})
	require.Contains(t, err.Error(), "not enough space for all bytes")
	require.Equal(t, count, 2)
	require.Equal(t, section.Position, int64(5))
	require.Equal(t, b, [10]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10})

	// Attempt to write past the end of the buffer. In this case the buffer
	// rejects the write completely since it falls completely out of bounds.
	count, err = section.Write([]byte{11, 12, 13})
	require.Contains(t, err.Error(), "offset value is out of range")
	require.Equal(t, count, 0)
	require.Equal(t, section.Position, int64(5))
	require.Equal(t, b, [10]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10})
}

// Copying an empty source into an empty section is a no-op, not an error.
func TestSectionWriterCopySrcDestEmpty(t *testing.T) {
	input := make([]byte, 0)
	reader := bytes.NewReader(input)

	output := make([]byte, 0)
	buffer := NewBytesWriter(output)
	section := NewSectionWriter(buffer, 0, 0)

	count, err := io.Copy(section, reader)
	require.NoError(t, err)
	require.Equal(t, count, int64(0))
}

// Copying a non-empty source into a zero-width section reports section exhaustion.
func TestSectionWriterCopyDestEmpty(t *testing.T) {
	input := make([]byte, 10)
	reader := bytes.NewReader(input)

	output := make([]byte, 0)
	buffer := NewBytesWriter(output)
	section := NewSectionWriter(buffer, 0, 0)

	count, err := io.Copy(section, reader)
	require.Contains(t, err.Error(), "end of section reached")
	require.Equal(t, count, int64(0))
}
diff --git a/sdk/storage/azfile/internal/shared/shared.go b/sdk/storage/azfile/internal/shared/shared.go
new file mode 100644
index 000000000000..9ef2a3396816
--- /dev/null
+++ b/sdk/storage/azfile/internal/shared/shared.go
@@ -0,0 +1,209 @@
//go:build go1.18
// +build go1.18

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package shared

import (
	"errors"
	"fmt"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/internal/uuid"
	"hash/crc64"
	"io"
	"net"
	"strings"
)

const (
	// TokenScope is the AAD scope requested for storage access tokens.
	TokenScope = "https://storage.azure.com/.default"
)

// Canonical HTTP header names used when signing/sending storage requests.
const (
	HeaderAuthorization     = "Authorization"
	HeaderXmsDate           = "x-ms-date"
	HeaderContentLength     = "Content-Length"
	HeaderContentEncoding   = "Content-Encoding"
	HeaderContentLanguage   = "Content-Language"
	HeaderContentType       = "Content-Type"
	HeaderContentMD5        = "Content-MD5"
	HeaderIfModifiedSince   = "If-Modified-Since"
	HeaderIfMatch           = "If-Match"
	HeaderIfNoneMatch       = "If-None-Match"
	HeaderIfUnmodifiedSince = "If-Unmodified-Since"
	HeaderRange             = "Range"
)

const StorageAnalyticsVersion = "1.0"

// crc64Polynomial is the polynomial used to build CRC64Table.
const crc64Polynomial uint64 = 0x9A6C9329AC4BC9B5

// CRC64Table is the precomputed lookup table for crc64Polynomial.
var CRC64Table = crc64.MakeTable(crc64Polynomial)

const (
	// DefaultFilePermissionString is a constant for all intents and purposes.
	// Inherit inherits permissions from the parent folder (default when creating files/folders)
	DefaultFilePermissionString = "inherit"

	// DefaultCurrentTimeString sets creation/last write times to now
	DefaultCurrentTimeString = "now"

	// DefaultPreserveString preserves old permissions on the file/folder (default when updating properties)
	DefaultPreserveString = "preserve"

	// FileAttributesNone is defaults for file attributes when creating file.
	// This attribute is valid only when used alone.
	FileAttributesNone = "None"

	// FileAttributesDirectory is defaults for file attributes when creating directory.
	// The attribute that identifies a directory
	FileAttributesDirectory = "Directory"
)

// GetClientOptions returns o, or a freshly zero-valued *T when o is nil, so
// callers never have to nil-check their options struct.
func GetClientOptions[T any](o *T) *T {
	if o == nil {
		return new(T)
	}
	return o
}

// errConnectionString is returned for any blank/malformed connection string.
var errConnectionString = errors.New("connection string is either blank or malformed. The expected connection string " +
	"should contain key value pairs separated by semicolons. For example 'DefaultEndpointsProtocol=https;AccountName=;" +
	"AccountKey=;EndpointSuffix=core.windows.net'")

// ParsedConnectionString holds the pieces extracted from a storage connection string.
type ParsedConnectionString struct {
	ServiceURL  string
	AccountName string
	AccountKey  string
}

// ParseConnectionString splits a semicolon-delimited "key=value" connection
// string and derives the file-service URL. SharedAccessSignature is accepted
// in place of AccountKey; FileEndpoint, DefaultEndpointsProtocol and
// EndpointSuffix override the defaults (https / core.windows.net).
func ParseConnectionString(connectionString string) (ParsedConnectionString, error) {
	const (
		defaultScheme = "https"
		defaultSuffix = "core.windows.net"
	)

	connStrMap := make(map[string]string)
	// Tolerate a trailing semicolon before splitting into pairs.
	connectionString = strings.TrimRight(connectionString, ";")

	splitString := strings.Split(connectionString, ";")
	if len(splitString) == 0 {
		return ParsedConnectionString{}, errConnectionString
	}
	for _, stringPart := range splitString {
		parts := strings.SplitN(stringPart, "=", 2)
		if len(parts) != 2 {
			return ParsedConnectionString{}, errConnectionString
		}
		connStrMap[parts[0]] = parts[1]
	}

	accountName, ok := connStrMap["AccountName"]
	if !ok {
		return ParsedConnectionString{}, errors.New("connection string missing AccountName")
	}

	accountKey, ok := connStrMap["AccountKey"]
	if !ok {
		// No key: a SAS-based connection string carries the signature in the URL
		// and has no account name/key in the result.
		sharedAccessSignature, ok := connStrMap["SharedAccessSignature"]
		if !ok {
			return ParsedConnectionString{}, errors.New("connection string missing AccountKey and SharedAccessSignature")
		}
		return ParsedConnectionString{
			ServiceURL: fmt.Sprintf("%v://%v.file.%v/?%v", defaultScheme, accountName, defaultSuffix, sharedAccessSignature),
		}, nil
	}

	protocol, ok := connStrMap["DefaultEndpointsProtocol"]
	if !ok {
		protocol = defaultScheme
	}

	suffix, ok := connStrMap["EndpointSuffix"]
	if !ok {
		suffix = defaultSuffix
	}

	// An explicit FileEndpoint (e.g. Azurite or a custom domain) wins over the composed URL.
	if fileEndpoint, ok := connStrMap["FileEndpoint"]; ok {
		return ParsedConnectionString{
			ServiceURL:  fileEndpoint,
			AccountName: accountName,
			AccountKey:  accountKey,
		}, nil
	}

	return ParsedConnectionString{
		ServiceURL:  fmt.Sprintf("%v://%v.file.%v", protocol, accountName, suffix),
		AccountName: accountName,
		AccountKey:  accountKey,
	}, nil
}

// IsIPEndpointStyle checks if URL's host is IP, in this case the storage account endpoint will be composed as:
// http(s)://IP(:port)/storageaccount/share(||container||etc)/...
// As url's Host property, host could be both host or host:port
func IsIPEndpointStyle(host string) bool {
	if host == "" {
		return false
	}
	if h, _, err := net.SplitHostPort(host); err == nil {
		host = h
	}
	// For IPv6, there could be case where SplitHostPort fails for cannot finding port.
	// In this case, eliminate the '[' and ']' in the URL.
	// For details about IPv6 URL, please refer to https://tools.ietf.org/html/rfc2732
	if host[0] == '[' && host[len(host)-1] == ']' {
		host = host[1 : len(host)-1]
	}
	return net.ParseIP(host) != nil
}

// GenerateLeaseID returns leaseID unchanged when non-nil, otherwise a freshly generated UUID string.
func GenerateLeaseID(leaseID *string) (*string, error) {
	if leaseID == nil {
		generatedUuid, err := uuid.New()
		if err != nil {
			return nil, err
		}
		leaseID = to.Ptr(generatedUuid.String())
	}
	return leaseID, nil
}

// ValidateSeekableStreamAt0AndGetCount verifies body is a seekable stream positioned
// at 0 and returns its total length, restoring the position to 0 afterwards.
func ValidateSeekableStreamAt0AndGetCount(body io.ReadSeeker) (int64, error) {
	if body == nil { // nil body is "logically" seekable to 0 and are 0 bytes long
		return 0, nil
	}

	err := validateSeekableStreamAt0(body)
	if err != nil {
		return 0, err
	}

	// Seek to the end to learn the length, then rewind.
	count, err := body.Seek(0, io.SeekEnd)
	if err != nil {
		return 0, errors.New("body stream must be seekable")
	}

	_, err = body.Seek(0, io.SeekStart)
	if err != nil {
		return 0, err
	}
	return count, nil
}

// return an error if body is not a valid seekable stream at 0
func validateSeekableStreamAt0(body io.ReadSeeker) error {
	if body == nil { // nil body is "logically" seekable to 0
		return nil
	}
	if pos, err := body.Seek(0, io.SeekCurrent); pos != 0 || err != nil {
		// Help detect programmer error
		if err != nil {
			return errors.New("body stream must be seekable")
		}
		return errors.New("body stream must be set to position 0")
	}
	return nil
}
diff --git a/sdk/storage/azfile/internal/shared/shared_test.go
b/sdk/storage/azfile/internal/shared/shared_test.go new file mode 100644 index 000000000000..1cd5da99469d --- /dev/null +++ b/sdk/storage/azfile/internal/shared/shared_test.go @@ -0,0 +1,95 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package shared + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestParseConnectionStringInvalid(t *testing.T) { + badConnectionStrings := []string{ + "", + "foobar", + "foo;bar;baz", + "foo=;bar=;", + "=", + ";", + "=;==", + "foobar=baz=foo", + } + + for _, badConnStr := range badConnectionStrings { + parsed, err := ParseConnectionString(badConnStr) + require.Error(t, err) + require.Zero(t, parsed) + } +} + +func TestParseConnectionString(t *testing.T) { + connStr := "DefaultEndpointsProtocol=https;AccountName=dummyaccount;AccountKey=secretkeykey;EndpointSuffix=core.windows.net" + parsed, err := ParseConnectionString(connStr) + require.NoError(t, err) + require.Equal(t, "https://dummyaccount.file.core.windows.net", parsed.ServiceURL) + require.Equal(t, "dummyaccount", parsed.AccountName) + require.Equal(t, "secretkeykey", parsed.AccountKey) +} + +func TestParseConnectionStringHTTP(t *testing.T) { + connStr := "DefaultEndpointsProtocol=http;AccountName=dummyaccount;AccountKey=secretkeykey;EndpointSuffix=core.windows.net" + parsed, err := ParseConnectionString(connStr) + require.NoError(t, err) + require.Equal(t, "http://dummyaccount.file.core.windows.net", parsed.ServiceURL) + require.Equal(t, "dummyaccount", parsed.AccountName) + require.Equal(t, "secretkeykey", parsed.AccountKey) +} + +func TestParseConnectionStringBasic(t *testing.T) { + connStr := "AccountName=dummyaccount;AccountKey=secretkeykey" + parsed, err := ParseConnectionString(connStr) + require.NoError(t, err) + require.Equal(t, "https://dummyaccount.file.core.windows.net", 
parsed.ServiceURL) + require.Equal(t, "dummyaccount", parsed.AccountName) + require.Equal(t, "secretkeykey", parsed.AccountKey) +} + +func TestParseConnectionStringCustomDomain(t *testing.T) { + connStr := "AccountName=dummyaccount;AccountKey=secretkeykey;FileEndpoint=www.mydomain.com;" + parsed, err := ParseConnectionString(connStr) + require.NoError(t, err) + require.Equal(t, "www.mydomain.com", parsed.ServiceURL) + require.Equal(t, "dummyaccount", parsed.AccountName) + require.Equal(t, "secretkeykey", parsed.AccountKey) +} + +func TestParseConnectionStringSAS(t *testing.T) { + connStr := "AccountName=dummyaccount;SharedAccessSignature=fakesharedaccesssignature;" + parsed, err := ParseConnectionString(connStr) + require.NoError(t, err) + require.Equal(t, "https://dummyaccount.file.core.windows.net/?fakesharedaccesssignature", parsed.ServiceURL) + require.Empty(t, parsed.AccountName) + require.Empty(t, parsed.AccountKey) +} + +func TestParseConnectionStringChinaCloud(t *testing.T) { + connStr := "AccountName=dummyaccountname;AccountKey=secretkeykey;DefaultEndpointsProtocol=http;EndpointSuffix=core.chinacloudapi.cn;" + parsed, err := ParseConnectionString(connStr) + require.NoError(t, err) + require.Equal(t, "http://dummyaccountname.file.core.chinacloudapi.cn", parsed.ServiceURL) + require.Equal(t, "dummyaccountname", parsed.AccountName) + require.Equal(t, "secretkeykey", parsed.AccountKey) +} + +func TestCParseConnectionStringAzurite(t *testing.T) { + connStr := "DefaultEndpointsProtocol=http;AccountName=dummyaccountname;AccountKey=secretkeykey;FileEndpoint=http://local-machine:11002/custom/account/path/faketokensignature;" + parsed, err := ParseConnectionString(connStr) + require.NoError(t, err) + require.Equal(t, "http://local-machine:11002/custom/account/path/faketokensignature", parsed.ServiceURL) + require.Equal(t, "dummyaccountname", parsed.AccountName) + require.Equal(t, "secretkeykey", parsed.AccountKey) +} diff --git 
a/sdk/storage/azfile/internal/testcommon/clients_auth.go b/sdk/storage/azfile/internal/testcommon/clients_auth.go
new file mode 100644
index 000000000000..8e2e562116f0
--- /dev/null
+++ b/sdk/storage/azfile/internal/testcommon/clients_auth.go
@@ -0,0 +1,224 @@
//go:build go1.18
// +build go1.18

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.

// Contains common helpers for TESTS ONLY
package testcommon

import (
	"context"
	"errors"
	"fmt"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/internal/recording"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/directory"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/service"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/share"
	"github.com/stretchr/testify/require"
	"strings"
	"testing"
	"time"
)

// TestAccountType selects which environment-variable prefix supplies account credentials.
type TestAccountType string

const (
	TestAccountDefault    TestAccountType = ""
	TestAccountSecondary  TestAccountType = "SECONDARY_"
	TestAccountPremium    TestAccountType = "PREMIUM_"
	TestAccountSoftDelete TestAccountType = "SOFT_DELETE_"
)

const (
	DefaultEndpointSuffix       = "core.windows.net/"
	DefaultFileEndpointSuffix   = "file.core.windows.net/"
	AccountNameEnvVar           = "AZURE_STORAGE_ACCOUNT_NAME"
	AccountKeyEnvVar            = "AZURE_STORAGE_ACCOUNT_KEY"
	DefaultEndpointSuffixEnvVar = "AZURE_STORAGE_ENDPOINT_SUFFIX"
)

// Fixed values substituted during recorded-test playback.
const (
	FakeStorageAccount = "fakestorage"
	FakeStorageURL     = "https://fakestorage.file.core.windows.net"
	FakeToken          = "faketoken"
)

const (
	ISO8601 = "2006-01-02T15:04:05.0000000Z07:00"
)

var (
	// SampleSDDL is a sample security descriptor (SDDL) string used by permission tests.
	SampleSDDL = `O:S-1-5-32-548G:S-1-5-21-397955417-626881126-188441444-512D:(A;;RPWPCCDCLCSWRCWDWOGA;;;S-1-0-0)`
)

var BasicMetadata = map[string]*string{
	"foo": to.Ptr("foovalue"),
	"bar": to.Ptr("barvalue"),
}

// SetClientOptions wires the recording transport into opts so requests are captured/replayed.
func SetClientOptions(t *testing.T, opts *azcore.ClientOptions) {
	opts.Logging.AllowedHeaders = append(opts.Logging.AllowedHeaders, "X-Request-Mismatch", "X-Request-Mismatch-Error")

	transport, err := recording.NewRecordingHTTPClient(t, nil)
	require.NoError(t, err)
	opts.Transport = transport
}

// GetServiceClient builds a shared-key service.Client for the given test account type.
func GetServiceClient(t *testing.T, accountType TestAccountType, options *service.ClientOptions) (*service.Client, error) {
	if options == nil {
		options = &service.ClientOptions{}
	}

	SetClientOptions(t, &options.ClientOptions)

	cred, err := GetGenericSharedKeyCredential(accountType)
	if err != nil {
		return nil, err
	}

	serviceClient, err := service.NewClientWithSharedKeyCredential("https://"+cred.AccountName()+".file.core.windows.net/", cred, options)

	return serviceClient, err
}

// GetServiceClientNoCredential builds a service.Client from a pre-authorized (SAS) URL.
func GetServiceClientNoCredential(t *testing.T, sasUrl string, options *service.ClientOptions) (*service.Client, error) {
	if options == nil {
		options = &service.ClientOptions{}
	}

	SetClientOptions(t, &options.ClientOptions)

	serviceClient, err := service.NewClientWithNoCredential(sasUrl, options)

	return serviceClient, err
}

// GetGenericAccountInfo returns (accountName, accountKey) from the environment,
// or fixed fake values in playback mode.
func GetGenericAccountInfo(accountType TestAccountType) (string, string) {
	if recording.GetRecordMode() == recording.PlaybackMode {
		return FakeStorageAccount, "ZmFrZQ=="
	}
	accountNameEnvVar := string(accountType) + AccountNameEnvVar
	accountKeyEnvVar := string(accountType) + AccountKeyEnvVar
	accountName, _ := GetRequiredEnv(accountNameEnvVar)
	accountKey, _ := GetRequiredEnv(accountKeyEnvVar)
	return accountName, accountKey
}

// GetGenericSharedKeyCredential builds a SharedKeyCredential from the environment.
func GetGenericSharedKeyCredential(accountType TestAccountType) (*service.SharedKeyCredential, error) {
	accountName, accountKey := GetGenericAccountInfo(accountType)
	if accountName == "" || accountKey == "" {
		return nil, errors.New(string(accountType) + AccountNameEnvVar + " and/or " + string(accountType) + AccountKeyEnvVar + " environment variables not specified.")
	}
	return service.NewSharedKeyCredential(accountName, accountKey)
}

// GetGenericConnectionString composes a connection string for the given account type.
func GetGenericConnectionString(accountType TestAccountType) (*string, error) {
	accountName, accountKey := GetGenericAccountInfo(accountType)
	if accountName == "" || accountKey == "" {
		return nil, errors.New(string(accountType) + AccountNameEnvVar + " and/or " + string(accountType) + AccountKeyEnvVar + " environment variables not specified.")
	}
	connectionString := fmt.Sprintf("DefaultEndpointsProtocol=https;AccountName=%s;AccountKey=%s;EndpointSuffix=core.windows.net/",
		accountName, accountKey)
	return &connectionString, nil
}

// FakeCredential is a TokenCredential stub used during playback.
type FakeCredential struct {
}

// GetToken returns a fixed fake token valid for one hour.
func (c *FakeCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
	return azcore.AccessToken{Token: FakeToken, ExpiresOn: time.Now().Add(time.Hour).UTC()}, nil
}

// GetGenericTokenCredential returns a fake credential in playback, otherwise DefaultAzureCredential.
func GetGenericTokenCredential() (azcore.TokenCredential, error) {
	if recording.GetRecordMode() == recording.PlaybackMode {
		return &FakeCredential{}, nil
	}
	return azidentity.NewDefaultAzureCredential(nil)
}

// GetServiceClientFromConnectionString builds a service.Client from a connection string.
// NOTE(review): the recording transport is assigned twice (via SetClientOptions and
// again below) — looks redundant; confirm and drop one.
func GetServiceClientFromConnectionString(t *testing.T, accountType TestAccountType, options *service.ClientOptions) (*service.Client, error) {
	if options == nil {
		options = &service.ClientOptions{}
	}
	SetClientOptions(t, &options.ClientOptions)

	transport, err := recording.NewRecordingHTTPClient(t, nil)
	require.NoError(t, err)
	options.Transport = transport

	cred, err := GetGenericConnectionString(accountType)
	if err != nil {
		return nil, err
	}
	svcClient, err := service.NewClientFromConnectionString(*cred, options)
	return svcClient, err
}

// GetShareClient returns a share.Client for shareName without creating the share.
func GetShareClient(shareName string, s *service.Client) *share.Client {
	return s.NewShareClient(shareName)
}

// CreateNewShare creates shareName on the service and returns its client.
func CreateNewShare(ctx context.Context, _require *require.Assertions, shareName string, svcClient *service.Client) *share.Client {
	shareClient := GetShareClient(shareName, svcClient)
	_, err := shareClient.Create(ctx, nil)
	_require.NoError(err)
	return shareClient
}

// DeleteShare deletes the share, failing the test on error.
func DeleteShare(ctx context.Context, _require *require.Assertions, shareClient *share.Client) {
	_, err := shareClient.Delete(ctx, nil)
	_require.NoError(err)
}

// GetDirectoryClient returns a directory.Client for dirName without creating it.
func GetDirectoryClient(dirName string, s *share.Client) *directory.Client {
	return s.NewDirectoryClient(dirName)
}

// CreateNewDirectory creates dirName in the share and returns its client.
func CreateNewDirectory(ctx context.Context, _require *require.Assertions, dirName string, shareClient *share.Client) *directory.Client {
	dirClient := GetDirectoryClient(dirName, shareClient)
	_, err := dirClient.Create(ctx, nil)
	_require.NoError(err)
	return dirClient
}

// DeleteDirectory deletes the directory, failing the test on error.
func DeleteDirectory(ctx context.Context, _require *require.Assertions, dirClient *directory.Client) {
	_, err := dirClient.Delete(ctx, nil)
	_require.NoError(err)
}

// GetFileClientFromShare returns a file.Client for fileName in the share's root directory.
func GetFileClientFromShare(fileName string, shareClient *share.Client) *file.Client {
	return shareClient.NewRootDirectoryClient().NewFileClient(fileName)
}

// CreateNewFileFromShare creates an empty file of fileSize bytes and returns its client.
func CreateNewFileFromShare(ctx context.Context, _require *require.Assertions, fileName string, fileSize int64, shareClient *share.Client) *file.Client {
	fClient := GetFileClientFromShare(fileName, shareClient)

	_, err := fClient.Create(ctx, fileSize, nil)
	_require.NoError(err)

	return fClient
}

// CreateNewFileFromShareWithData creates a file pre-filled with FileDefaultData.
func CreateNewFileFromShareWithData(ctx context.Context, _require *require.Assertions, fileName string, shareClient *share.Client) *file.Client {
	fClient := GetFileClientFromShare(fileName, shareClient)

	_, err := fClient.Create(ctx, int64(len(FileDefaultData)), nil)
	_require.NoError(err)

	_, err = fClient.UploadRange(ctx, 0, streaming.NopCloser(strings.NewReader(FileDefaultData)), nil)
	_require.NoError(err)

	return fClient
}

// DeleteFile deletes the file, failing the test on error.
func DeleteFile(ctx context.Context, _require *require.Assertions, fileClient *file.Client) {
	_, err := fileClient.Delete(ctx, nil)
	_require.NoError(err)
}
diff --git a/sdk/storage/azfile/internal/testcommon/common.go b/sdk/storage/azfile/internal/testcommon/common.go
new file mode 100644
index 000000000000..11e61800da79
--- /dev/null
+++ b/sdk/storage/azfile/internal/testcommon/common.go
@@ -0,0 +1,117 @@
//go:build go1.18
// +build go1.18

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.

// Contains common helpers for TESTS ONLY
package testcommon

import (
	"bytes"
	"errors"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
	"github.com/Azure/azure-sdk-for-go/sdk/internal/recording"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/fileerror"
	"github.com/stretchr/testify/require"
	"io"
	"os"
	"strconv"
	"strings"
	"testing"
)

// Prefixes used to build deterministic resource names from test names.
const (
	SharePrefix     = "gos"
	DirectoryPrefix = "godir"
	FilePrefix      = "gotestfile"
	FileDefaultData = "GoFileDefaultData"
)

// GenerateShareName derives a share name from the test name.
func GenerateShareName(testName string) string {
	return SharePrefix + GenerateEntityName(testName)
}

// GenerateEntityName lowercases the test name and strips '/' and the word "test".
func GenerateEntityName(testName string) string {
	return strings.ReplaceAll(strings.ReplaceAll(strings.ToLower(testName), "/", ""), "test", "")
}

// GenerateDirectoryName derives a directory name from the test name.
func GenerateDirectoryName(testName string) string {
	return DirectoryPrefix + GenerateEntityName(testName)
}

// GenerateFileName derives a file name from the test name.
func GenerateFileName(testName string) string {
	return FilePrefix + GenerateEntityName(testName)
}

// random64BString is a fixed 64-byte seed repeated to fill generated payloads.
const random64BString string = "2SDgZj6RkKYzJpu04sweQek4uWHO8ndPnYlZ0tnFS61hjnFZ5IkvIGGY44eKABov"

// GenerateData returns sizeInBytes of deterministic payload data.
// (Definition continues beyond this chunk.)
func GenerateData(sizeInBytes int) (io.ReadSeekCloser, []byte) {
	data := make([]byte, sizeInBytes)
	_len := len(random64BString)
	if sizeInBytes > _len {
		count := sizeInBytes / _len
		if sizeInBytes%_len != 0 {
			count = count + 1
		}
		copy(data[:], strings.Repeat(random64BString,
count)) + } else { + copy(data[:], random64BString) + } + return streaming.NopCloser(bytes.NewReader(data)), data +} + +func ValidateHTTPErrorCode(_require *require.Assertions, err error, code int) { + _require.Error(err) + var responseErr *azcore.ResponseError + errors.As(err, &responseErr) + if responseErr != nil { + _require.Equal(responseErr.StatusCode, code) + } else { + _require.Equal(strings.Contains(err.Error(), strconv.Itoa(code)), true) + } +} + +func ValidateFileErrorCode(_require *require.Assertions, err error, code fileerror.Code) { + _require.Error(err) + var responseErr *azcore.ResponseError + errors.As(err, &responseErr) + if responseErr != nil { + _require.Equal(string(code), responseErr.ErrorCode) + } else { + _require.Contains(err.Error(), code) + } +} + +// GetRequiredEnv gets an environment variable by name and returns an error if it is not found +func GetRequiredEnv(name string) (string, error) { + env, ok := os.LookupEnv(name) + if ok { + return env, nil + } else { + return "", errors.New("Required environment variable not set: " + name) + } +} + +func BeforeTest(t *testing.T, suite string, test string) { + const urlRegex = `https://\S+\.file\.core\.windows\.net` + const tokenRegex = `(?:Bearer\s).*` + + require.NoError(t, recording.AddURISanitizer(FakeStorageURL, urlRegex, nil)) + require.NoError(t, recording.AddHeaderRegexSanitizer("x-ms-copy-source", FakeStorageURL, urlRegex, nil)) + require.NoError(t, recording.AddHeaderRegexSanitizer("x-ms-copy-source-authorization", FakeToken, tokenRegex, nil)) + // we freeze request IDs and timestamps to avoid creating noisy diffs + // NOTE: we can't freeze time stamps as that breaks some tests that use if-modified-since etc (maybe it can be fixed?) 
+ //testframework.AddHeaderRegexSanitizer("X-Ms-Date", "Wed, 10 Aug 2022 23:34:14 GMT", "", nil) + require.NoError(t, recording.AddHeaderRegexSanitizer("x-ms-request-id", "00000000-0000-0000-0000-000000000000", "", nil)) + //testframework.AddHeaderRegexSanitizer("Date", "Wed, 10 Aug 2022 23:34:14 GMT", "", nil) + // TODO: more freezing + //testframework.AddBodyRegexSanitizer("RequestId:00000000-0000-0000-0000-000000000000", `RequestId:\w{8}-\w{4}-\w{4}-\w{4}-\w{12}`, nil) + //testframework.AddBodyRegexSanitizer("Time:2022-08-11T00:21:56.4562741Z", `Time:\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(?:\.\d*)?Z`, nil) + require.NoError(t, recording.Start(t, "sdk/storage/azfile/testdata", nil)) +} + +func AfterTest(t *testing.T, suite string, test string) { + require.NoError(t, recording.Stop(t, nil)) +} diff --git a/sdk/storage/azfile/lease/client_test.go b/sdk/storage/azfile/lease/client_test.go new file mode 100644 index 000000000000..7b90fbfa9f7f --- /dev/null +++ b/sdk/storage/azfile/lease/client_test.go @@ -0,0 +1,633 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package lease_test + +import ( + "context" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/recording" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/testcommon" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/lease" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/share" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "testing" + "time" +) + +func Test(t *testing.T) { + recordMode := recording.GetRecordMode() + t.Logf("Running lease Tests in %s mode\n", recordMode) + if recordMode == recording.LiveMode { + suite.Run(t, &LeaseRecordedTestsSuite{}) + suite.Run(t, &LeaseUnrecordedTestsSuite{}) + } else if recordMode == recording.PlaybackMode { + suite.Run(t, &LeaseRecordedTestsSuite{}) + } else if recordMode == recording.RecordingMode { + suite.Run(t, &LeaseRecordedTestsSuite{}) + } +} + +func (l *LeaseRecordedTestsSuite) BeforeTest(suite string, test string) { + testcommon.BeforeTest(l.T(), suite, test) +} + +func (l *LeaseRecordedTestsSuite) AfterTest(suite string, test string) { + testcommon.AfterTest(l.T(), suite, test) +} + +func (l *LeaseUnrecordedTestsSuite) BeforeTest(suite string, test string) { + +} + +func (l *LeaseUnrecordedTestsSuite) AfterTest(suite string, test string) { + +} + +type LeaseRecordedTestsSuite struct { + suite.Suite +} + +type LeaseUnrecordedTestsSuite struct { + suite.Suite +} + +var proposedLeaseIDs = []*string{to.Ptr("c820a799-76d7-4ee2-6e15-546f19325c2c"), to.Ptr("326cc5e1-746e-4af8-4811-a50e6629a8ca")} + +func (l *LeaseRecordedTestsSuite) TestShareAcquireLease() { + _require := require.New(l.T()) + testName := l.T().Name() + + svcClient, err := testcommon.GetServiceClient(l.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := 
testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + shareLeaseClient, _ := lease.NewShareClient(shareClient, &lease.ShareClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + + ctx := context.Background() + acquireLeaseResponse, err := shareLeaseClient.Acquire(ctx, int32(60), nil) + _require.NoError(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.EqualValues(*acquireLeaseResponse.LeaseID, *shareLeaseClient.LeaseID()) + + _, err = shareClient.Delete(ctx, nil) + _require.Error(err) + + _, err = shareLeaseClient.Release(ctx, nil) + _require.NoError(err) +} + +func (l *LeaseRecordedTestsSuite) TestNegativeShareAcquireMultipleLease() { + _require := require.New(l.T()) + testName := l.T().Name() + + svcClient, err := testcommon.GetServiceClient(l.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + shareLeaseClient0, _ := lease.NewShareClient(shareClient, &lease.ShareClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + + shareLeaseClient1, _ := lease.NewShareClient(shareClient, &lease.ShareClientOptions{ + LeaseID: proposedLeaseIDs[1], + }) + + ctx := context.Background() + acquireLeaseResponse0, err := shareLeaseClient0.Acquire(ctx, int32(60), nil) + _require.NoError(err) + _require.NotNil(acquireLeaseResponse0.LeaseID) + _require.EqualValues(*acquireLeaseResponse0.LeaseID, *shareLeaseClient0.LeaseID()) + + // acquiring lease for the second time returns LeaseAlreadyPresent error + _, err = shareLeaseClient1.Acquire(ctx, int32(60), nil) + _require.Error(err) + + _, err = shareLeaseClient0.Release(ctx, nil) + _require.NoError(err) +} + +func (l *LeaseRecordedTestsSuite) TestShareDeleteShareWithoutLeaseId() { 
+ _require := require.New(l.T()) + testName := l.T().Name() + + svcClient, err := testcommon.GetServiceClient(l.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + shareLeaseClient, _ := lease.NewShareClient(shareClient, &lease.ShareClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + + ctx := context.Background() + acquireLeaseResponse, err := shareLeaseClient.Acquire(ctx, int32(60), nil) + _require.NoError(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.EqualValues(*acquireLeaseResponse.LeaseID, *shareLeaseClient.LeaseID()) + + _, err = shareClient.Delete(ctx, nil) + _require.Error(err) + + leaseID := shareLeaseClient.LeaseID() + _, err = shareClient.Delete(ctx, &share.DeleteOptions{ + LeaseAccessConditions: &share.LeaseAccessConditions{LeaseID: leaseID}, + }) + _require.NoError(err) +} + +func (l *LeaseRecordedTestsSuite) TestShareReleaseLease() { + _require := require.New(l.T()) + testName := l.T().Name() + + svcClient, err := testcommon.GetServiceClient(l.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + shareLeaseClient, _ := lease.NewShareClient(shareClient, &lease.ShareClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + + ctx := context.Background() + acquireLeaseResponse, err := shareLeaseClient.Acquire(ctx, int32(60), nil) + _require.NoError(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.EqualValues(*acquireLeaseResponse.LeaseID, *shareLeaseClient.LeaseID()) + + _, err = shareClient.Delete(ctx, nil) + _require.Error(err) + + _, err = 
shareLeaseClient.Release(ctx, nil) + _require.NoError(err) + + _, err = shareClient.Delete(ctx, nil) + _require.NoError(err) +} + +func (l *LeaseRecordedTestsSuite) TestShareRenewLease() { + _require := require.New(l.T()) + testName := l.T().Name() + + svcClient, err := testcommon.GetServiceClient(l.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + shareLeaseClient, _ := lease.NewShareClient(shareClient, &lease.ShareClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + + ctx := context.Background() + acquireLeaseResponse, err := shareLeaseClient.Acquire(ctx, int32(15), nil) + _require.NoError(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.EqualValues(*acquireLeaseResponse.LeaseID, *shareLeaseClient.LeaseID()) + + _, err = shareLeaseClient.Renew(ctx, nil) + _require.NoError(err) + + _, err = shareLeaseClient.Release(ctx, nil) + _require.NoError(err) +} + +func (l *LeaseRecordedTestsSuite) TestShareBreakLeaseDefault() { + _require := require.New(l.T()) + testName := l.T().Name() + + svcClient, err := testcommon.GetServiceClient(l.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + shareLeaseClient, _ := lease.NewShareClient(shareClient, &lease.ShareClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + + ctx := context.Background() + acquireLeaseResponse, err := shareLeaseClient.Acquire(ctx, int32(60), nil) + _require.NoError(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.EqualValues(*acquireLeaseResponse.LeaseID, *shareLeaseClient.LeaseID()) + + 
bResp, err := shareLeaseClient.Break(ctx, nil) + _require.NoError(err) + _require.NotNil(bResp.LeaseTime) + + _, err = shareClient.Delete(ctx, nil) + _require.Error(err) + + _, err = shareLeaseClient.Release(ctx, nil) + _require.NoError(err) +} + +func (l *LeaseRecordedTestsSuite) TestShareBreakLeaseNonDefault() { + _require := require.New(l.T()) + testName := l.T().Name() + + svcClient, err := testcommon.GetServiceClient(l.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + shareLeaseClient, _ := lease.NewShareClient(shareClient, &lease.ShareClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + + ctx := context.Background() + acquireLeaseResponse, err := shareLeaseClient.Acquire(ctx, int32(60), nil) + _require.NoError(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.EqualValues(*acquireLeaseResponse.LeaseID, *shareLeaseClient.LeaseID()) + + bResp, err := shareLeaseClient.Break(ctx, &lease.ShareBreakOptions{ + BreakPeriod: to.Ptr((int32)(5)), + }) + _require.NoError(err) + _require.NotNil(bResp.LeaseTime) + + _, err = shareClient.Delete(ctx, nil) + _require.Error(err) + + // wait for lease to expire + time.Sleep(6 * time.Second) + + _, err = shareClient.Delete(ctx, nil) + _require.NoError(err) +} + +func (l *LeaseRecordedTestsSuite) TestNegativeShareBreakRenewLease() { + _require := require.New(l.T()) + testName := l.T().Name() + + svcClient, err := testcommon.GetServiceClient(l.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + shareLeaseClient, _ := 
lease.NewShareClient(shareClient, &lease.ShareClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + + ctx := context.Background() + acquireLeaseResponse, err := shareLeaseClient.Acquire(ctx, int32(60), nil) + _require.NoError(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.EqualValues(*acquireLeaseResponse.LeaseID, *shareLeaseClient.LeaseID()) + + bResp, err := shareLeaseClient.Break(ctx, &lease.ShareBreakOptions{ + BreakPeriod: to.Ptr((int32)(5)), + }) + _require.NoError(err) + _require.NotNil(bResp.LeaseTime) + + // renewing broken lease returns error + _, err = shareLeaseClient.Renew(ctx, nil) + _require.Error(err) + + _, err = shareLeaseClient.Release(ctx, nil) + _require.NoError(err) +} + +func (l *LeaseRecordedTestsSuite) TestShareChangeLease() { + _require := require.New(l.T()) + testName := l.T().Name() + + svcClient, err := testcommon.GetServiceClient(l.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + shareLeaseClient, _ := lease.NewShareClient(shareClient, &lease.ShareClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + + ctx := context.Background() + acquireLeaseResponse, err := shareLeaseClient.Acquire(ctx, int32(60), nil) + _require.NoError(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.EqualValues(*acquireLeaseResponse.LeaseID, *shareLeaseClient.LeaseID()) + + oldLeaseID := shareLeaseClient.LeaseID() + + changeLeaseResp, err := shareLeaseClient.Change(ctx, *proposedLeaseIDs[1], nil) + _require.NoError(err) + _require.EqualValues(changeLeaseResp.LeaseID, proposedLeaseIDs[1]) + _require.EqualValues(shareLeaseClient.LeaseID(), proposedLeaseIDs[1]) + + _, err = shareClient.Delete(ctx, &share.DeleteOptions{ + LeaseAccessConditions: &share.LeaseAccessConditions{ + LeaseID: 
oldLeaseID, + }, + }) + _require.Error(err) + + _, err = shareLeaseClient.Renew(ctx, nil) + _require.NoError(err) + + _, err = shareLeaseClient.Release(ctx, nil) + _require.NoError(err) +} + +func (l *LeaseRecordedTestsSuite) TestFileAcquireLease() { + _require := require.New(l.T()) + testName := l.T().Name() + + svcClient, err := testcommon.GetServiceClient(l.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + ctx := context.Background() + fileName := testcommon.GenerateFileName(testName) + fileClient := shareClient.NewRootDirectoryClient().NewFileClient(fileName) + _, err = fileClient.Create(ctx, 0, nil) + _require.NoError(err) + + fileLeaseClient, err := lease.NewFileClient(fileClient, &lease.FileClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + _require.NoError(err) + + acquireLeaseResponse, err := fileLeaseClient.Acquire(ctx, nil) + _require.NoError(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.EqualValues(acquireLeaseResponse.LeaseID, fileLeaseClient.LeaseID()) + + _, err = fileClient.Delete(ctx, nil) + _require.Error(err) + + _, err = fileLeaseClient.Release(ctx, nil) + _require.NoError(err) +} + +func (l *LeaseRecordedTestsSuite) TestNegativeFileAcquireMultipleLease() { + _require := require.New(l.T()) + testName := l.T().Name() + + svcClient, err := testcommon.GetServiceClient(l.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + ctx := context.Background() + fileName := testcommon.GenerateFileName(testName) + fileClient := 
shareClient.NewRootDirectoryClient().NewFileClient(fileName) + _, err = fileClient.Create(ctx, 0, nil) + _require.NoError(err) + + fileLeaseClient0, err := lease.NewFileClient(fileClient, &lease.FileClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + _require.NoError(err) + + fileLeaseClient1, err := lease.NewFileClient(fileClient, &lease.FileClientOptions{ + LeaseID: proposedLeaseIDs[1], + }) + _require.NoError(err) + + acquireLeaseResponse, err := fileLeaseClient0.Acquire(ctx, nil) + _require.NoError(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.EqualValues(acquireLeaseResponse.LeaseID, fileLeaseClient0.LeaseID()) + + // acquiring lease for the second time returns LeaseAlreadyPresent error + _, err = fileLeaseClient1.Acquire(ctx, nil) + _require.Error(err) + + _, err = fileClient.Delete(ctx, nil) + _require.Error(err) + + _, err = fileLeaseClient0.Release(ctx, nil) + _require.NoError(err) +} + +func (l *LeaseRecordedTestsSuite) TestDeleteFileWithoutLeaseId() { + _require := require.New(l.T()) + testName := l.T().Name() + + svcClient, err := testcommon.GetServiceClient(l.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + ctx := context.Background() + fileName := testcommon.GenerateFileName(testName) + fileClient := shareClient.NewRootDirectoryClient().NewFileClient(fileName) + _, err = fileClient.Create(ctx, 0, nil) + _require.NoError(err) + + fileLeaseClient, err := lease.NewFileClient(fileClient, &lease.FileClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + _require.NoError(err) + + acquireLeaseResponse, err := fileLeaseClient.Acquire(ctx, nil) + _require.NoError(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.EqualValues(acquireLeaseResponse.LeaseID, fileLeaseClient.LeaseID()) + + 
_, err = fileClient.Delete(ctx, nil) + _require.Error(err) + + leaseID := fileLeaseClient.LeaseID() + _, err = fileClient.Delete(ctx, &file.DeleteOptions{ + LeaseAccessConditions: &file.LeaseAccessConditions{ + LeaseID: leaseID, + }, + }) + _require.NoError(err) +} + +func (l *LeaseRecordedTestsSuite) TestFileReleaseLease() { + _require := require.New(l.T()) + testName := l.T().Name() + + svcClient, err := testcommon.GetServiceClient(l.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + ctx := context.Background() + fileName := testcommon.GenerateFileName(testName) + fileClient := shareClient.NewRootDirectoryClient().NewFileClient(fileName) + _, err = fileClient.Create(ctx, 0, nil) + _require.NoError(err) + + fileLeaseClient, err := lease.NewFileClient(fileClient, &lease.FileClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + _require.NoError(err) + + acquireLeaseResponse, err := fileLeaseClient.Acquire(ctx, nil) + _require.NoError(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.EqualValues(acquireLeaseResponse.LeaseID, fileLeaseClient.LeaseID()) + + _, err = fileClient.Delete(ctx, nil) + _require.Error(err) + + _, err = fileLeaseClient.Release(ctx, nil) + _require.NoError(err) + + _, err = fileClient.Delete(ctx, nil) + _require.NoError(err) +} + +func (l *LeaseRecordedTestsSuite) TestFileChangeLease() { + _require := require.New(l.T()) + testName := l.T().Name() + + svcClient, err := testcommon.GetServiceClient(l.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + 
ctx := context.Background() + fileName := testcommon.GenerateFileName(testName) + fileClient := shareClient.NewRootDirectoryClient().NewFileClient(fileName) + _, err = fileClient.Create(ctx, 0, nil) + _require.NoError(err) + + fileLeaseClient, err := lease.NewFileClient(fileClient, &lease.FileClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + _require.NoError(err) + + acquireLeaseResponse, err := fileLeaseClient.Acquire(ctx, nil) + _require.NoError(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.Equal(*acquireLeaseResponse.LeaseID, *proposedLeaseIDs[0]) + + oldLeaseID := fileLeaseClient.LeaseID() + + changeLeaseResp, err := fileLeaseClient.Change(ctx, *proposedLeaseIDs[1], nil) + _require.NoError(err) + _require.Equal(*changeLeaseResp.LeaseID, *proposedLeaseIDs[1]) + + _, err = fileClient.Delete(ctx, &file.DeleteOptions{ + LeaseAccessConditions: &file.LeaseAccessConditions{ + LeaseID: oldLeaseID, + }, + }) + _require.Error(err) + + _, err = fileLeaseClient.Release(ctx, nil) + _require.NoError(err) +} + +func (l *LeaseRecordedTestsSuite) TestNegativeFileDeleteAfterReleaseLease() { + _require := require.New(l.T()) + testName := l.T().Name() + + svcClient, err := testcommon.GetServiceClient(l.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + ctx := context.Background() + fileName := testcommon.GenerateFileName(testName) + fileClient := shareClient.NewRootDirectoryClient().NewFileClient(fileName) + _, err = fileClient.Create(ctx, 0, nil) + _require.NoError(err) + + fileLeaseClient, err := lease.NewFileClient(fileClient, &lease.FileClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + _require.NoError(err) + + acquireLeaseResponse, err := fileLeaseClient.Acquire(ctx, nil) + _require.NoError(err) + 
_require.NotNil(acquireLeaseResponse.LeaseID) + _require.EqualValues(acquireLeaseResponse.LeaseID, fileLeaseClient.LeaseID()) + + _, err = fileClient.Delete(ctx, nil) + _require.Error(err) + + _, err = fileLeaseClient.Release(ctx, nil) + _require.NoError(err) + + // deleting file after its lease has expired or released returns error. + _, err = fileClient.Delete(ctx, &file.DeleteOptions{ + LeaseAccessConditions: &file.LeaseAccessConditions{ + LeaseID: fileLeaseClient.LeaseID(), + }, + }) + _require.Error(err) +} + +func (l *LeaseRecordedTestsSuite) TestFileBreakLease() { + _require := require.New(l.T()) + testName := l.T().Name() + + svcClient, err := testcommon.GetServiceClient(l.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + ctx := context.Background() + fileName := testcommon.GenerateFileName(testName) + fileClient := shareClient.NewRootDirectoryClient().NewFileClient(fileName) + _, err = fileClient.Create(ctx, 0, nil) + _require.NoError(err) + + fileLeaseClient, err := lease.NewFileClient(fileClient, &lease.FileClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + _require.NoError(err) + + acquireLeaseResponse, err := fileLeaseClient.Acquire(ctx, nil) + _require.NoError(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.EqualValues(acquireLeaseResponse.LeaseID, fileLeaseClient.LeaseID()) + + _, err = fileClient.Delete(ctx, nil) + _require.Error(err) + + _, err = fileLeaseClient.Break(ctx, nil) + _require.NoError(err) + + _, err = fileClient.Delete(ctx, nil) + _require.NoError(err) +} diff --git a/sdk/storage/azfile/lease/constants.go b/sdk/storage/azfile/lease/constants.go new file mode 100644 index 000000000000..3b384475deb0 --- /dev/null +++ b/sdk/storage/azfile/lease/constants.go @@ -0,0 +1,51 @@ 
+//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package lease + +import "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/generated" + +// DurationType - When a share is leased, specifies whether the lease is of infinite or fixed duration. +type DurationType = generated.LeaseDurationType + +const ( + DurationTypeInfinite DurationType = generated.LeaseDurationTypeInfinite + DurationTypeFixed DurationType = generated.LeaseDurationTypeFixed +) + +// PossibleDurationTypeValues returns the possible values for the DurationType const type. +func PossibleDurationTypeValues() []DurationType { + return generated.PossibleLeaseDurationTypeValues() +} + +// StateType - Lease state of the share. +type StateType = generated.LeaseStateType + +const ( + StateTypeAvailable StateType = generated.LeaseStateTypeAvailable + StateTypeLeased StateType = generated.LeaseStateTypeLeased + StateTypeExpired StateType = generated.LeaseStateTypeExpired + StateTypeBreaking StateType = generated.LeaseStateTypeBreaking + StateTypeBroken StateType = generated.LeaseStateTypeBroken +) + +// PossibleStateTypeValues returns the possible values for the StateType const type. +func PossibleStateTypeValues() []StateType { + return generated.PossibleLeaseStateTypeValues() +} + +// StatusType - The current lease status of the share. +type StatusType = generated.LeaseStatusType + +const ( + StatusTypeLocked StatusType = generated.LeaseStatusTypeLocked + StatusTypeUnlocked StatusType = generated.LeaseStatusTypeUnlocked +) + +// PossibleStatusTypeValues returns the possible values for the StatusType const type. 
+func PossibleStatusTypeValues() []StatusType { + return generated.PossibleLeaseStatusTypeValues() +} diff --git a/sdk/storage/azfile/lease/examples_test.go b/sdk/storage/azfile/lease/examples_test.go new file mode 100644 index 000000000000..f8b61b3ba133 --- /dev/null +++ b/sdk/storage/azfile/lease/examples_test.go @@ -0,0 +1,101 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package lease_test + +import ( + "context" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/lease" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/service" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/share" + "log" + "os" +) + +func handleError(err error) { + if err != nil { + log.Fatal(err.Error()) + } +} + +// This example shows how to perform various lease operations on a share. +// The same lease operations can be performed on individual files as well. +// A lease on a share prevents it from being deleted by others, while a lease on a file +// protects it from both modifications and deletions. +func Example_lease_ShareClient_AcquireLease() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + + shareName := "testshare" + shareURL := fmt.Sprintf("https://%s.file.core.windows.net/%s", accountName, shareName) + + cred, err := service.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + + shareClient, err := share.NewClientWithSharedKeyCredential(shareURL, cred, nil) + handleError(err) + + // Create a unique ID for the lease + // A lease ID can be any valid GUID string format. 
To generate UUIDs, consider the github.com/google/uuid package + leaseID := "36b1a876-cf98-4eb2-a5c3-6d68489658ff" + shareLeaseClient, err := lease.NewShareClient(shareClient, &lease.ShareClientOptions{LeaseID: to.Ptr(leaseID)}) + handleError(err) + + // Now acquire a lease on the share. + // You can choose to pass an empty string for proposed ID so that the service automatically assigns one for you. + duration := int32(60) + acquireLeaseResponse, err := shareLeaseClient.Acquire(context.TODO(), duration, nil) + handleError(err) + fmt.Println("The share is leased for delete operations with lease ID", *acquireLeaseResponse.LeaseID) + + // The share cannot be deleted without providing the lease ID. + _, err = shareClient.Delete(context.TODO(), nil) + if err == nil { + log.Fatal("delete should have failed") + } + + fmt.Println("The share cannot be deleted while there is an active lease") + + // share can be deleted by providing the lease id + //_, err = shareClient.Delete(context.TODO(), &share.DeleteOptions{ + // LeaseAccessConditions: &share.LeaseAccessConditions{LeaseID: acquireLeaseResponse.LeaseID}, + //}) + + // We can release the lease now and the share can be deleted. + _, err = shareLeaseClient.Release(context.TODO(), nil) + handleError(err) + fmt.Println("The lease on the share is now released") + + // AcquireLease a lease again to perform other operations. + // Duration is still 60 + acquireLeaseResponse, err = shareLeaseClient.Acquire(context.TODO(), duration, nil) + handleError(err) + fmt.Println("The share is leased again with lease ID", *acquireLeaseResponse.LeaseID) + + // We can change the ID of an existing lease. + newLeaseID := "6b3e65e5-e1bb-4a3f-8b72-13e9bc9cd3bf" + changeLeaseResponse, err := shareLeaseClient.Change(context.TODO(), newLeaseID, nil) + handleError(err) + fmt.Println("The lease ID was changed to", *changeLeaseResponse.LeaseID) + + // The lease can be renewed. 
+ renewLeaseResponse, err := shareLeaseClient.Renew(context.TODO(), nil) + handleError(err) + fmt.Println("The lease was renewed with the same ID", *renewLeaseResponse.LeaseID) + + // Finally, the lease can be broken, and we could prevent others from acquiring a lease for a period of time + _, err = shareLeaseClient.Break(context.TODO(), &lease.ShareBreakOptions{BreakPeriod: to.Ptr(int32(60))}) + handleError(err) + fmt.Println("The lease was broken, and nobody can acquire a lease for 60 seconds") +} diff --git a/sdk/storage/azfile/lease/file_client.go b/sdk/storage/azfile/lease/file_client.go new file mode 100644 index 000000000000..b1bffc781a5b --- /dev/null +++ b/sdk/storage/azfile/lease/file_client.go @@ -0,0 +1,103 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package lease + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/base" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/shared" +) + +// FileClient provides lease functionality for the underlying file client. +type FileClient struct { + fileClient *file.Client + leaseID *string +} + +// FileClientOptions contains the optional values when creating a FileClient. +type FileClientOptions struct { + // LeaseID contains a caller-provided lease ID. + LeaseID *string +} + +// NewFileClient creates a file lease client for the provided file client. 
+// - client - an instance of a file client +// - options - client options; pass nil to accept the default values +func NewFileClient(client *file.Client, options *FileClientOptions) (*FileClient, error) { + var leaseID *string + if options != nil { + leaseID = options.LeaseID + } + + leaseID, err := shared.GenerateLeaseID(leaseID) + if err != nil { + return nil, err + } + + return &FileClient{ + fileClient: client, + leaseID: leaseID, + }, nil +} + +func (f *FileClient) generated() *generated.FileClient { + return base.InnerClient((*base.Client[generated.FileClient])(f.fileClient)) +} + +// LeaseID returns leaseID of the client. +func (f *FileClient) LeaseID() *string { + return f.leaseID +} + +// Acquire operation can be used to request a new lease. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/lease-file. +func (f *FileClient) Acquire(ctx context.Context, options *FileAcquireOptions) (FileAcquireResponse, error) { + opts := options.format(f.LeaseID()) + resp, err := f.generated().AcquireLease(ctx, (int32)(-1), opts) + return resp, err +} + +// Break operation can be used to break the lease, if the file has an active lease. Once a lease is broken, it cannot be renewed. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/lease-file. +func (f *FileClient) Break(ctx context.Context, options *FileBreakOptions) (FileBreakResponse, error) { + opts, leaseAccessConditions := options.format() + resp, err := f.generated().BreakLease(ctx, opts, leaseAccessConditions) + return resp, err +} + +// Change operation can be used to change the lease ID of an active lease. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/lease-file. 
+func (f *FileClient) Change(ctx context.Context, proposedLeaseID string, options *FileChangeOptions) (FileChangeResponse, error) { + if f.LeaseID() == nil { + return FileChangeResponse{}, errors.New("leaseID cannot be nil") + } + + opts := options.format(&proposedLeaseID) + resp, err := f.generated().ChangeLease(ctx, *f.LeaseID(), opts) + + // If lease has been changed successfully, set the leaseID in client + if err == nil { + f.leaseID = &proposedLeaseID + } + + return resp, err +} + +// Release operation can be used to free the lease if it is no longer needed so that another client may immediately acquire a lease against the file. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/lease-file. +func (f *FileClient) Release(ctx context.Context, options *FileReleaseOptions) (FileReleaseResponse, error) { + if f.LeaseID() == nil { + return FileReleaseResponse{}, errors.New("leaseID cannot be nil") + } + + opts := options.format() + resp, err := f.generated().ReleaseLease(ctx, *f.LeaseID(), opts) + return resp, err +} diff --git a/sdk/storage/azfile/lease/models.go b/sdk/storage/azfile/lease/models.go new file mode 100644 index 000000000000..0de250f8aeb4 --- /dev/null +++ b/sdk/storage/azfile/lease/models.go @@ -0,0 +1,147 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package lease + +import "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/generated" + +// AccessConditions contains optional parameters to access leased entity. +type AccessConditions = generated.LeaseAccessConditions + +// FileAcquireOptions contains the optional parameters for the FileClient.Acquire method. 
+type FileAcquireOptions struct { + // placeholder for future options +} + +func (o *FileAcquireOptions) format(proposedLeaseID *string) *generated.FileClientAcquireLeaseOptions { + return &generated.FileClientAcquireLeaseOptions{ + ProposedLeaseID: proposedLeaseID, + } +} + +// FileBreakOptions contains the optional parameters for the FileClient.Break method. +type FileBreakOptions struct { + // AccessConditions contains optional parameters to access leased entity. + AccessConditions *AccessConditions +} + +func (o *FileBreakOptions) format() (*generated.FileClientBreakLeaseOptions, *generated.LeaseAccessConditions) { + if o == nil { + return nil, nil + } + + return nil, o.AccessConditions +} + +// FileChangeOptions contains the optional parameters for the FileClient.Change method. +type FileChangeOptions struct { + // placeholder for future options +} + +func (o *FileChangeOptions) format(proposedLeaseID *string) *generated.FileClientChangeLeaseOptions { + return &generated.FileClientChangeLeaseOptions{ + ProposedLeaseID: proposedLeaseID, + } +} + +// FileReleaseOptions contains the optional parameters for the FileClient.Release method. +type FileReleaseOptions struct { + // placeholder for future options +} + +func (o *FileReleaseOptions) format() *generated.FileClientReleaseLeaseOptions { + return nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ShareAcquireOptions contains the optional parameters for the ShareClient.Acquire method. +type ShareAcquireOptions struct { + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query. 
+ ShareSnapshot *string +} + +func (o *ShareAcquireOptions) format(proposedLeaseID *string) *generated.ShareClientAcquireLeaseOptions { + opts := &generated.ShareClientAcquireLeaseOptions{ + ProposedLeaseID: proposedLeaseID, + } + if o != nil { + opts.Sharesnapshot = o.ShareSnapshot + } + return opts +} + +// ShareBreakOptions contains the optional parameters for the ShareClient.Break method. +type ShareBreakOptions struct { + // For a break operation, this is the proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This + // break period is only used if it is shorter than the time remaining on the + // lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has + // expired, but the lease may be held for longer than the break period. If this + // header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, + // and an infinite lease breaks immediately. + BreakPeriod *int32 + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query. + ShareSnapshot *string + // AccessConditions contains optional parameters to access leased entity. + AccessConditions *AccessConditions +} + +func (o *ShareBreakOptions) format() (*generated.ShareClientBreakLeaseOptions, *generated.LeaseAccessConditions) { + if o == nil { + return nil, nil + } + + return &generated.ShareClientBreakLeaseOptions{ + BreakPeriod: o.BreakPeriod, + Sharesnapshot: o.ShareSnapshot, + }, o.AccessConditions +} + +// ShareChangeOptions contains the optional parameters for the ShareClient.Change method. +type ShareChangeOptions struct { + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query. 
+ ShareSnapshot *string +} + +func (o *ShareChangeOptions) format(proposedLeaseID *string) *generated.ShareClientChangeLeaseOptions { + opts := &generated.ShareClientChangeLeaseOptions{ + ProposedLeaseID: proposedLeaseID, + } + if o != nil { + opts.Sharesnapshot = o.ShareSnapshot + } + return opts +} + +// ShareReleaseOptions contains the optional parameters for the ShareClient.Release method. +type ShareReleaseOptions struct { + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query. + ShareSnapshot *string +} + +func (o *ShareReleaseOptions) format() *generated.ShareClientReleaseLeaseOptions { + if o == nil { + return nil + } + return &generated.ShareClientReleaseLeaseOptions{ + Sharesnapshot: o.ShareSnapshot, + } +} + +// ShareRenewOptions contains the optional parameters for the ShareClient.Renew method. +type ShareRenewOptions struct { + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query. + ShareSnapshot *string +} + +func (o *ShareRenewOptions) format() *generated.ShareClientRenewLeaseOptions { + if o == nil { + return nil + } + return &generated.ShareClientRenewLeaseOptions{ + Sharesnapshot: o.ShareSnapshot, + } +} diff --git a/sdk/storage/azfile/lease/responses.go b/sdk/storage/azfile/lease/responses.go new file mode 100644 index 000000000000..23a5a1db3063 --- /dev/null +++ b/sdk/storage/azfile/lease/responses.go @@ -0,0 +1,36 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package lease + +import "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/generated" + +// FileAcquireResponse contains the response from method FileClient.Acquire. 
+type FileAcquireResponse = generated.FileClientAcquireLeaseResponse + +// FileBreakResponse contains the response from method FileClient.Break. +type FileBreakResponse = generated.FileClientBreakLeaseResponse + +// FileChangeResponse contains the response from method FileClient.Change. +type FileChangeResponse = generated.FileClientChangeLeaseResponse + +// FileReleaseResponse contains the response from method FileClient.Release. +type FileReleaseResponse = generated.FileClientReleaseLeaseResponse + +// ShareAcquireResponse contains the response from method ShareClient.Acquire. +type ShareAcquireResponse = generated.ShareClientAcquireLeaseResponse + +// ShareBreakResponse contains the response from method ShareClient.Break. +type ShareBreakResponse = generated.ShareClientBreakLeaseResponse + +// ShareChangeResponse contains the response from method ShareClient.Change. +type ShareChangeResponse = generated.ShareClientChangeLeaseResponse + +// ShareReleaseResponse contains the response from method ShareClient.Release. +type ShareReleaseResponse = generated.ShareClientReleaseLeaseResponse + +// ShareRenewResponse contains the response from method ShareClient.Renew. +type ShareRenewResponse = generated.ShareClientRenewLeaseResponse diff --git a/sdk/storage/azfile/lease/share_client.go b/sdk/storage/azfile/lease/share_client.go new file mode 100644 index 000000000000..ff4db564c57f --- /dev/null +++ b/sdk/storage/azfile/lease/share_client.go @@ -0,0 +1,116 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package lease + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/base" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/share" +) + +// ShareClient provides lease functionality for the underlying share client. +type ShareClient struct { + shareClient *share.Client + leaseID *string +} + +// ShareClientOptions contains the optional values when creating a ShareClient. +type ShareClientOptions struct { + // LeaseID contains a caller-provided lease ID. + LeaseID *string +} + +// NewShareClient creates a share lease client for the provided share client. +// - client - an instance of a share client +// - options - client options; pass nil to accept the default values +func NewShareClient(client *share.Client, options *ShareClientOptions) (*ShareClient, error) { + var leaseID *string + if options != nil { + leaseID = options.LeaseID + } + + leaseID, err := shared.GenerateLeaseID(leaseID) + if err != nil { + return nil, err + } + + return &ShareClient{ + shareClient: client, + leaseID: leaseID, + }, nil +} + +func (s *ShareClient) generated() *generated.ShareClient { + return base.InnerClient((*base.Client[generated.ShareClient])(s.shareClient)) +} + +// LeaseID returns leaseID of the client. +func (s *ShareClient) LeaseID() *string { + return s.leaseID +} + +// Acquire operation can be used to request a new lease. +// The lease duration must be between 15 and 60 seconds, or infinite (-1). +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/lease-share. 
+func (s *ShareClient) Acquire(ctx context.Context, duration int32, options *ShareAcquireOptions) (ShareAcquireResponse, error) { + opts := options.format(s.LeaseID()) + resp, err := s.generated().AcquireLease(ctx, duration, opts) + return resp, err +} + +// Break operation can be used to break the lease, if the file share has an active lease. Once a lease is broken, it cannot be renewed. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/lease-share. +func (s *ShareClient) Break(ctx context.Context, options *ShareBreakOptions) (ShareBreakResponse, error) { + opts, leaseAccessConditions := options.format() + resp, err := s.generated().BreakLease(ctx, opts, leaseAccessConditions) + return resp, err +} + +// Change operation can be used to change the lease ID of an active lease. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/lease-share. +func (s *ShareClient) Change(ctx context.Context, proposedLeaseID string, options *ShareChangeOptions) (ShareChangeResponse, error) { + if s.LeaseID() == nil { + return ShareChangeResponse{}, errors.New("leaseID cannot be nil") + } + + opts := options.format(&proposedLeaseID) + resp, err := s.generated().ChangeLease(ctx, *s.LeaseID(), opts) + + // If lease has been changed successfully, set the leaseID in client + if err == nil { + s.leaseID = &proposedLeaseID + } + + return resp, err +} + +// Release operation can be used to free the lease if it is no longer needed so that another client may immediately acquire a lease against the file share. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/lease-share. 
+func (s *ShareClient) Release(ctx context.Context, options *ShareReleaseOptions) (ShareReleaseResponse, error) { + if s.LeaseID() == nil { + return ShareReleaseResponse{}, errors.New("leaseID cannot be nil") + } + + opts := options.format() + resp, err := s.generated().ReleaseLease(ctx, *s.LeaseID(), opts) + return resp, err +} + +// Renew operation can be used to renew an existing lease. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/lease-share. +func (s *ShareClient) Renew(ctx context.Context, options *ShareRenewOptions) (ShareRenewResponse, error) { + if s.LeaseID() == nil { + return ShareRenewResponse{}, errors.New("leaseID cannot be nil") + } + + opts := options.format() + resp, err := s.generated().RenewLease(ctx, *s.LeaseID(), opts) + return resp, err +} diff --git a/sdk/storage/azfile/log.go b/sdk/storage/azfile/log.go new file mode 100644 index 000000000000..f59215653531 --- /dev/null +++ b/sdk/storage/azfile/log.go @@ -0,0 +1,16 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azfile + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/exported" +) + +const ( + // EventUpload is used for logging events related to upload operation. + EventUpload = exported.EventUpload +) diff --git a/sdk/storage/azfile/sas/account.go b/sdk/storage/azfile/sas/account.go new file mode 100644 index 000000000000..6b0c0067e811 --- /dev/null +++ b/sdk/storage/azfile/sas/account.go @@ -0,0 +1,183 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package sas + +import ( + "bytes" + "errors" + "fmt" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/exported" +) + +// SharedKeyCredential contains an account's name and its primary or secondary key. +type SharedKeyCredential = exported.SharedKeyCredential + +// AccountSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage account. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-an-account-sas +type AccountSignatureValues struct { + Version string `param:"sv"` // If not specified, this format to SASVersion + Protocol Protocol `param:"spr"` // See the SASProtocol* constants + StartTime time.Time `param:"st"` // Not specified if IsZero + ExpiryTime time.Time `param:"se"` // Not specified if IsZero + Permissions string `param:"sp"` // Create by initializing AccountPermissions and then call String() + IPRange IPRange `param:"sip"` + ResourceTypes string `param:"srt"` // Create by initializing AccountResourceTypes and then call String() +} + +// SignWithSharedKey uses an account's shared key credential to sign this signature values to produce +// the proper SAS query parameters. 
+func (v AccountSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKeyCredential) (QueryParameters, error) {
+	// https://docs.microsoft.com/en-us/rest/api/storageservices/Constructing-an-Account-SAS
+	if v.ExpiryTime.IsZero() || v.Permissions == "" || v.ResourceTypes == "" {
+		return QueryParameters{}, errors.New("account SAS is missing at least one of these: ExpiryTime, Permissions, or ResourceTypes")
+	}
+	if v.Version == "" {
+		v.Version = Version
+	}
+	perms, err := parseAccountPermissions(v.Permissions)
+	if err != nil {
+		return QueryParameters{}, err
+	}
+	v.Permissions = perms.String()
+
+	resources, err := parseAccountResourceTypes(v.ResourceTypes)
+	if err != nil {
+		return QueryParameters{}, err
+	}
+	v.ResourceTypes = resources.String()
+
+	startTime, expiryTime, _ := formatTimesForSigning(v.StartTime, v.ExpiryTime, time.Time{})
+
+	stringToSign := strings.Join([]string{
+		sharedKeyCredential.AccountName(),
+		v.Permissions,
+		"f", // file service
+		v.ResourceTypes,
+		startTime,
+		expiryTime,
+		v.IPRange.String(),
+		string(v.Protocol),
+		v.Version,
+		""}, // That is right, the account SAS requires a terminating extra newline
+		"\n")
+
+	signature, err := exported.ComputeHMACSHA256(sharedKeyCredential, stringToSign)
+	if err != nil {
+		return QueryParameters{}, err
+	}
+	p := QueryParameters{
+		// Common SAS parameters
+		version:     v.Version,
+		protocol:    v.Protocol,
+		startTime:   v.StartTime,
+		expiryTime:  v.ExpiryTime,
+		permissions: v.Permissions,
+		ipRange:     v.IPRange,
+
+		// Account-specific SAS parameters
+		services:      "f", // will always be "f" for Azure File
+		resourceTypes: v.ResourceTypes,
+
+		// Calculated SAS signature
+		signature: signature,
+	}
+
+	return p, nil
+}
+
+// AccountPermissions type simplifies creating the permissions string for an Azure Storage Account SAS.
+// Initialize an instance of this type and then call its String method to set AccountSignatureValues' Permissions field.
+type AccountPermissions struct { + Read, Write, Delete, List, Create bool +} + +// String produces the SAS permissions string for an Azure Storage account. +// Call this method to set AccountSignatureValues' Permissions field. +func (p *AccountPermissions) String() string { + var buffer bytes.Buffer + if p.Read { + buffer.WriteRune('r') + } + if p.Write { + buffer.WriteRune('w') + } + if p.Delete { + buffer.WriteRune('d') + } + if p.List { + buffer.WriteRune('l') + } + if p.Create { + buffer.WriteRune('c') + } + return buffer.String() +} + +// parseAccountPermissions initializes the AccountPermissions' fields from a string. +func parseAccountPermissions(s string) (AccountPermissions, error) { + p := AccountPermissions{} // Clear out the flags + for _, r := range s { + switch r { + case 'r': + p.Read = true + case 'w': + p.Write = true + case 'd': + p.Delete = true + case 'l': + p.List = true + case 'c': + p.Create = true + default: + return AccountPermissions{}, fmt.Errorf("invalid permission character: '%v'", r) + } + } + return p, nil +} + +// AccountResourceTypes type simplifies creating the resource types string for an Azure Storage Account SAS. +// Initialize an instance of this type and then call its String method to set AccountSignatureValues' ResourceTypes field. +type AccountResourceTypes struct { + Service, Container, Object bool +} + +// String produces the SAS resource types string for an Azure Storage account. +// Call this method to set AccountSignatureValues' ResourceTypes field. +func (rt *AccountResourceTypes) String() string { + var buffer bytes.Buffer + if rt.Service { + buffer.WriteRune('s') + } + if rt.Container { + buffer.WriteRune('c') + } + if rt.Object { + buffer.WriteRune('o') + } + return buffer.String() +} + +// parseAccountResourceTypes initializes the AccountResourceTypes' fields from a string. 
+func parseAccountResourceTypes(s string) (AccountResourceTypes, error) { + rt := AccountResourceTypes{} + for _, r := range s { + switch r { + case 's': + rt.Service = true + case 'c': + rt.Container = true + case 'o': + rt.Object = true + default: + return AccountResourceTypes{}, fmt.Errorf("invalid resource type character: '%v'", r) + } + } + return rt, nil +} diff --git a/sdk/storage/azfile/sas/account_test.go b/sdk/storage/azfile/sas/account_test.go new file mode 100644 index 000000000000..d22d645185ed --- /dev/null +++ b/sdk/storage/azfile/sas/account_test.go @@ -0,0 +1,124 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package sas + +import ( + "github.com/stretchr/testify/require" + "testing" +) + +func TestAccountPermissions_String(t *testing.T) { + testdata := []struct { + input AccountPermissions + expected string + }{ + {input: AccountPermissions{Read: true}, expected: "r"}, + {input: AccountPermissions{Write: true}, expected: "w"}, + {input: AccountPermissions{Delete: true}, expected: "d"}, + {input: AccountPermissions{List: true}, expected: "l"}, + {input: AccountPermissions{Create: true}, expected: "c"}, + {input: AccountPermissions{ + Read: true, + Write: true, + Delete: true, + List: true, + Create: true, + }, expected: "rwdlc"}, + } + for _, c := range testdata { + require.Equal(t, c.expected, c.input.String()) + } +} + +func TestAccountPermissions_Parse(t *testing.T) { + testdata := []struct { + input string + expected AccountPermissions + }{ + {expected: AccountPermissions{Read: true}, input: "r"}, + {expected: AccountPermissions{Write: true}, input: "w"}, + {expected: AccountPermissions{Delete: true}, input: "d"}, + {expected: AccountPermissions{List: true}, input: "l"}, + {expected: AccountPermissions{Create: true}, input: "c"}, + {expected: AccountPermissions{ + Read: true, + Write: true, 
+ Delete: true, + List: true, + Create: true, + }, input: "rwdlc"}, + {expected: AccountPermissions{ + Read: true, + Write: true, + Delete: true, + List: true, + Create: true, + }, input: "rcdlw"}, + } + for _, c := range testdata { + permissions, err := parseAccountPermissions(c.input) + require.Nil(t, err) + require.Equal(t, c.expected, permissions) + } +} + +func TestAccountPermissions_ParseNegative(t *testing.T) { + _, err := parseAccountPermissions("rwldcz") // Here 'z' is invalid + require.NotNil(t, err) + require.Contains(t, err.Error(), "122") +} + +func TestAccountResourceTypes_String(t *testing.T) { + testdata := []struct { + input AccountResourceTypes + expected string + }{ + {input: AccountResourceTypes{Service: true}, expected: "s"}, + {input: AccountResourceTypes{Container: true}, expected: "c"}, + {input: AccountResourceTypes{Object: true}, expected: "o"}, + {input: AccountResourceTypes{ + Service: true, + Container: true, + Object: true, + }, expected: "sco"}, + } + for _, c := range testdata { + require.Equal(t, c.expected, c.input.String()) + } +} + +func TestAccountResourceTypes_Parse(t *testing.T) { + testdata := []struct { + input string + expected AccountResourceTypes + }{ + {expected: AccountResourceTypes{Service: true}, input: "s"}, + {expected: AccountResourceTypes{Container: true}, input: "c"}, + {expected: AccountResourceTypes{Object: true}, input: "o"}, + {expected: AccountResourceTypes{ + Service: true, + Container: true, + Object: true, + }, input: "sco"}, + {expected: AccountResourceTypes{ + Service: true, + Container: true, + Object: true, + }, input: "osc"}, + } + for _, c := range testdata { + permissions, err := parseAccountResourceTypes(c.input) + require.Nil(t, err) + require.Equal(t, c.expected, permissions) + } +} + +func TestAccountResourceTypes_ParseNegative(t *testing.T) { + _, err := parseAccountResourceTypes("scoz") // Here 'z' is invalid + require.NotNil(t, err) + require.Contains(t, err.Error(), "122") +} diff --git 
a/sdk/storage/azfile/sas/query_params.go b/sdk/storage/azfile/sas/query_params.go new file mode 100644 index 000000000000..5bf5422d6082 --- /dev/null +++ b/sdk/storage/azfile/sas/query_params.go @@ -0,0 +1,339 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package sas + +import ( + "errors" + "net" + "net/url" + "strings" + "time" +) + +// timeFormat represents the format of a SAS start or expiry time. Use it when formatting/parsing a time.Time. +const ( + timeFormat = "2006-01-02T15:04:05Z" // "2017-07-27T00:00:00Z" // ISO 8601 + SnapshotTimeFormat = "2006-01-02T15:04:05.0000000Z07:00" +) + +var ( + // Version is the default version encoded in the SAS token. + Version = "2020-02-10" +) + +// TimeFormats ISO 8601 format. +// Please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas for more details. +var timeFormats = []string{"2006-01-02T15:04:05.0000000Z", timeFormat, "2006-01-02T15:04Z", "2006-01-02"} + +// Protocol indicates the http/https. +type Protocol string + +const ( + // ProtocolHTTPS can be specified for a SAS protocol. + ProtocolHTTPS Protocol = "https" + + // ProtocolHTTPSandHTTP can be specified for a SAS protocol. + ProtocolHTTPSandHTTP Protocol = "https,http" +) + +// FormatTimesForSigning converts a time.Time to a SnapshotTimeFormat string suitable for a +// Field's StartTime or ExpiryTime fields. Returns "" if value.IsZero(). 
+func formatTimesForSigning(startTime, expiryTime, snapshotTime time.Time) (string, string, string) {
+	ss := ""
+	if !startTime.IsZero() {
+		ss = formatTimeWithDefaultFormat(&startTime)
+	}
+	se := ""
+	if !expiryTime.IsZero() {
+		se = formatTimeWithDefaultFormat(&expiryTime)
+	}
+	sh := ""
+	if !snapshotTime.IsZero() {
+		sh = snapshotTime.Format(SnapshotTimeFormat)
+	}
+	return ss, se, sh
+}
+
+// formatTimeWithDefaultFormat format time with ISO 8601 in "yyyy-MM-ddTHH:mm:ssZ".
+func formatTimeWithDefaultFormat(t *time.Time) string {
+	return formatTime(t, timeFormat) // By default, "yyyy-MM-ddTHH:mm:ssZ" is used
+}
+
+// formatTime format time with given format, use ISO 8601 in "yyyy-MM-ddTHH:mm:ssZ" by default.
+func formatTime(t *time.Time, format string) string {
+	if format != "" {
+		return t.Format(format)
+	}
+	return t.Format(timeFormat) // By default, "yyyy-MM-ddTHH:mm:ssZ" is used
+}
+
+// parseTime tries to parse a SAS time string.
+func parseTime(val string) (t time.Time, timeFormat string, err error) {
+	for _, sasTimeFormat := range timeFormats {
+		t, err = time.Parse(sasTimeFormat, val)
+		if err == nil {
+			timeFormat = sasTimeFormat
+			break
+		}
+	}
+
+	if err != nil {
+		err = errors.New("failed to parse time with ISO 8601 formats, please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas for more details")
+	}
+
+	return
+}
+
+// IPRange represents a SAS IP range's start IP and (optionally) end IP.
+type IPRange struct {
+	Start net.IP // Not specified if length = 0
+	End   net.IP // Not specified if length = 0
+}
+
+// String returns a string representation of an IPRange.
+func (ipr *IPRange) String() string { + if len(ipr.Start) == 0 { + return "" + } + start := ipr.Start.String() + if len(ipr.End) == 0 { + return start + } + return start + "-" + ipr.End.String() +} + +// https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas + +// QueryParameters object represents the components that make up an Azure Storage SAS' query parameters. +// You parse a map of query parameters into its fields by calling NewQueryParameters(). You add the components +// to a query parameter map by calling AddToValues(). +// NOTE: Changing any field requires computing a new SAS signature using a XxxSASSignatureValues type. +// This type defines the components used by all Azure Storage resources (Containers, Blobs, Files, & Queues). +type QueryParameters struct { + // All members are immutable or values so copies of this struct are goroutine-safe. + version string `param:"sv"` + services string `param:"ss"` + resourceTypes string `param:"srt"` + protocol Protocol `param:"spr"` + startTime time.Time `param:"st"` + expiryTime time.Time `param:"se"` + shareSnapshotTime time.Time `param:"sharesnapshot"` + ipRange IPRange `param:"sip"` + identifier string `param:"si"` + resource string `param:"sr"` + permissions string `param:"sp"` + signature string `param:"sig"` + cacheControl string `param:"rscc"` + contentDisposition string `param:"rscd"` + contentEncoding string `param:"rsce"` + contentLanguage string `param:"rscl"` + contentType string `param:"rsct"` + // private member used for startTime and expiryTime formatting. + stTimeFormat string + seTimeFormat string +} + +// ShareSnapshotTime returns shareSnapshotTime. +func (p *QueryParameters) ShareSnapshotTime() time.Time { + return p.shareSnapshotTime +} + +// Version returns version. +func (p *QueryParameters) Version() string { + return p.version +} + +// Services returns services. 
func (p *QueryParameters) Services() string {
	return p.services
}

// ResourceTypes returns resourceTypes (the "srt" parameter).
func (p *QueryParameters) ResourceTypes() string {
	return p.resourceTypes
}

// Protocol returns protocol (the "spr" parameter).
func (p *QueryParameters) Protocol() Protocol {
	return p.protocol
}

// StartTime returns startTime (the "st" parameter).
func (p *QueryParameters) StartTime() time.Time {
	return p.startTime
}

// ExpiryTime returns expiryTime (the "se" parameter).
func (p *QueryParameters) ExpiryTime() time.Time {
	return p.expiryTime
}

// IPRange returns ipRange (the "sip" parameter).
func (p *QueryParameters) IPRange() IPRange {
	return p.ipRange
}

// Identifier returns identifier (the "si" parameter).
func (p *QueryParameters) Identifier() string {
	return p.identifier
}

// Resource returns resource (the "sr" parameter).
func (p *QueryParameters) Resource() string {
	return p.resource
}

// Permissions returns permissions (the "sp" parameter).
func (p *QueryParameters) Permissions() string {
	return p.permissions
}

// Signature returns signature (the "sig" parameter).
func (p *QueryParameters) Signature() string {
	return p.signature
}

// CacheControl returns cacheControl (the "rscc" parameter).
func (p *QueryParameters) CacheControl() string {
	return p.cacheControl
}

// ContentDisposition returns contentDisposition (the "rscd" parameter).
func (p *QueryParameters) ContentDisposition() string {
	return p.contentDisposition
}

// ContentEncoding returns contentEncoding (the "rsce" parameter).
func (p *QueryParameters) ContentEncoding() string {
	return p.contentEncoding
}

// ContentLanguage returns contentLanguage (the "rscl" parameter).
func (p *QueryParameters) ContentLanguage() string {
	return p.contentLanguage
}

// ContentType returns contentType (the "rsct" parameter).
func (p *QueryParameters) ContentType() string {
	return p.contentType
}

// Encode encodes the SAS query parameters into URL encoded form sorted by key.
+func (p *QueryParameters) Encode() string { + v := url.Values{} + + if p.version != "" { + v.Add("sv", p.version) + } + if p.services != "" { + v.Add("ss", p.services) + } + if p.resourceTypes != "" { + v.Add("srt", p.resourceTypes) + } + if p.protocol != "" { + v.Add("spr", string(p.protocol)) + } + if !p.startTime.IsZero() { + v.Add("st", formatTime(&(p.startTime), p.stTimeFormat)) + } + if !p.expiryTime.IsZero() { + v.Add("se", formatTime(&(p.expiryTime), p.seTimeFormat)) + } + if len(p.ipRange.Start) > 0 { + v.Add("sip", p.ipRange.String()) + } + if p.identifier != "" { + v.Add("si", p.identifier) + } + if p.resource != "" { + v.Add("sr", p.resource) + } + if p.permissions != "" { + v.Add("sp", p.permissions) + } + if p.signature != "" { + v.Add("sig", p.signature) + } + if p.cacheControl != "" { + v.Add("rscc", p.cacheControl) + } + if p.contentDisposition != "" { + v.Add("rscd", p.contentDisposition) + } + if p.contentEncoding != "" { + v.Add("rsce", p.contentEncoding) + } + if p.contentLanguage != "" { + v.Add("rscl", p.contentLanguage) + } + if p.contentType != "" { + v.Add("rsct", p.contentType) + } + + return v.Encode() +} + +// NewQueryParameters creates and initializes a QueryParameters object based on the +// query parameter map's passed-in values. If deleteSASParametersFromValues is true, +// all SAS-related query parameters are removed from the passed-in map. If +// deleteSASParametersFromValues is false, the map passed-in map is unaltered. 
+func NewQueryParameters(values url.Values, deleteSASParametersFromValues bool) QueryParameters { + p := QueryParameters{} + for k, v := range values { + val := v[0] + isSASKey := true + switch strings.ToLower(k) { + case "sv": + p.version = val + case "ss": + p.services = val + case "srt": + p.resourceTypes = val + case "spr": + p.protocol = Protocol(val) + case "sharesnapshot": + p.shareSnapshotTime, _ = time.Parse(SnapshotTimeFormat, val) + case "st": + p.startTime, p.stTimeFormat, _ = parseTime(val) + case "se": + p.expiryTime, p.seTimeFormat, _ = parseTime(val) + case "sip": + dashIndex := strings.Index(val, "-") + if dashIndex == -1 { + p.ipRange.Start = net.ParseIP(val) + } else { + p.ipRange.Start = net.ParseIP(val[:dashIndex]) + p.ipRange.End = net.ParseIP(val[dashIndex+1:]) + } + case "si": + p.identifier = val + case "sr": + p.resource = val + case "sp": + p.permissions = val + case "sig": + p.signature = val + case "rscc": + p.cacheControl = val + case "rscd": + p.contentDisposition = val + case "rsce": + p.contentEncoding = val + case "rscl": + p.contentLanguage = val + case "rsct": + p.contentType = val + default: + isSASKey = false // We didn't recognize the query parameter + } + if isSASKey && deleteSASParametersFromValues { + delete(values, k) + } + } + return p +} diff --git a/sdk/storage/azfile/sas/query_params_test.go b/sdk/storage/azfile/sas/query_params_test.go new file mode 100644 index 000000000000..7d699f9c3396 --- /dev/null +++ b/sdk/storage/azfile/sas/query_params_test.go @@ -0,0 +1,211 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 

package sas

import (
	"fmt"
	"net"
	"net/url"
	"strings"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

// Table-driven check that zero times serialize to "" and that snapshot times
// keep sub-second precision while st/se are truncated to whole seconds.
func TestFormatTimesForSigning(t *testing.T) {
	testdata := []struct {
		inputStart       time.Time
		inputEnd         time.Time
		inputSnapshot    time.Time
		expectedStart    string
		expectedEnd      string
		expectedSnapshot string
	}{
		{expectedStart: "", expectedEnd: "", expectedSnapshot: ""},
		{inputStart: time.Date(1955, 6, 25, 22, 15, 56, 345456, time.UTC), expectedStart: "1955-06-25T22:15:56Z", expectedEnd: "", expectedSnapshot: ""},
		{inputEnd: time.Date(2023, 4, 5, 8, 50, 27, 4500, time.UTC), expectedStart: "", expectedEnd: "2023-04-05T08:50:27Z", expectedSnapshot: ""},
		{inputSnapshot: time.Date(2021, 1, 5, 22, 15, 33, 1234879, time.UTC), expectedStart: "", expectedEnd: "", expectedSnapshot: "2021-01-05T22:15:33.0012348Z"},
		{
			inputStart:       time.Date(1955, 6, 25, 22, 15, 56, 345456, time.UTC),
			inputEnd:         time.Date(2023, 4, 5, 8, 50, 27, 4500, time.UTC),
			inputSnapshot:    time.Date(2021, 1, 5, 22, 15, 33, 1234879, time.UTC),
			expectedStart:    "1955-06-25T22:15:56Z",
			expectedEnd:      "2023-04-05T08:50:27Z",
			expectedSnapshot: "2021-01-05T22:15:33.0012348Z",
		},
	}
	for _, c := range testdata {
		start, end, ss := formatTimesForSigning(c.inputStart, c.inputEnd, c.inputSnapshot)
		require.Equal(t, c.expectedStart, start)
		require.Equal(t, c.expectedEnd, end)
		require.Equal(t, c.expectedSnapshot, ss)
	}
}

func TestFormatTimeWithDefaultFormat(t *testing.T) {
	testdata := []struct {
		input        time.Time
		expectedTime string
	}{
		{input: time.Date(1955, 4, 5, 8, 50, 27, 4500, time.UTC), expectedTime: "1955-04-05T08:50:27Z"},
		{input: time.Date(1917, 3, 9, 16, 22, 56, 0, time.UTC), expectedTime: "1917-03-09T16:22:56Z"},
		{input: time.Date(2021, 1, 5, 22, 15, 0, 0, time.UTC), expectedTime: "2021-01-05T22:15:00Z"},
		{input: time.Date(2023, 6, 25, 0, 0, 0, 0, time.UTC), expectedTime: "2023-06-25T00:00:00Z"},
	}
	for _, c := range testdata {
		formattedTime := formatTimeWithDefaultFormat(&c.input)
		require.Equal(t, c.expectedTime, formattedTime)
	}
}

// An empty format must fall back to the default "yyyy-MM-ddTHH:mm:ssZ" layout.
func TestFormatTime(t *testing.T) {
	testdata := []struct {
		input        time.Time
		format       string
		expectedTime string
	}{
		{input: time.Date(1955, 4, 5, 8, 50, 27, 4500, time.UTC), format: "2006-01-02T15:04:05.0000000Z", expectedTime: "1955-04-05T08:50:27.0000045Z"},
		{input: time.Date(1955, 4, 5, 8, 50, 27, 4500, time.UTC), format: "", expectedTime: "1955-04-05T08:50:27Z"},
		{input: time.Date(1917, 3, 9, 16, 22, 56, 0, time.UTC), format: "2006-01-02T15:04:05Z", expectedTime: "1917-03-09T16:22:56Z"},
		{input: time.Date(1917, 3, 9, 16, 22, 56, 0, time.UTC), format: "", expectedTime: "1917-03-09T16:22:56Z"},
		{input: time.Date(2021, 1, 5, 22, 15, 0, 0, time.UTC), format: "2006-01-02T15:04Z", expectedTime: "2021-01-05T22:15Z"},
		{input: time.Date(2021, 1, 5, 22, 15, 0, 0, time.UTC), format: "", expectedTime: "2021-01-05T22:15:00Z"},
		{input: time.Date(2023, 6, 25, 0, 0, 0, 0, time.UTC), format: "2006-01-02", expectedTime: "2023-06-25"},
		{input: time.Date(2023, 6, 25, 0, 0, 0, 0, time.UTC), format: "", expectedTime: "2023-06-25T00:00:00Z"},
	}
	for _, c := range testdata {
		formattedTime := formatTime(&c.input, c.format)
		require.Equal(t, c.expectedTime, formattedTime)
	}
}

// parseTime must report both the parsed time and the layout that matched.
func TestParseTime(t *testing.T) {
	testdata := []struct {
		input          string
		expectedTime   time.Time
		expectedFormat string
	}{
		{input: "1955-04-05T08:50:27.0000045Z", expectedTime: time.Date(1955, 4, 5, 8, 50, 27, 4500, time.UTC), expectedFormat: "2006-01-02T15:04:05.0000000Z"},
		{input: "1917-03-09T16:22:56Z", expectedTime: time.Date(1917, 3, 9, 16, 22, 56, 0, time.UTC), expectedFormat: "2006-01-02T15:04:05Z"},
		{input: "2021-01-05T22:15Z", expectedTime: time.Date(2021, 1, 5, 22, 15, 0, 0, time.UTC), expectedFormat: "2006-01-02T15:04Z"},
		{input: "2023-06-25", expectedTime: time.Date(2023, 6, 25, 0, 0, 0, 0, time.UTC), expectedFormat: "2006-01-02"},
	}
	for _, c := range testdata {
		parsedTime, format, err := parseTime(c.input)
		require.Nil(t, err)
		require.Equal(t, c.expectedTime, parsedTime)
		require.Equal(t, c.expectedFormat, format)
	}
}

func TestParseTimeNegative(t *testing.T) {
	// NOTE(review): require.Error's second argument is only a failure message,
	// not an expected-error matcher; this asserts only that an error occurred.
	_, _, err := parseTime("notatime")
	require.Error(t, err, "fail to parse time with IOS 8601 formats, please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas for more details")
}

func TestIPRange_String(t *testing.T) {
	testdata := []struct {
		inputStart net.IP
		inputEnd   net.IP
		expected   string
	}{
		{expected: ""},
		{inputStart: net.IPv4(10, 255, 0, 0), expected: "10.255.0.0"},
		{inputStart: net.IPv4(10, 255, 0, 0), inputEnd: net.IPv4(10, 255, 0, 50), expected: "10.255.0.0-10.255.0.50"},
	}
	for _, c := range testdata {
		var ipRange IPRange
		if c.inputStart != nil {
			ipRange.Start = c.inputStart
		}
		if c.inputEnd != nil {
			ipRange.End = c.inputEnd
		}
		require.Equal(t, c.expected, ipRange.String())
	}
}

func TestSAS(t *testing.T) {
	// Note: This is a totally invalid fake SAS, this is just testing our ability to parse different query parameters on a SAS
	const sas = "sv=2019-12-12&sr=b&st=2111-01-09T01:42:34.936Z&se=2222-03-09T01:42:34.936Z&sp=rw&sip=168.1.5.60-168.1.5.70&spr=https,http&si=myIdentifier&ss=bf&srt=s&rscc=cc&rscd=cd&rsce=ce&rscl=cl&rsct=ct&sig=clNxbtnkKSHw7f3KMEVVc4agaszoRFdbZr%2FWBmPNsrw%3D"
	_url := fmt.Sprintf("https://teststorageaccount.file.core.windows.net/testshare/testpath?%s", sas)
	_uri, err := url.Parse(_url)
	require.NoError(t, err)
	sasQueryParams := NewQueryParameters(_uri.Query(), true)
	validateSAS(t, sas, sasQueryParams)
}

// validateSAS cross-checks every component of a raw SAS query string against
// the corresponding QueryParameters accessor.
func validateSAS(t *testing.T, sas string, parameters QueryParameters) {
	sasCompMap := make(map[string]string)
	for _, sasComp := range strings.Split(sas, "&") {
		comp := strings.Split(sasComp, "=")
		sasCompMap[comp[0]] = comp[1]
	}

	require.Equal(t, parameters.Version(), sasCompMap["sv"])
	require.Equal(t, parameters.Services(), sasCompMap["ss"])
	require.Equal(t, parameters.ResourceTypes(), sasCompMap["srt"])
	require.Equal(t, string(parameters.Protocol()), sasCompMap["spr"])
	if _, ok := sasCompMap["st"]; ok {
		startTime, _, err := parseTime(sasCompMap["st"])
		require.NoError(t, err)
		require.Equal(t, parameters.StartTime(), startTime)
	}
	if _, ok := sasCompMap["se"]; ok {
		endTime, _, err := parseTime(sasCompMap["se"])
		require.NoError(t, err)
		require.Equal(t, parameters.ExpiryTime(), endTime)
	}

	if _, ok := sasCompMap["sharesnapshot"]; ok {
		snapshotTime, _, err := parseTime(sasCompMap["sharesnapshot"])
		require.NoError(t, err)
		require.Equal(t, parameters.ShareSnapshotTime(), snapshotTime)
	}
	ipRange := parameters.IPRange()
	require.Equal(t, ipRange.String(), sasCompMap["sip"])
	require.Equal(t, parameters.Identifier(), sasCompMap["si"])
	require.Equal(t, parameters.Resource(), sasCompMap["sr"])
	require.Equal(t, parameters.Permissions(), sasCompMap["sp"])

	// The raw signature is URL-escaped in the query string; unescape before comparing.
	sign, err := url.QueryUnescape(sasCompMap["sig"])
	require.NoError(t, err)

	require.Equal(t, parameters.Signature(), sign)
	require.Equal(t, parameters.CacheControl(), sasCompMap["rscc"])
	require.Equal(t, parameters.ContentDisposition(), sasCompMap["rscd"])
	require.Equal(t, parameters.ContentEncoding(), sasCompMap["rsce"])
	require.Equal(t, parameters.ContentLanguage(), sasCompMap["rscl"])
	require.Equal(t, parameters.ContentType(), sasCompMap["rsct"])
}

func TestSASInvalidQueryParameter(t *testing.T) {
	// Signature is invalid below
	const sas = "sv=2019-12-12&signature=clNxbtnkKSHw7f3KMEVVc4agaszoRFdbZr%2FWBmPNsrw%3D&sr=b"
	_url := fmt.Sprintf("https://teststorageaccount.file.core.windows.net/testshare/testpath?%s", sas)
	_uri, err := url.Parse(_url)
	require.NoError(t, err)
	NewQueryParameters(_uri.Query(), true)
	// NewQueryParameters should not delete signature
	require.Contains(t, _uri.Query(), "signature")
}

// Encode must emit a canonical, key-sorted query string regardless of the
// order the parameters were originally supplied in.
func TestEncode(t *testing.T) {
	// Note: This is a totally invalid fake SAS, this is just testing our ability to parse different query parameters on a SAS
	expected := "rscc=cc&rscd=cd&rsce=ce&rscl=cl&rsct=ct&se=2222-03-09T01%3A42%3A34Z&si=myIdentifier&sig=clNxbtnkKSHw7f3KMEVVc4agaszoRFdbZr%2FWBmPNsrw%3D&sip=168.1.5.60-168.1.5.70&sp=rw&spr=https%2Chttp&sr=b&srt=sco&ss=bf&st=2111-01-09T01%3A42%3A34Z&sv=2019-12-12"
	randomOrder := "se=2222-03-09T01:42:34.936Z&rsce=ce&ss=bf&si=myIdentifier&sip=168.1.5.60-168.1.5.70&rscc=cc&srt=sco&sig=clNxbtnkKSHw7f3KMEVVc4agaszoRFdbZr%2FWBmPNsrw%3D&rsct=ct&rscl=cl&sv=2019-12-12&sr=b&st=2111-01-09T01:42:34.936Z&rscd=cd&sp=rw&spr=https,http"
	testdata := []string{expected, randomOrder}

	for _, sas := range testdata {
		_url := fmt.Sprintf("https://teststorageaccount.file.core.windows.net/testshare/testpath?%s", sas)
		_uri, err := url.Parse(_url)
		require.NoError(t, err)
		queryParams := NewQueryParameters(_uri.Query(), true)
		require.Equal(t, expected, queryParams.Encode())
	}
}
diff --git a/sdk/storage/azfile/sas/service.go b/sdk/storage/azfile/sas/service.go
new file mode 100644
index 000000000000..50192f9ef58b
--- /dev/null
+++ b/sdk/storage/azfile/sas/service.go
@@ -0,0 +1,227 @@
//go:build go1.18
// +build go1.18

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

package sas

import (
	"bytes"
	"errors"
	"fmt"
	"strings"
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/exported"
)

// SignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage file or share.
// For more information on creating service sas, see https://docs.microsoft.com/rest/api/storageservices/constructing-a-service-sas
// User Delegation SAS not supported for files service
type SignatureValues struct {
	Version            string    `param:"sv"`  // If not specified, this defaults to the package-level Version constant
	Protocol           Protocol  `param:"spr"` // See the Protocol* constants
	StartTime          time.Time `param:"st"`  // Not specified if IsZero
	ExpiryTime         time.Time `param:"se"`  // Not specified if IsZero
	SnapshotTime       time.Time // Share snapshot time; not specified if IsZero
	Permissions        string    `param:"sp"` // Create by initializing SharePermissions or FilePermissions and then call String()
	IPRange            IPRange   `param:"sip"`
	Identifier         string    `param:"si"`
	ShareName          string    // Name of the share being signed
	FilePath           string    // Ex: "directory/FileName". Use "" to create a Share SAS and file path for File SAS.
	CacheControl       string    // rscc
	ContentDisposition string    // rscd
	ContentEncoding    string    // rsce
	ContentLanguage    string    // rscl
	ContentType        string    // rsct
}

// SignWithSharedKey uses an account's SharedKeyCredential to sign this signature values to produce the proper SAS query parameters.
func (v SignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKeyCredential) (QueryParameters, error) {
	// ExpiryTime and Permissions are mandatory for a service SAS.
	if v.ExpiryTime.IsZero() || v.Permissions == "" {
		return QueryParameters{}, errors.New("service SAS is missing at least one of these: ExpiryTime or Permissions")
	}

	// "s" = share resource; switched to "f" below when a file path is present.
	resource := "s"
	if v.FilePath == "" {
		// Make sure the permission characters are in the correct order
		perms, err := parseSharePermissions(v.Permissions)
		if err != nil {
			return QueryParameters{}, err
		}
		v.Permissions = perms.String()
	} else {
		resource = "f"
		// Make sure the permission characters are in the correct order
		perms, err := parseFilePermissions(v.Permissions)
		if err != nil {
			return QueryParameters{}, err
		}
		v.Permissions = perms.String()
	}

	if v.Version == "" {
		v.Version = Version
	}
	startTime, expiryTime, _ := formatTimesForSigning(v.StartTime, v.ExpiryTime, v.SnapshotTime)

	// String to sign: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
	// NOTE: the field order below is dictated by the service; do not reorder.
	stringToSign := strings.Join([]string{
		v.Permissions,
		startTime,
		expiryTime,
		getCanonicalName(sharedKeyCredential.AccountName(), v.ShareName, v.FilePath),
		v.Identifier,
		v.IPRange.String(),
		string(v.Protocol),
		v.Version,
		v.CacheControl,       // rscc
		v.ContentDisposition, // rscd
		v.ContentEncoding,    // rsce
		v.ContentLanguage,    // rscl
		v.ContentType},       // rsct
		"\n")

	signature, err := exported.ComputeHMACSHA256(sharedKeyCredential, stringToSign)
	if err != nil {
		return QueryParameters{}, err
	}

	p := QueryParameters{
		// Common SAS parameters
		version:     v.Version,
		protocol:    v.Protocol,
		startTime:   v.StartTime,
		expiryTime:  v.ExpiryTime,
		permissions: v.Permissions,
		ipRange:     v.IPRange,

		// Share/File-specific SAS parameters
		resource:           resource,
		identifier:         v.Identifier,
		cacheControl:       v.CacheControl,
		contentDisposition: v.ContentDisposition,
		contentEncoding:    v.ContentEncoding,
		contentLanguage:    v.ContentLanguage,
		contentType:        v.ContentType,
		shareSnapshotTime:  v.SnapshotTime,
		// Calculated SAS signature
		signature: signature,
	}

	return p, nil
}

// getCanonicalName computes the canonical name for a share or file resource for SAS signing.
func getCanonicalName(account string, shareName string, filePath string) string {
	// Share: "/file/account/sharename"
	// File:  "/file/account/sharename/filename"
	// File:  "/file/account/sharename/directoryname/filename"
	elements := []string{"/file/", account, "/", shareName}
	if filePath != "" {
		// Normalize Windows-style separators and drop a single leading slash.
		dfp := strings.Replace(filePath, "\\", "/", -1)
		if dfp[0] == '/' {
			dfp = dfp[1:]
		}
		elements = append(elements, "/", dfp)
	}
	return strings.Join(elements, "")
}

// SharePermissions type simplifies creating the permissions string for an Azure Storage share SAS.
// Initialize an instance of this type and then call its String method to set SignatureValues' Permissions field.
// All permissions descriptions can be found here: https://docs.microsoft.com/en-us/rest/api/storageservices/create-service-sas#permissions-for-a-share
type SharePermissions struct {
	Read, Create, Write, Delete, List bool
}

// String produces the SAS permissions string for an Azure Storage share.
// Call this method to set SignatureValues' Permissions field.
// The output order (rcwdl) is the order the service requires.
func (p *SharePermissions) String() string {
	var b bytes.Buffer
	if p.Read {
		b.WriteRune('r')
	}
	if p.Create {
		b.WriteRune('c')
	}
	if p.Write {
		b.WriteRune('w')
	}
	if p.Delete {
		b.WriteRune('d')
	}
	if p.List {
		b.WriteRune('l')
	}
	return b.String()
}

// parseSharePermissions initializes SharePermissions' fields from a string.
+func parseSharePermissions(s string) (SharePermissions, error) { + p := SharePermissions{} // Clear the flags + for _, r := range s { + switch r { + case 'r': + p.Read = true + case 'c': + p.Create = true + case 'w': + p.Write = true + case 'd': + p.Delete = true + case 'l': + p.List = true + default: + return SharePermissions{}, fmt.Errorf("invalid permission: '%v'", r) + } + } + return p, nil +} + +// FilePermissions type simplifies creating the permissions string for an Azure Storage file SAS. +// Initialize an instance of this type and then call its String method to set SignatureValues' Permissions field. +// All permissions descriptions can be found here: https://docs.microsoft.com/en-us/rest/api/storageservices/create-service-sas#permissions-for-a-file +type FilePermissions struct { + Read, Create, Write, Delete bool +} + +// String produces the SAS permissions string for an Azure Storage file. +// Call this method to set SignatureValues' Permissions field. +func (p *FilePermissions) String() string { + var b bytes.Buffer + if p.Read { + b.WriteRune('r') + } + if p.Create { + b.WriteRune('c') + } + if p.Write { + b.WriteRune('w') + } + if p.Delete { + b.WriteRune('d') + } + return b.String() +} + +// parseFilePermissions initializes the FilePermissions' fields from a string. +func parseFilePermissions(s string) (FilePermissions, error) { + p := FilePermissions{} // Clear the flags + for _, r := range s { + switch r { + case 'r': + p.Read = true + case 'c': + p.Create = true + case 'w': + p.Write = true + case 'd': + p.Delete = true + default: + return FilePermissions{}, fmt.Errorf("invalid permission: '%v'", r) + } + } + return p, nil +} diff --git a/sdk/storage/azfile/sas/service_test.go b/sdk/storage/azfile/sas/service_test.go new file mode 100644 index 000000000000..dd640be0e4fc --- /dev/null +++ b/sdk/storage/azfile/sas/service_test.go @@ -0,0 +1,147 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. 
// Licensed under the MIT License. See License.txt in the project root for license information.

package sas

import (
	"github.com/stretchr/testify/require"
	"testing"
)

func TestSharePermissions_String(t *testing.T) {
	testdata := []struct {
		input    SharePermissions
		expected string
	}{
		{input: SharePermissions{Read: true}, expected: "r"},
		{input: SharePermissions{Create: true}, expected: "c"},
		{input: SharePermissions{Write: true}, expected: "w"},
		{input: SharePermissions{Delete: true}, expected: "d"},
		{input: SharePermissions{List: true}, expected: "l"},
		{input: SharePermissions{
			Read:   true,
			Create: true,
			Write:  true,
			Delete: true,
			List:   true,
		}, expected: "rcwdl"},
	}
	for _, c := range testdata {
		require.Equal(t, c.expected, c.input.String())
	}
}

func TestSharePermissions_Parse(t *testing.T) {
	testdata := []struct {
		input    string
		expected SharePermissions
	}{
		{expected: SharePermissions{Read: true}, input: "r"},
		{expected: SharePermissions{Create: true}, input: "c"},
		{expected: SharePermissions{Write: true}, input: "w"},
		{expected: SharePermissions{Delete: true}, input: "d"},
		{expected: SharePermissions{List: true}, input: "l"},
		{expected: SharePermissions{
			Read:   true,
			Create: true,
			Write:  true,
			Delete: true,
			List:   true,
		}, input: "rcwdl"},
		{expected: SharePermissions{
			Read:   true,
			Create: true,
			Write:  true,
			Delete: true,
			List:   true,
		}, input: "cwrdl"}, // Wrong order parses correctly
	}
	for _, c := range testdata {
		permissions, err := parseSharePermissions(c.input)
		require.Nil(t, err)
		require.Equal(t, c.expected, permissions)
	}
}

func TestSharePermissions_ParseNegative(t *testing.T) {
	_, err := parseSharePermissions("cwtrdl") // Here 't' is invalid
	// "116" is the decimal rune value of 't' as rendered by the '%v' verb.
	require.NotNil(t, err)
	require.Contains(t, err.Error(), "116")
}

func TestFilePermissions_String(t *testing.T) {
	testdata := []struct {
		input    FilePermissions
		expected string
	}{
		{input: FilePermissions{Read: true}, expected: "r"},
		{input: FilePermissions{Create: true}, expected: "c"},
		{input: FilePermissions{Write: true}, expected: "w"},
		{input: FilePermissions{Delete: true}, expected: "d"},
		{input: FilePermissions{
			Read:   true,
			Create: true,
			Write:  true,
			Delete: true,
		}, expected: "rcwd"},
	}
	for _, c := range testdata {
		require.Equal(t, c.expected, c.input.String())
	}
}

func TestFilePermissions_Parse(t *testing.T) {
	testdata := []struct {
		expected FilePermissions
		input    string
	}{
		{expected: FilePermissions{Read: true}, input: "r"},
		{expected: FilePermissions{Create: true}, input: "c"},
		{expected: FilePermissions{Write: true}, input: "w"},
		{expected: FilePermissions{Delete: true}, input: "d"},
		{expected: FilePermissions{
			Read:   true,
			Create: true,
			Write:  true,
			Delete: true,
		}, input: "rcwd"},
		{expected: FilePermissions{
			Read:   true,
			Create: true,
			Write:  true,
			Delete: true,
		}, input: "wcrd"}, // Wrong order parses correctly
	}
	for _, c := range testdata {
		permissions, err := parseFilePermissions(c.input)
		require.Nil(t, err)
		require.Equal(t, c.expected, permissions)
	}
}

func TestFilePermissions_ParseNegative(t *testing.T) {
	_, err := parseFilePermissions("wcrdf") // Here 'f' is invalid
	// "102" is the decimal rune value of 'f' as rendered by the '%v' verb.
	require.NotNil(t, err)
	require.Contains(t, err.Error(), "102")
}

func TestGetCanonicalName(t *testing.T) {
	testdata := []struct {
		inputAccount  string
		inputShare    string
		inputFilePath string
		expected      string
	}{
		{inputAccount: "fakestorageaccount", inputShare: "fakestorageshare", expected: "/file/fakestorageaccount/fakestorageshare"},
		{inputAccount: "fakestorageaccount", inputShare: "fakestorageshare", inputFilePath: "fakestoragefile", expected: "/file/fakestorageaccount/fakestorageshare/fakestoragefile"},
		{inputAccount: "fakestorageaccount", inputShare: "fakestorageshare", inputFilePath: "fakestoragedirectory/fakestoragefile", expected: "/file/fakestorageaccount/fakestorageshare/fakestoragedirectory/fakestoragefile"},
		{inputAccount: "fakestorageaccount", inputShare: "fakestorageshare", inputFilePath: "fakestoragedirectory\\fakestoragefile", expected: "/file/fakestorageaccount/fakestorageshare/fakestoragedirectory/fakestoragefile"},
		{inputAccount: "fakestorageaccount", inputShare: "fakestorageshare", inputFilePath: "fakestoragedirectory", expected: "/file/fakestorageaccount/fakestorageshare/fakestoragedirectory"},
	}
	for _, c := range testdata {
		require.Equal(t, c.expected, getCanonicalName(c.inputAccount, c.inputShare, c.inputFilePath))
	}
}
diff --git a/sdk/storage/azfile/sas/url_parts.go b/sdk/storage/azfile/sas/url_parts.go
new file mode 100644
index 000000000000..3f741c921fd3
--- /dev/null
+++ b/sdk/storage/azfile/sas/url_parts.go
@@ -0,0 +1,147 @@
//go:build go1.18
// +build go1.18

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.

package sas

import (
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/shared"
	"net/url"
	"strings"
)

const (
	shareSnapshot = "sharesnapshot"
)

// IPEndpointStyleInfo is used for IP endpoint style URL when working with Azure storage emulator.
// Ex: "https://10.132.141.33/accountname/sharename"
type IPEndpointStyleInfo struct {
	AccountName string // "" if not using IP endpoint style
}

// URLParts object represents the components that make up an Azure Storage Share/Directory/File URL. You parse an
// existing URL into its parts by calling ParseURL(). You construct a URL from parts by calling String().
// NOTE: Changing any SAS-related field requires computing a new SAS signature.
type URLParts struct {
	Scheme              string              // Ex: "https://"
	Host                string              // Ex: "account.share.core.windows.net", "10.132.141.33", "10.132.141.33:80"
	IPEndpointStyleInfo IPEndpointStyleInfo // Useful Parts for IP endpoint style URL.
	ShareName           string              // Share name, Ex: "myshare"
	DirectoryOrFilePath string              // Path of directory or file, Ex: "mydirectory/myfile"
	ShareSnapshot       string              // "" if not a snapshot
	SAS                 QueryParameters     // Parsed SAS query parameters, if any
	UnparsedParams      string              // Query parameters not recognized as SAS or sharesnapshot
}

// ParseURL parses a URL initializing URLParts' fields including any SAS-related & sharesnapshot query parameters.
// Any other query parameters remain in the UnparsedParams field.
func ParseURL(u string) (URLParts, error) {
	uri, err := url.Parse(u)
	if err != nil {
		return URLParts{}, err
	}

	up := URLParts{
		Scheme: uri.Scheme,
		Host:   uri.Host,
	}

	if uri.Path != "" {
		path := uri.Path
		if path[0] == '/' {
			path = path[1:]
		}
		// IP-style URLs ("https://10.0.0.1/account/share/...") carry the account
		// name as the first path segment; peel it off before share parsing.
		if shared.IsIPEndpointStyle(up.Host) {
			if accountEndIndex := strings.Index(path, "/"); accountEndIndex == -1 { // Slash not found; path has account name & no share, path of directory or file
				up.IPEndpointStyleInfo.AccountName = path
				path = "" // no ShareName present in the URL so path should be empty
			} else {
				up.IPEndpointStyleInfo.AccountName = path[:accountEndIndex] // The account name is the part between the slashes
				path = path[accountEndIndex+1:]
			}
		}

		shareEndIndex := strings.Index(path, "/") // Find the next slash (if it exists)
		if shareEndIndex == -1 {                  // Slash not found; path has share name & no path of directory or file
			up.ShareName = path
		} else { // Slash found; path has share name & path of directory or file
			up.ShareName = path[:shareEndIndex]
			up.DirectoryOrFilePath = path[shareEndIndex+1:]
		}
	}

	// Convert the query parameters to a case-sensitive map & trim whitespace
	paramsMap := uri.Query()

	up.ShareSnapshot = "" // Assume no snapshot
	// The "sharesnapshot" key is matched case-insensitively.
	if snapshotStr, ok := caseInsensitiveValues(paramsMap).Get(shareSnapshot); ok {
		up.ShareSnapshot = snapshotStr[0]
		// If we recognized the query parameter, remove it from the map
		delete(paramsMap, shareSnapshot)
	}

	// NewQueryParameters(…, true) strips recognized SAS keys, leaving only
	// unrecognized parameters behind in paramsMap.
	up.SAS = NewQueryParameters(paramsMap, true)
	up.UnparsedParams = paramsMap.Encode()
	return up, nil
}

// String returns a URL object whose fields are initialized from the URLParts fields. The URL's RawQuery
// field contains the SAS, snapshot, and unparsed query parameters.
func (up URLParts) String() string {
	path := ""
	// Concatenate account name for IP endpoint style URL
	if shared.IsIPEndpointStyle(up.Host) && up.IPEndpointStyleInfo.AccountName != "" {
		path += "/" + up.IPEndpointStyleInfo.AccountName
	}
	// Concatenate share & path of directory or file (if they exist)
	if up.ShareName != "" {
		path += "/" + up.ShareName
		if up.DirectoryOrFilePath != "" {
			path += "/" + up.DirectoryOrFilePath
		}
	}

	rawQuery := up.UnparsedParams

	// If no snapshot is initially provided, fill it in from the SAS query properties to help the user
	if up.ShareSnapshot == "" && !up.SAS.ShareSnapshotTime().IsZero() {
		up.ShareSnapshot = up.SAS.ShareSnapshotTime().Format(SnapshotTimeFormat)
	}

	// Concatenate share snapshot query parameter (if it exists)
	if up.ShareSnapshot != "" {
		if len(rawQuery) > 0 {
			rawQuery += "&"
		}
		rawQuery += shareSnapshot + "=" + up.ShareSnapshot
	}
	sas := up.SAS.Encode()
	if sas != "" {
		if len(rawQuery) > 0 {
			rawQuery += "&"
		}
		rawQuery += sas
	}
	u := url.URL{
		Scheme:   up.Scheme,
		Host:     up.Host,
		Path:     path,
		RawQuery: rawQuery,
	}
	return u.String()
}

// caseInsensitiveValues wraps url.Values to allow case-insensitive key lookup.
type caseInsensitiveValues url.Values // map[string][]string

// Get returns the values stored under key (matched case-insensitively) and
// whether the key was present at all.
func (values caseInsensitiveValues) Get(key string) ([]string, bool) {
	key = strings.ToLower(key)
	for k, v := range values {
		if strings.ToLower(k) == key {
			return v, true
		}
	}
	return []string{}, false
}
diff --git a/sdk/storage/azfile/sas/url_parts_test.go
b/sdk/storage/azfile/sas/url_parts_test.go
new file mode 100644
index 000000000000..21691e0a7ae7
--- /dev/null
+++ b/sdk/storage/azfile/sas/url_parts_test.go
@@ -0,0 +1,75 @@
//go:build go1.18
// +build go1.18

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.

package sas

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/require"
)

// IP-style URLs must yield the account name from the first path segment.
func TestParseURLIPStyle(t *testing.T) {
	urlWithIP := "https://127.0.0.1:5000/fakestorageaccount"
	fileURLParts, err := ParseURL(urlWithIP)
	require.NoError(t, err)
	require.Equal(t, fileURLParts.Scheme, "https")
	require.Equal(t, fileURLParts.Host, "127.0.0.1:5000")
	require.Equal(t, fileURLParts.IPEndpointStyleInfo.AccountName, "fakestorageaccount")

	urlWithIP = "https://127.0.0.1:5000/fakestorageaccount/fakeshare"
	fileURLParts, err = ParseURL(urlWithIP)
	require.NoError(t, err)
	require.Equal(t, fileURLParts.Scheme, "https")
	require.Equal(t, fileURLParts.Host, "127.0.0.1:5000")
	require.Equal(t, fileURLParts.IPEndpointStyleInfo.AccountName, "fakestorageaccount")
	require.Equal(t, fileURLParts.ShareName, "fakeshare")

	urlWithIP = "https://127.0.0.1:5000/fakestorageaccount/fakeshare/fakefile"
	fileURLParts, err = ParseURL(urlWithIP)
	require.NoError(t, err)
	require.Equal(t, fileURLParts.Scheme, "https")
	require.Equal(t, fileURLParts.Host, "127.0.0.1:5000")
	require.Equal(t, fileURLParts.IPEndpointStyleInfo.AccountName, "fakestorageaccount")
	require.Equal(t, fileURLParts.ShareName, "fakeshare")
	require.Equal(t, fileURLParts.DirectoryOrFilePath, "fakefile")
}

func TestParseURL(t *testing.T) {
	testStorageAccount := "fakestorageaccount"
	host := fmt.Sprintf("%s.file.core.windows.net", testStorageAccount)
	testShare := "fakeshare"
	fileNames := []string{"/._.TESTT.txt", "/.gitignore/dummyfile1"}

	const sasStr = "sv=2019-12-12&sr=b&st=2111-01-09T01:42:34.936Z&se=2222-03-09T01:42:34.936Z&sp=rw&sip=168.1.5.60-168.1.5.70&spr=https,http&si=myIdentifier&ss=bf&srt=s&sig=clNxbtnkKSHw7f3KMEVVc4agaszoRFdbZr%2FWBmPNsrw%3D"

	for _, fileName := range fileNames {
		sasURL := fmt.Sprintf("https://%s.file.core.windows.net/%s%s?%s", testStorageAccount, testShare, fileName, sasStr)
		fileURLParts, err := ParseURL(sasURL)
		require.NoError(t, err)

		require.Equal(t, fileURLParts.Scheme, "https")
		require.Equal(t, fileURLParts.Host, host)
		require.Equal(t, fileURLParts.ShareName, testShare)

		validateSAS(t, sasStr, fileURLParts.SAS)
	}

	// Same URLs again, but with a leading sharesnapshot query parameter that
	// must be recognized (and separated from the SAS parameters).
	for _, fileName := range fileNames {
		shareSnapshotID := "2011-03-09T01:42:34Z"
		sasWithShareSnapshotID := "?sharesnapshot=" + shareSnapshotID + "&" + sasStr
		urlWithShareSnapshot := fmt.Sprintf("https://%s.file.core.windows.net/%s%s%s", testStorageAccount, testShare, fileName, sasWithShareSnapshotID)
		fileURLParts, err := ParseURL(urlWithShareSnapshot)
		require.NoError(t, err)

		require.Equal(t, fileURLParts.Scheme, "https")
		require.Equal(t, fileURLParts.Host, host)
		require.Equal(t, fileURLParts.ShareName, testShare)

		validateSAS(t, sasStr, fileURLParts.SAS)
	}
}
diff --git a/sdk/storage/azfile/service/client.go b/sdk/storage/azfile/service/client.go
new file mode 100644
index 000000000000..89bf5f02c5a3
--- /dev/null
+++ b/sdk/storage/azfile/service/client.go
@@ -0,0 +1,214 @@
//go:build go1.18
// +build go1.18

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
+ +package service + +import ( + "context" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/fileerror" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/base" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/sas" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/share" + "net/http" + "strings" + "time" +) + +// ClientOptions contains the optional parameters when creating a Client. +type ClientOptions base.ClientOptions + +// Client represents a URL to the Azure File Storage service allowing you to manipulate file shares. +type Client base.Client[generated.ServiceClient] + +// NewClientWithNoCredential creates an instance of Client with the specified values. +// This is used to anonymously access a storage account or with a shared access signature (SAS) token. +// - serviceURL - the URL of the storage account e.g. https://.file.core.windows.net/? +// - options - client options; pass nil to accept the default values +func NewClientWithNoCredential(serviceURL string, options *ClientOptions) (*Client, error) { + conOptions := shared.GetClientOptions(options) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*Client)(base.NewServiceClient(serviceURL, pl, nil)), nil +} + +// NewClientWithSharedKeyCredential creates an instance of Client with the specified values. +// - serviceURL - the URL of the storage account e.g. 
https://.file.core.windows.net/ +// - cred - a SharedKeyCredential created with the matching storage account and access key +// - options - client options; pass nil to accept the default values +func NewClientWithSharedKeyCredential(serviceURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) { + authPolicy := exported.NewSharedKeyCredPolicy(cred) + conOptions := shared.GetClientOptions(options) + conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*Client)(base.NewServiceClient(serviceURL, pl, cred)), nil +} + +// NewClientFromConnectionString creates an instance of Client with the specified values. +// - connectionString - a connection string for the desired storage account +// - options - client options; pass nil to accept the default values +func NewClientFromConnectionString(connectionString string, options *ClientOptions) (*Client, error) { + parsed, err := shared.ParseConnectionString(connectionString) + if err != nil { + return nil, err + } + + if parsed.AccountKey != "" && parsed.AccountName != "" { + credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey) + if err != nil { + return nil, err + } + return NewClientWithSharedKeyCredential(parsed.ServiceURL, credential, options) + } + + return NewClientWithNoCredential(parsed.ServiceURL, options) +} + +func (s *Client) generated() *generated.ServiceClient { + return base.InnerClient((*base.Client[generated.ServiceClient])(s)) +} + +func (s *Client) sharedKey() *SharedKeyCredential { + return base.SharedKey((*base.Client[generated.ServiceClient])(s)) +} + +// URL returns the URL endpoint used by the Client object. 
+func (s *Client) URL() string { + return s.generated().Endpoint() +} + +// NewShareClient creates a new share.Client object by concatenating shareName to the end of this Client's URL. +// The new share.Client uses the same request policy pipeline as the Client. +func (s *Client) NewShareClient(shareName string) *share.Client { + shareURL := runtime.JoinPaths(s.generated().Endpoint(), shareName) + return (*share.Client)(base.NewShareClient(shareURL, s.generated().Pipeline(), s.sharedKey())) +} + +// CreateShare is a lifecycle method to creates a new share under the specified account. +// If the share with the same name already exists, a ResourceExistsError will be raised. +// This method returns a client with which to interact with the newly created share. +// For more information see, https://learn.microsoft.com/en-us/rest/api/storageservices/create-share. +func (s *Client) CreateShare(ctx context.Context, shareName string, options *CreateShareOptions) (CreateShareResponse, error) { + shareClient := s.NewShareClient(shareName) + createShareResp, err := shareClient.Create(ctx, options) + return createShareResp, err +} + +// DeleteShare is a lifecycle method that marks the specified share for deletion. +// The share and any files contained within it are later deleted during garbage collection. +// If the share is not found, a ResourceNotFoundError will be raised. +// For more information see, https://learn.microsoft.com/en-us/rest/api/storageservices/delete-share. +func (s *Client) DeleteShare(ctx context.Context, shareName string, options *DeleteShareOptions) (DeleteShareResponse, error) { + shareClient := s.NewShareClient(shareName) + deleteShareResp, err := shareClient.Delete(ctx, options) + return deleteShareResp, err +} + +// RestoreShare restores soft-deleted share. +// Operation will only be successful if used within the specified number of days set in the delete retention policy. 
+// For more information see, https://learn.microsoft.com/en-us/rest/api/storageservices/restore-share. +func (s *Client) RestoreShare(ctx context.Context, deletedShareName string, deletedShareVersion string, options *RestoreShareOptions) (RestoreShareResponse, error) { + shareClient := s.NewShareClient(deletedShareName) + createShareResp, err := shareClient.Restore(ctx, deletedShareVersion, options) + return createShareResp, err +} + +// GetProperties operation gets the properties of a storage account's File service. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-file-service-properties. +func (s *Client) GetProperties(ctx context.Context, options *GetPropertiesOptions) (GetPropertiesResponse, error) { + opts := options.format() + resp, err := s.generated().GetProperties(ctx, opts) + return resp, err +} + +// SetProperties operation sets properties for a storage account's File service endpoint. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/set-file-service-properties. +func (s *Client) SetProperties(ctx context.Context, options *SetPropertiesOptions) (SetPropertiesResponse, error) { + svcProperties, o := options.format() + resp, err := s.generated().SetProperties(ctx, svcProperties, o) + return resp, err +} + +// NewListSharesPager operation returns a pager of the shares under the specified account. 
+// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/list-shares +func (s *Client) NewListSharesPager(options *ListSharesOptions) *runtime.Pager[ListSharesSegmentResponse] { + listOptions := generated.ServiceClientListSharesSegmentOptions{} + if options != nil { + if options.Include.Deleted { + listOptions.Include = append(listOptions.Include, ListSharesIncludeTypeDeleted) + } + if options.Include.Metadata { + listOptions.Include = append(listOptions.Include, ListSharesIncludeTypeMetadata) + } + if options.Include.Snapshots { + listOptions.Include = append(listOptions.Include, ListSharesIncludeTypeSnapshots) + } + listOptions.Marker = options.Marker + listOptions.Maxresults = options.MaxResults + listOptions.Prefix = options.Prefix + } + + return runtime.NewPager(runtime.PagingHandler[ListSharesSegmentResponse]{ + More: func(page ListSharesSegmentResponse) bool { + return page.NextMarker != nil && len(*page.NextMarker) > 0 + }, + Fetcher: func(ctx context.Context, page *ListSharesSegmentResponse) (ListSharesSegmentResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = s.generated().ListSharesSegmentCreateRequest(ctx, &listOptions) + } else { + listOptions.Marker = page.NextMarker + req, err = s.generated().ListSharesSegmentCreateRequest(ctx, &listOptions) + } + if err != nil { + return ListSharesSegmentResponse{}, err + } + resp, err := s.generated().Pipeline().Do(req) + if err != nil { + return ListSharesSegmentResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ListSharesSegmentResponse{}, runtime.NewResponseError(resp) + } + return s.generated().ListSharesSegmentHandleResponse(resp) + }, + }) +} + +// GetSASURL is a convenience method for generating a SAS token for the currently pointed at account. +// It can only be used if the credential supplied during creation was a SharedKeyCredential. 
+func (s *Client) GetSASURL(resources sas.AccountResourceTypes, permissions sas.AccountPermissions, expiry time.Time, o *GetSASURLOptions) (string, error) { + if s.sharedKey() == nil { + return "", fileerror.MissingSharedKeyCredential + } + st := o.format() + qps, err := sas.AccountSignatureValues{ + Version: sas.Version, + Protocol: sas.ProtocolHTTPS, + Permissions: permissions.String(), + ResourceTypes: resources.String(), + StartTime: st, + ExpiryTime: expiry.UTC(), + }.SignWithSharedKey(s.sharedKey()) + if err != nil { + return "", err + } + + endpoint := s.URL() + if !strings.HasSuffix(endpoint, "/") { + // add a trailing slash to be consistent with the portal + endpoint += "/" + } + endpoint += "?" + qps.Encode() + + return endpoint, nil +} diff --git a/sdk/storage/azfile/service/client_test.go b/sdk/storage/azfile/service/client_test.go new file mode 100644 index 000000000000..d9c3642c4628 --- /dev/null +++ b/sdk/storage/azfile/service/client_test.go @@ -0,0 +1,454 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
package service_test

import (
	"context"
	"fmt"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/internal/recording"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/fileerror"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/testcommon"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/sas"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/service"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/share"
	"github.com/stretchr/testify/require"
	"github.com/stretchr/testify/suite"
	"strconv"
	"testing"
	"time"
)

// Test is the suite entry point. Unrecorded tests only run in live mode;
// in playback/recording modes only the recorded suite runs.
func Test(t *testing.T) {
	recordMode := recording.GetRecordMode()
	t.Logf("Running service Tests in %s mode\n", recordMode)
	if recordMode == recording.LiveMode {
		suite.Run(t, &ServiceRecordedTestsSuite{})
		suite.Run(t, &ServiceUnrecordedTestsSuite{})
	} else if recordMode == recording.PlaybackMode {
		suite.Run(t, &ServiceRecordedTestsSuite{})
	} else if recordMode == recording.RecordingMode {
		suite.Run(t, &ServiceRecordedTestsSuite{})
	}
}

// BeforeTest starts the recording session for each recorded test.
func (s *ServiceRecordedTestsSuite) BeforeTest(suite string, test string) {
	testcommon.BeforeTest(s.T(), suite, test)
}

// AfterTest stops the recording session for each recorded test.
func (s *ServiceRecordedTestsSuite) AfterTest(suite string, test string) {
	testcommon.AfterTest(s.T(), suite, test)
}

// Unrecorded tests run only against live resources, so no recording hooks are needed.
func (s *ServiceUnrecordedTestsSuite) BeforeTest(suite string, test string) {

}

func (s *ServiceUnrecordedTestsSuite) AfterTest(suite string, test string) {

}

// ServiceRecordedTestsSuite holds tests that are run through the test-proxy recorder.
type ServiceRecordedTestsSuite struct {
	suite.Suite
}

// ServiceUnrecordedTestsSuite holds tests that only run live (e.g. SAS signing).
type ServiceUnrecordedTestsSuite struct {
	suite.Suite
}

func (s *ServiceRecordedTestsSuite) TestAccountNewServiceURLValidName() {
	_require := require.New(s.T())

	accountName, _ := testcommon.GetGenericAccountInfo(testcommon.TestAccountDefault)
	_require.Greater(len(accountName), 0)

	svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil)
	_require.NoError(err)

	correctURL := "https://" + accountName + "." + testcommon.DefaultFileEndpointSuffix
	_require.Equal(svcClient.URL(), correctURL)
}

func (s *ServiceRecordedTestsSuite) TestAccountNewShareURLValidName() {
	_require := require.New(s.T())
	testName := s.T().Name()

	accountName, _ := testcommon.GetGenericAccountInfo(testcommon.TestAccountDefault)
	_require.Greater(len(accountName), 0)

	svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil)
	_require.NoError(err)

	shareName := testcommon.GenerateShareName(testName)
	shareClient := svcClient.NewShareClient(shareName)
	_require.NoError(err)

	correctURL := "https://" + accountName + "." + testcommon.DefaultFileEndpointSuffix + shareName
	_require.Equal(shareClient.URL(), correctURL)
}

func (s *ServiceRecordedTestsSuite) TestServiceClientFromConnectionString() {
	_require := require.New(s.T())

	svcClient, err := testcommon.GetServiceClientFromConnectionString(s.T(), testcommon.TestAccountDefault, nil)
	_require.NoError(err)

	resp, err := svcClient.GetProperties(context.Background(), nil)
	_require.NoError(err)
	_require.NotNil(resp.RequestID)
}

// TestAccountProperties sets metrics/CORS service properties and reads them back.
func (s *ServiceRecordedTestsSuite) TestAccountProperties() {
	_require := require.New(s.T())

	svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil)
	_require.NoError(err)

	setPropertiesOptions := &service.SetPropertiesOptions{
		HourMetrics: &service.Metrics{
			Enabled:     to.Ptr(true),
			IncludeAPIs: to.Ptr(true),
			RetentionPolicy: &service.RetentionPolicy{
				Enabled: to.Ptr(true),
				Days:    to.Ptr(int32(2)),
			},
		},
		MinuteMetrics: &service.Metrics{
			Enabled:     to.Ptr(true),
			IncludeAPIs: to.Ptr(false),
			RetentionPolicy: &service.RetentionPolicy{
				Enabled: to.Ptr(true),
				Days:    to.Ptr(int32(2)),
			},
		},
		CORS: []*service.CORSRule{
			{
				AllowedOrigins:  to.Ptr("*"),
				AllowedMethods:  to.Ptr("PUT"),
				AllowedHeaders:  to.Ptr("x-ms-client-request-id"),
				ExposedHeaders:  to.Ptr("x-ms-*"),
				MaxAgeInSeconds: to.Ptr(int32(2)),
			},
		},
	}

	setPropsResp, err := svcClient.SetProperties(context.Background(), setPropertiesOptions)
	_require.NoError(err)
	_require.NotNil(setPropsResp.RequestID)

	// wait before reading the properties back — presumably to let them propagate service-side
	time.Sleep(time.Second * 30)

	getPropsResp, err := svcClient.GetProperties(context.Background(), nil)
	_require.NoError(err)
	_require.NotNil(getPropsResp.RequestID)
	_require.EqualValues(getPropsResp.HourMetrics.RetentionPolicy.Enabled, setPropertiesOptions.HourMetrics.RetentionPolicy.Enabled)
	_require.EqualValues(getPropsResp.HourMetrics.RetentionPolicy.Days, setPropertiesOptions.HourMetrics.RetentionPolicy.Days)
	_require.EqualValues(getPropsResp.MinuteMetrics.RetentionPolicy.Enabled, setPropertiesOptions.MinuteMetrics.RetentionPolicy.Enabled)
	_require.EqualValues(getPropsResp.MinuteMetrics.RetentionPolicy.Days, setPropertiesOptions.MinuteMetrics.RetentionPolicy.Days)
	_require.EqualValues(len(getPropsResp.CORS), len(setPropertiesOptions.CORS))
}

func (s *ServiceRecordedTestsSuite) TestAccountHourMetrics() {
	_require := require.New(s.T())

	svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil)
	_require.NoError(err)

	setPropertiesOptions := &service.SetPropertiesOptions{
		HourMetrics: &service.Metrics{
			Enabled:     to.Ptr(true),
			IncludeAPIs: to.Ptr(true),
			RetentionPolicy: &service.RetentionPolicy{
				Enabled: to.Ptr(true),
				Days:    to.Ptr(int32(5)),
			},
		},
	}
	_, err = svcClient.SetProperties(context.Background(), setPropertiesOptions)
	_require.NoError(err)
}

// TestAccountListSharesNonDefault exercises prefix filtering, metadata/snapshot
// include options, and max-results paging of NewListSharesPager.
func (s *ServiceRecordedTestsSuite) TestAccountListSharesNonDefault() {
	_require := require.New(s.T())
	testName := s.T().Name()

	svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil)
	_require.NoError(err)

	mySharePrefix := testcommon.GenerateEntityName(testName)
	pager := svcClient.NewListSharesPager(&service.ListSharesOptions{
		Prefix: to.Ptr(mySharePrefix),
	})
	// No shares have been created under this prefix yet, so the listing is empty.
	for pager.More() {
		resp, err := pager.NextPage(context.Background())
		_require.NoError(err)
		_require.NotNil(resp.Prefix)
		_require.Equal(*resp.Prefix, mySharePrefix)
		_require.NotNil(resp.ServiceEndpoint)
		_require.NotNil(resp.Version)
		_require.Len(resp.Shares, 0)
	}

	shareClients := map[string]*share.Client{}
	for i := 0; i < 4; i++ {
		shareName := mySharePrefix + "share" + strconv.Itoa(i)
		shareClients[shareName] = testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient)
		defer testcommon.DeleteShare(context.Background(), _require, shareClients[shareName])

		_, err := shareClients[shareName].SetMetadata(context.Background(), &share.SetMetadataOptions{
			Metadata: testcommon.BasicMetadata,
		})
		_require.NoError(err)
	}

	pager = svcClient.NewListSharesPager(&service.ListSharesOptions{
		Include:    service.ListSharesInclude{Metadata: true, Snapshots: true},
		Prefix:     to.Ptr(mySharePrefix),
		MaxResults: to.Ptr(int32(2)),
	})

	for pager.More() {
		resp, err := pager.NextPage(context.Background())
		_require.NoError(err)
		// 4 shares with MaxResults=2 => each non-empty page holds exactly 2.
		if len(resp.Shares) > 0 {
			_require.Len(resp.Shares, 2)
		}
		for _, shareItem := range resp.Shares {
			_require.NotNil(shareItem.Properties)
			_require.NotNil(shareItem.Properties.LastModified)
			_require.NotNil(shareItem.Properties.ETag)
			_require.EqualValues(shareItem.Metadata, testcommon.BasicMetadata)
		}
	}
}

// TestSASServiceClientRestoreShare creates, deletes, and restores a share
// using an account SAS generated from a shared-key credential.
func (s *ServiceUnrecordedTestsSuite) TestSASServiceClientRestoreShare() {
	_require := require.New(s.T())
	testName := s.T().Name()
	cred, _ := testcommon.GetGenericSharedKeyCredential(testcommon.TestAccountDefault)

	serviceClient, err := service.NewClientWithSharedKeyCredential(fmt.Sprintf("https://%s.file.core.windows.net/", cred.AccountName()), cred, nil)
	_require.NoError(err)

	shareName := testcommon.GenerateShareName(testName)

	// Note: Always set all permissions, services, types to true to ensure order of string formed is correct.
	resources := sas.AccountResourceTypes{
		Object:    true,
		Service:   true,
		Container: true,
	}
	permissions := sas.AccountPermissions{
		Read:   true,
		Write:  true,
		Delete: true,
		List:   true,
		Create: true,
	}
	expiry := time.Now().Add(time.Hour)
	sasUrl, err := serviceClient.GetSASURL(resources, permissions, expiry, nil)
	_require.NoError(err)

	svcClient, err := testcommon.GetServiceClientNoCredential(s.T(), sasUrl, nil)
	_require.NoError(err)

	resp, err := svcClient.GetProperties(context.Background(), nil)
	_require.NoError(err)
	_require.NotNil(resp.RequestID)

	// create share using account SAS
	_, err = svcClient.CreateShare(context.Background(), shareName, nil)
	_require.NoError(err)

	defer func() {
		_, err := svcClient.DeleteShare(context.Background(), shareName, nil)
		_require.NoError(err)
	}()

	_, err = svcClient.DeleteShare(context.Background(), shareName, nil)
	_require.NoError(err)

	// wait for share deletion
	time.Sleep(60 * time.Second)

	sharesCnt := 0
	shareVersion := ""

	pager := svcClient.NewListSharesPager(&service.ListSharesOptions{
		Include: service.ListSharesInclude{Deleted: true},
		Prefix:  &shareName,
	})

	// The share should only appear in the listing as soft-deleted; capture its version.
	for pager.More() {
		resp, err := pager.NextPage(context.Background())
		_require.NoError(err)
		for _, s := range resp.Shares {
			if s.Deleted != nil && *s.Deleted {
				_require.NotNil(s.Version)
				shareVersion = *s.Version
			} else {
				sharesCnt++
			}
		}
	}

	_require.Equal(sharesCnt, 0)
	_require.NotEmpty(shareVersion)

	restoreResp, err := svcClient.RestoreShare(context.Background(), shareName, shareVersion, nil)
	_require.NoError(err)
	_require.NotNil(restoreResp.RequestID)

	// After the restore, exactly one live share should match the prefix again.
	sharesCnt = 0
	pager = svcClient.NewListSharesPager(&service.ListSharesOptions{
		Prefix: &shareName,
	})

	for pager.More() {
		resp, err := pager.NextPage(context.Background())
		_require.NoError(err)
		sharesCnt += len(resp.Shares)
	}
	_require.Equal(sharesCnt, 1)
}

// TestSASServiceClientNoKey verifies that GetSASURL fails without a shared key credential.
func (s *ServiceRecordedTestsSuite) TestSASServiceClientNoKey() {
	_require := require.New(s.T())
	accountName, _ := testcommon.GetGenericAccountInfo(testcommon.TestAccountDefault)
	_require.Greater(len(accountName), 0)

	serviceClient, err := service.NewClientWithNoCredential(fmt.Sprintf("https://%s.file.core.windows.net/", accountName), nil)
	_require.NoError(err)
	resources := sas.AccountResourceTypes{
		Object:    true,
		Service:   true,
		Container: true,
	}
	permissions := sas.AccountPermissions{
		Read:   true,
		Write:  true,
		Delete: true,
		List:   true,
		Create: true,
	}

	expiry := time.Now().Add(time.Hour)
	_, err = serviceClient.GetSASURL(resources, permissions, expiry, nil)
	_require.Equal(err, fileerror.MissingSharedKeyCredential)
}

// TestSASServiceClientSignNegative verifies GetSASURL error cases for
// missing expiry and empty permissions/resource types.
func (s *ServiceRecordedTestsSuite) TestSASServiceClientSignNegative() {
	_require := require.New(s.T())
	accountName, accountKey := testcommon.GetGenericAccountInfo(testcommon.TestAccountDefault)
	_require.Greater(len(accountName), 0)
	_require.Greater(len(accountKey), 0)

	cred, err := service.NewSharedKeyCredential(accountName, accountKey)
	_require.NoError(err)

	serviceClient, err := service.NewClientWithSharedKeyCredential(fmt.Sprintf("https://%s.file.core.windows.net/", accountName), cred, nil)
	_require.NoError(err)
	resources := sas.AccountResourceTypes{
		Object:    true,
		Service:   true,
		Container: true,
	}
	permissions := sas.AccountPermissions{
		Read:   true,
		Write:  true,
		Delete: true,
		List:   true,
		Create: true,
	}
	expiry := time.Time{}

	// zero expiry time
	_, err = serviceClient.GetSASURL(resources, permissions, expiry, &service.GetSASURLOptions{StartTime: to.Ptr(time.Now())})
	_require.Equal(err.Error(), "account SAS is missing at least one of these: ExpiryTime, Permissions, Service, or ResourceType")

	// zero start and expiry time
	_, err = serviceClient.GetSASURL(resources, permissions, expiry, &service.GetSASURLOptions{})
	_require.Equal(err.Error(), "account SAS is missing at least one of these: ExpiryTime, Permissions, Service, or ResourceType")

	// empty permissions
	_, err = serviceClient.GetSASURL(sas.AccountResourceTypes{}, sas.AccountPermissions{}, expiry, nil)
	_require.Equal(err.Error(), "account SAS is missing at least one of these: ExpiryTime, Permissions, Service, or ResourceType")
}

func (s *ServiceRecordedTestsSuite) TestServiceSetPropertiesDefault() {
	_require := require.New(s.T())

	svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil)
	_require.NoError(err)

	_, err = svcClient.SetProperties(context.Background(), nil)
	_require.NoError(err)
}

// TestServiceCreateDeleteRestoreShare is the shared-key analogue of the SAS
// restore test above: create, soft-delete, then restore a share by version.
func (s *ServiceRecordedTestsSuite) TestServiceCreateDeleteRestoreShare() {
	_require := require.New(s.T())
	testName := s.T().Name()

	svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil)
	_require.NoError(err)

	shareName := testcommon.GenerateShareName(testName)

	_, err = svcClient.CreateShare(context.Background(), shareName, nil)
	_require.NoError(err)

	defer func() {
		_, err := svcClient.DeleteShare(context.Background(), shareName, nil)
		_require.NoError(err)
	}()

	_, err = svcClient.DeleteShare(context.Background(), shareName, nil)
	_require.NoError(err)

	// wait for share deletion
	time.Sleep(60 * time.Second)

	sharesCnt := 0
	shareVersion := ""

	pager := svcClient.NewListSharesPager(&service.ListSharesOptions{
		Include: service.ListSharesInclude{Deleted: true},
		Prefix:  &shareName,
	})

	for pager.More() {
		resp, err := pager.NextPage(context.Background())
		_require.NoError(err)
		for _, s := range resp.Shares {
			if s.Deleted != nil && *s.Deleted {
				_require.NotNil(s.Version)
				shareVersion = *s.Version
			} else {
				sharesCnt++
			}
		}
	}

	_require.Equal(sharesCnt, 0)
	_require.NotEmpty(shareVersion)

	restoreResp, err := svcClient.RestoreShare(context.Background(), shareName, shareVersion, nil)
	_require.NoError(err)
	_require.NotNil(restoreResp.RequestID)

	sharesCnt = 0
	pager = svcClient.NewListSharesPager(&service.ListSharesOptions{
		Prefix: &shareName,
	})

	for pager.More() {
		resp, err := pager.NextPage(context.Background())
		_require.NoError(err)
		sharesCnt += len(resp.Shares)
	}
	_require.Equal(sharesCnt, 1)
}

//go:build go1.18
// +build go1.18

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.

package service

import "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/generated"

// ListSharesIncludeType defines values for ListSharesIncludeType
type ListSharesIncludeType = generated.ListSharesIncludeType

const (
	ListSharesIncludeTypeSnapshots ListSharesIncludeType = generated.ListSharesIncludeTypeSnapshots
	ListSharesIncludeTypeMetadata  ListSharesIncludeType = generated.ListSharesIncludeTypeMetadata
	ListSharesIncludeTypeDeleted   ListSharesIncludeType = generated.ListSharesIncludeTypeDeleted
)

// PossibleListSharesIncludeTypeValues returns the possible values for the ListSharesIncludeType const type.
func PossibleListSharesIncludeTypeValues() []ListSharesIncludeType {
	return generated.PossibleListSharesIncludeTypeValues()
}

// ShareRootSquash defines values for the root squashing behavior on the share when NFS is enabled. If it's not specified, the default is NoRootSquash.
type ShareRootSquash = generated.ShareRootSquash

const (
	RootSquashNoRootSquash ShareRootSquash = generated.ShareRootSquashNoRootSquash
	RootSquashRootSquash   ShareRootSquash = generated.ShareRootSquashRootSquash
	RootSquashAllSquash    ShareRootSquash = generated.ShareRootSquashAllSquash
)

// PossibleShareRootSquashValues returns the possible values for the RootSquash const type.
func PossibleShareRootSquashValues() []ShareRootSquash {
	return generated.PossibleShareRootSquashValues()
}

//go:build go1.18
// +build go1.18

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.

package service_test

import (
	"context"
	"fmt"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/sas"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/service"
	"log"
	"os"
	"time"
)

// handleError aborts the example on any error; examples have no other failure path.
func handleError(err error) {
	if err != nil {
		log.Fatal(err.Error())
	}
}

// Example: construct a service client from a shared key credential.
func Example_service_Client_NewClient() {
	accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME")
	if !ok {
		panic("AZURE_STORAGE_ACCOUNT_NAME could not be found")
	}
	accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY")
	if !ok {
		panic("AZURE_STORAGE_ACCOUNT_KEY could not be found")
	}

	serviceURL := fmt.Sprintf("https://%s.file.core.windows.net/", accountName)

	cred, err := service.NewSharedKeyCredential(accountName, accountKey)
	handleError(err)

	svcClient, err := service.NewClientWithSharedKeyCredential(serviceURL, cred, nil)
	handleError(err)

	fmt.Println(svcClient.URL())
}

// Example: construct a service client from a connection string.
func Example_service_NewClientFromConnectionString() {
	// Your connection string can be obtained from the Azure Portal.
	connectionString, ok := os.LookupEnv("AZURE_STORAGE_CONNECTION_STRING")
	if !ok {
		log.Fatal("the environment variable 'AZURE_STORAGE_CONNECTION_STRING' could not be found")
	}

	svcClient, err := service.NewClientFromConnectionString(connectionString, nil)
	handleError(err)

	fmt.Println(svcClient.URL())
}

// Example: derive a share-scoped client from the service client.
func Example_service_Client_NewShareClient() {
	accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME")
	if !ok {
		panic("AZURE_STORAGE_ACCOUNT_NAME could not be found")
	}
	accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY")
	if !ok {
		panic("AZURE_STORAGE_ACCOUNT_KEY could not be found")
	}

	serviceURL := fmt.Sprintf("https://%s.file.core.windows.net/", accountName)

	cred, err := service.NewSharedKeyCredential(accountName, accountKey)
	handleError(err)

	svcClient, err := service.NewClientWithSharedKeyCredential(serviceURL, cred, nil)
	handleError(err)

	shareName := "testShare"
	shareClient := svcClient.NewShareClient(shareName)

	fmt.Println(shareClient.URL())
}

// Example: create a share.
func Example_service_Client_CreateShare() {
	accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME")
	if !ok {
		panic("AZURE_STORAGE_ACCOUNT_NAME could not be found")
	}
	accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY")
	if !ok {
		panic("AZURE_STORAGE_ACCOUNT_KEY could not be found")
	}

	serviceURL := fmt.Sprintf("https://%s.file.core.windows.net/", accountName)

	cred, err := service.NewSharedKeyCredential(accountName, accountKey)
	handleError(err)

	svcClient, err := service.NewClientWithSharedKeyCredential(serviceURL, cred, nil)
	handleError(err)

	shareName := "testShare"
	_, err = svcClient.CreateShare(context.TODO(), shareName, nil)
	handleError(err)
	fmt.Println("Share created")
}

// Example: delete a share.
func Example_service_Client_DeleteShare() {
	accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME")
	if !ok {
		panic("AZURE_STORAGE_ACCOUNT_NAME could not be found")
	}
	accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY")
	if !ok {
		panic("AZURE_STORAGE_ACCOUNT_KEY could not be found")
	}

	serviceURL := fmt.Sprintf("https://%s.file.core.windows.net/", accountName)

	cred, err := service.NewSharedKeyCredential(accountName, accountKey)
	handleError(err)

	svcClient, err := service.NewClientWithSharedKeyCredential(serviceURL, cred, nil)
	handleError(err)

	shareName := "testShare"
	_, err = svcClient.DeleteShare(context.TODO(), shareName, nil)
	handleError(err)
	fmt.Println("Share deleted")
}

// Example: restore every soft-deleted share found by a Deleted-inclusive listing.
func Example_service_Client_RestoreShare() {
	accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME")
	if !ok {
		panic("AZURE_STORAGE_ACCOUNT_NAME could not be found")
	}
	accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY")
	if !ok {
		panic("AZURE_STORAGE_ACCOUNT_KEY could not be found")
	}

	serviceURL := fmt.Sprintf("https://%s.file.core.windows.net/", accountName)

	cred, err := service.NewSharedKeyCredential(accountName, accountKey)
	handleError(err)

	svcClient, err := service.NewClientWithSharedKeyCredential(serviceURL, cred, nil)
	handleError(err)

	// get share version for restore operation
	pager := svcClient.NewListSharesPager(&service.ListSharesOptions{
		Include: service.ListSharesInclude{Deleted: true}, // Include deleted shares in the result
	})

	for pager.More() {
		resp, err := pager.NextPage(context.Background())
		handleError(err)
		for _, s := range resp.Shares {
			if s.Deleted != nil && *s.Deleted {
				_, err = svcClient.RestoreShare(context.TODO(), *s.Name, *s.Version, nil)
				handleError(err)
			}
		}
	}
}

// Example: read the File service properties.
func Example_service_Client_GetProperties() {
	accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME")
	if !ok {
		panic("AZURE_STORAGE_ACCOUNT_NAME could not be found")
	}
	accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY")
	if !ok {
		panic("AZURE_STORAGE_ACCOUNT_KEY could not be found")
	}

	serviceURL := fmt.Sprintf("https://%s.file.core.windows.net/", accountName)

	cred, err := service.NewSharedKeyCredential(accountName, accountKey)
	handleError(err)

	svcClient, err := service.NewClientWithSharedKeyCredential(serviceURL, cred, nil)
	handleError(err)

	_, err = svcClient.GetProperties(context.TODO(), nil)
	handleError(err)
}

// Example: configure metrics and CORS via SetProperties.
func Example_service_Client_SetProperties() {
	accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME")
	if !ok {
		panic("AZURE_STORAGE_ACCOUNT_NAME could not be found")
	}
	accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY")
	if !ok {
		panic("AZURE_STORAGE_ACCOUNT_KEY could not be found")
	}

	serviceURL := fmt.Sprintf("https://%s.file.core.windows.net/", accountName)

	cred, err := service.NewSharedKeyCredential(accountName, accountKey)
	handleError(err)

	svcClient, err := service.NewClientWithSharedKeyCredential(serviceURL, cred, nil)
	handleError(err)

	setPropertiesOpts := service.SetPropertiesOptions{
		HourMetrics: &service.Metrics{
			Enabled:     to.Ptr(true),
			IncludeAPIs: to.Ptr(true),
			RetentionPolicy: &service.RetentionPolicy{
				Enabled: to.Ptr(true),
				Days:    to.Ptr(int32(2)),
			},
		},
		MinuteMetrics: &service.Metrics{
			Enabled:     to.Ptr(true),
			IncludeAPIs: to.Ptr(false),
			RetentionPolicy: &service.RetentionPolicy{
				Enabled: to.Ptr(true),
				Days:    to.Ptr(int32(2)),
			},
		},
		CORS: []*service.CORSRule{
			{
				AllowedOrigins:  to.Ptr("*"),
				AllowedMethods:  to.Ptr("PUT"),
				AllowedHeaders:  to.Ptr("x-ms-client-request-id"),
				ExposedHeaders:  to.Ptr("x-ms-*"),
				MaxAgeInSeconds: to.Ptr(int32(2)),
			},
		},
	}
	_, err = svcClient.SetProperties(context.TODO(), &setPropertiesOpts)
	handleError(err)
}

// Example: enumerate all shares in the account.
func Example_service_Client_ListShares() {
	accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME")
	if !ok {
		panic("AZURE_STORAGE_ACCOUNT_NAME could not be found")
	}
	accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY")
	if !ok {
		panic("AZURE_STORAGE_ACCOUNT_KEY could not be found")
	}

	serviceURL := fmt.Sprintf("https://%s.file.core.windows.net/", accountName)

	cred, err := service.NewSharedKeyCredential(accountName, accountKey)
	handleError(err)

	svcClient, err := service.NewClientWithSharedKeyCredential(serviceURL, cred, nil)
	handleError(err)

	pager := svcClient.NewListSharesPager(nil)

	for pager.More() {
		resp, err := pager.NextPage(context.Background())
		handleError(err)
		for _, s := range resp.Shares {
			fmt.Println(*s.Name)
		}
	}
}

// Example: generate an account SAS URL and use it with a credential-free client.
func Example_service_Client_GetSASURL() {
	accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME")
	if !ok {
		panic("AZURE_STORAGE_ACCOUNT_NAME could not be found")
	}
	accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY")
	if !ok {
		panic("AZURE_STORAGE_ACCOUNT_KEY could not be found")
	}

	serviceURL := fmt.Sprintf("https://%s.file.core.windows.net/", accountName)

	cred, err := service.NewSharedKeyCredential(accountName, accountKey)
	handleError(err)

	svcClient, err := service.NewClientWithSharedKeyCredential(serviceURL, cred, nil)
	handleError(err)

	resources := sas.AccountResourceTypes{
		Object:    true,
		Service:   true,
		Container: true,
	}
	permissions := sas.AccountPermissions{
		Read:   true,
		Write:  true,
		Delete: true,
		List:   true,
		Create: true,
	}
	expiry := time.Now().Add(time.Hour)
	sasUrl, err := svcClient.GetSASURL(resources, permissions, expiry, nil)
	handleError(err)

	fmt.Println("SAS URL: ", sasUrl)

	svcSASClient, err := service.NewClientWithNoCredential(sasUrl, nil)
	handleError(err)

	_, err = svcSASClient.GetProperties(context.TODO(), nil)
	handleError(err)
}

//go:build go1.18
// +build go1.18

// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information. + +package service + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/share" + "time" +) + +// SharedKeyCredential contains an account's name and its primary or secondary key. +type SharedKeyCredential = exported.SharedKeyCredential + +// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the +// storage account's name and either its primary or secondary key. +func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) { + return exported.NewSharedKeyCredential(accountName, accountKey) +} + +// CreateShareOptions contains the optional parameters for the share.Client.Create method. +type CreateShareOptions = share.CreateOptions + +// DeleteShareOptions contains the optional parameters for the share.Client.Delete method. +type DeleteShareOptions = share.DeleteOptions + +// RestoreShareOptions contains the optional parameters for the share.Client.Restore method. 
+type RestoreShareOptions = share.RestoreOptions + +// --------------------------------------------------------------------------------------------------------------------- + +// GetPropertiesOptions provides set of options for Client.GetProperties +type GetPropertiesOptions struct { + // placeholder for future options +} + +func (o *GetPropertiesOptions) format() *generated.ServiceClientGetPropertiesOptions { + return nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// SetPropertiesOptions provides set of options for Client.SetProperties +type SetPropertiesOptions struct { + // The set of CORS rules. + CORS []*CORSRule + + // A summary of request statistics grouped by API in hourly aggregates for files. + HourMetrics *Metrics + + // A summary of request statistics grouped by API in minute aggregates for files. + MinuteMetrics *Metrics + + // Protocol settings + Protocol *ProtocolSettings +} + +func (o *SetPropertiesOptions) format() (generated.StorageServiceProperties, *generated.ServiceClientSetPropertiesOptions) { + if o == nil { + return generated.StorageServiceProperties{}, nil + } + + formatMetrics(o.HourMetrics) + formatMetrics(o.MinuteMetrics) + + return generated.StorageServiceProperties{ + CORS: o.CORS, + HourMetrics: o.HourMetrics, + MinuteMetrics: o.MinuteMetrics, + Protocol: o.Protocol, + }, nil +} + +// update version of Storage Analytics to configure. Use 1.0 for this value. +func formatMetrics(m *Metrics) { + if m == nil { + return + } + + m.Version = to.Ptr(shared.StorageAnalyticsVersion) +} + +// StorageServiceProperties - Storage service properties. +type StorageServiceProperties = generated.StorageServiceProperties + +// CORSRule - CORS is an HTTP feature that enables a web application running under one domain to access resources in +// another domain. 
Web browsers implement a security restriction known as same-origin policy that +// prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin +// domain) to call APIs in another domain. +type CORSRule = generated.CORSRule + +// Metrics - Storage Analytics metrics for file service. +type Metrics = generated.Metrics + +// RetentionPolicy - The retention policy. +type RetentionPolicy = generated.RetentionPolicy + +// ProtocolSettings - Protocol settings +type ProtocolSettings = generated.ProtocolSettings + +// SMBSettings - Settings for SMB protocol. +type SMBSettings = generated.SMBSettings + +// SMBMultichannel - Settings for SMB multichannel +type SMBMultichannel = generated.SMBMultichannel + +// --------------------------------------------------------------------------------------------------------------------- + +// ListSharesOptions contains the optional parameters for the Client.NewListSharesPager method. +type ListSharesOptions struct { + // Include this parameter to specify one or more datasets to include in the responseBody. + Include ListSharesInclude + + // A string value that identifies the portion of the list to be returned with the next list operation. The operation returns + // a marker value within the responseBody body if the list returned was not complete. + // The marker value may then be used in a subsequent call to request the next set of list items. The marker value is opaque + // to the client. + Marker *string + + // Specifies the maximum number of entries to return. If the request does not specify maxresults, or specifies a value greater + // than 5,000, the server will return up to 5,000 items. + MaxResults *int32 + + // Filters the results to return only entries whose name begins with the specified prefix. + Prefix *string +} + +// ListSharesInclude indicates what additional information the service should return with each share. 
+type ListSharesInclude struct { + // Tells the service whether to return metadata for each share. + Metadata bool + + // Tells the service whether to return soft-deleted shares. + Deleted bool + + // Tells the service whether to return share snapshots. + Snapshots bool +} + +// Share - A listed Azure Storage share item. +type Share = generated.Share + +// ShareProperties - Properties of a share. +type ShareProperties = generated.ShareProperties + +// --------------------------------------------------------------------------------------------------------------------- + +// GetSASURLOptions contains the optional parameters for the Client.GetSASURL method. +type GetSASURLOptions struct { + StartTime *time.Time +} + +func (o *GetSASURLOptions) format() time.Time { + if o == nil { + return time.Time{} + } + + var st time.Time + if o.StartTime != nil { + st = o.StartTime.UTC() + } else { + st = time.Time{} + } + return st +} diff --git a/sdk/storage/azfile/service/responses.go b/sdk/storage/azfile/service/responses.go new file mode 100644 index 000000000000..fad91de63547 --- /dev/null +++ b/sdk/storage/azfile/service/responses.go @@ -0,0 +1,30 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package service + +import "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/generated" + +// CreateShareResponse contains the response from method share.Client.Create. +type CreateShareResponse = generated.ShareClientCreateResponse + +// DeleteShareResponse contains the response from method share.Client.Delete. +type DeleteShareResponse = generated.ShareClientDeleteResponse + +// RestoreShareResponse contains the response from method share.Client.Restore. +type RestoreShareResponse = generated.ShareClientRestoreResponse + +// GetPropertiesResponse contains the response from method Client.GetProperties. 
+type GetPropertiesResponse = generated.ServiceClientGetPropertiesResponse + +// SetPropertiesResponse contains the response from method Client.SetProperties. +type SetPropertiesResponse = generated.ServiceClientSetPropertiesResponse + +// ListSharesSegmentResponse contains the response from method Client.NewListSharesPager. +type ListSharesSegmentResponse = generated.ServiceClientListSharesSegmentResponse + +// ListSharesResponse - An enumeration of shares. +type ListSharesResponse = generated.ListSharesResponse diff --git a/sdk/storage/azfile/share/client.go b/sdk/storage/azfile/share/client.go new file mode 100644 index 000000000000..aac826a8a6c1 --- /dev/null +++ b/sdk/storage/azfile/share/client.go @@ -0,0 +1,258 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package share + +import ( + "context" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/directory" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/fileerror" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/base" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/sas" + "net/url" + "time" +) + +// ClientOptions contains the optional parameters when creating a Client. +type ClientOptions base.ClientOptions + +// Client represents a URL to the Azure Storage share allowing you to manipulate its directories and files. +type Client base.Client[generated.ShareClient] + +// NewClientWithNoCredential creates an instance of Client with the specified values. +// This is used to anonymously access a share or with a shared access signature (SAS) token. 
+// - shareURL - the URL of the share e.g. https://.file.core.windows.net/share? +// - options - client options; pass nil to accept the default values +func NewClientWithNoCredential(shareURL string, options *ClientOptions) (*Client, error) { + conOptions := shared.GetClientOptions(options) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*Client)(base.NewShareClient(shareURL, pl, nil)), nil +} + +// NewClientWithSharedKeyCredential creates an instance of Client with the specified values. +// - shareURL - the URL of the share e.g. https://.file.core.windows.net/share +// - cred - a SharedKeyCredential created with the matching share's storage account and access key +// - options - client options; pass nil to accept the default values +func NewClientWithSharedKeyCredential(shareURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) { + authPolicy := exported.NewSharedKeyCredPolicy(cred) + conOptions := shared.GetClientOptions(options) + conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*Client)(base.NewShareClient(shareURL, pl, cred)), nil +} + +// NewClientFromConnectionString creates an instance of Client with the specified values. 
+// - connectionString - a connection string for the desired storage account +// - shareName - the name of the share within the storage account +// - options - client options; pass nil to accept the default values +func NewClientFromConnectionString(connectionString string, shareName string, options *ClientOptions) (*Client, error) { + parsed, err := shared.ParseConnectionString(connectionString) + if err != nil { + return nil, err + } + parsed.ServiceURL = runtime.JoinPaths(parsed.ServiceURL, shareName) + + if parsed.AccountKey != "" && parsed.AccountName != "" { + credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey) + if err != nil { + return nil, err + } + return NewClientWithSharedKeyCredential(parsed.ServiceURL, credential, options) + } + + return NewClientWithNoCredential(parsed.ServiceURL, options) +} + +func (s *Client) generated() *generated.ShareClient { + return base.InnerClient((*base.Client[generated.ShareClient])(s)) +} + +func (s *Client) sharedKey() *SharedKeyCredential { + return base.SharedKey((*base.Client[generated.ShareClient])(s)) +} + +// URL returns the URL endpoint used by the Client object. +func (s *Client) URL() string { + return s.generated().Endpoint() +} + +// NewDirectoryClient creates a new directory.Client object by concatenating directoryName to the end of this Client's URL. +// The new directory.Client uses the same request policy pipeline as the Client. +func (s *Client) NewDirectoryClient(directoryName string) *directory.Client { + directoryName = url.PathEscape(directoryName) + directoryURL := runtime.JoinPaths(s.URL(), directoryName) + return (*directory.Client)(base.NewDirectoryClient(directoryURL, s.generated().Pipeline(), s.sharedKey())) +} + +// NewRootDirectoryClient creates a new directory.Client object for the root of the share using the Client's URL. +// The new directory.Client uses the same request policy pipeline as the Client. 
+func (s *Client) NewRootDirectoryClient() *directory.Client { + rootDirURL := s.URL() + return (*directory.Client)(base.NewDirectoryClient(rootDirURL, s.generated().Pipeline(), s.sharedKey())) +} + +// WithSnapshot creates a new Client object identical to the source but with the specified share snapshot timestamp. +// Pass "" to remove the snapshot returning a URL to the base share. +func (s *Client) WithSnapshot(shareSnapshot string) (*Client, error) { + p, err := sas.ParseURL(s.URL()) + if err != nil { + return nil, err + } + p.ShareSnapshot = shareSnapshot + + return (*Client)(base.NewShareClient(p.String(), s.generated().Pipeline(), s.sharedKey())), nil +} + +// Create operation creates a new share within a storage account. If a share with the same name already exists, the operation fails. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/create-share. +func (s *Client) Create(ctx context.Context, options *CreateOptions) (CreateResponse, error) { + opts := options.format() + resp, err := s.generated().Create(ctx, opts) + return resp, err +} + +// Delete operation marks the specified share for deletion. The share and any files contained within it are later deleted during garbage collection. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/delete-share. +func (s *Client) Delete(ctx context.Context, options *DeleteOptions) (DeleteResponse, error) { + opts, leaseAccessConditions := options.format() + resp, err := s.generated().Delete(ctx, opts, leaseAccessConditions) + return resp, err +} + +// Restore operation restores a share that had previously been soft-deleted. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/restore-share. 
+func (s *Client) Restore(ctx context.Context, deletedShareVersion string, options *RestoreOptions) (RestoreResponse, error) { + urlParts, err := sas.ParseURL(s.URL()) + if err != nil { + return RestoreResponse{}, err + } + + opts := &generated.ShareClientRestoreOptions{ + DeletedShareName: &urlParts.ShareName, + DeletedShareVersion: &deletedShareVersion, + } + resp, err := s.generated().Restore(ctx, opts) + return resp, err +} + +// GetProperties operation returns all user-defined metadata and system properties for the specified share or share snapshot. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-share-properties. +func (s *Client) GetProperties(ctx context.Context, options *GetPropertiesOptions) (GetPropertiesResponse, error) { + opts, leaseAccessConditions := options.format() + resp, err := s.generated().GetProperties(ctx, opts, leaseAccessConditions) + return resp, err +} + +// SetProperties operation sets properties for the specified share. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/set-share-properties. +func (s *Client) SetProperties(ctx context.Context, options *SetPropertiesOptions) (SetPropertiesResponse, error) { + opts, leaseAccessConditions := options.format() + resp, err := s.generated().SetProperties(ctx, opts, leaseAccessConditions) + return resp, err +} + +// CreateSnapshot operation creates a read-only snapshot of a share. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/snapshot-share. +func (s *Client) CreateSnapshot(ctx context.Context, options *CreateSnapshotOptions) (CreateSnapshotResponse, error) { + opts := options.format() + resp, err := s.generated().CreateSnapshot(ctx, opts) + return resp, err +} + +// GetAccessPolicy operation returns information about stored access policies specified on the share. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-share-acl. 
+func (s *Client) GetAccessPolicy(ctx context.Context, options *GetAccessPolicyOptions) (GetAccessPolicyResponse, error) { + opts, leaseAccessConditions := options.format() + resp, err := s.generated().GetAccessPolicy(ctx, opts, leaseAccessConditions) + return resp, err +} + +// SetAccessPolicy operation sets a stored access policy for use with shared access signatures. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/set-share-acl. +func (s *Client) SetAccessPolicy(ctx context.Context, options *SetAccessPolicyOptions) (SetAccessPolicyResponse, error) { + opts, acl, leaseAccessConditions, err := options.format() + if err != nil { + return SetAccessPolicyResponse{}, err + } + + resp, err := s.generated().SetAccessPolicy(ctx, acl, opts, leaseAccessConditions) + return resp, err +} + +// CreatePermission operation creates a permission (a security descriptor) at the share level. +// The created security descriptor can be used for the files and directories in the share. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/create-permission. +func (s *Client) CreatePermission(ctx context.Context, sharePermission string, options *CreatePermissionOptions) (CreatePermissionResponse, error) { + permission, opts := options.format(sharePermission) + resp, err := s.generated().CreatePermission(ctx, permission, opts) + return resp, err +} + +// GetPermission operation gets the SDDL permission string from the service using a known permission key. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-permission. 
+func (s *Client) GetPermission(ctx context.Context, filePermissionKey string, options *GetPermissionOptions) (GetPermissionResponse, error) { + opts := options.format() + resp, err := s.generated().GetPermission(ctx, filePermissionKey, opts) + return resp, err +} + +// SetMetadata operation sets one or more user-defined name-value pairs for the specified share. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/set-share-metadata. +func (s *Client) SetMetadata(ctx context.Context, options *SetMetadataOptions) (SetMetadataResponse, error) { + opts, leaseAccessConditions := options.format() + resp, err := s.generated().SetMetadata(ctx, opts, leaseAccessConditions) + return resp, err +} + +// GetStatistics operation retrieves statistics related to the share. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-share-stats. +func (s *Client) GetStatistics(ctx context.Context, options *GetStatisticsOptions) (GetStatisticsResponse, error) { + opts, leaseAccessConditions := options.format() + resp, err := s.generated().GetStatistics(ctx, opts, leaseAccessConditions) + return resp, err +} + +// GetSASURL is a convenience method for generating a SAS token for the currently pointed at share. +// It can only be used if the credential supplied during creation was a SharedKeyCredential. 
+func (s *Client) GetSASURL(permissions sas.SharePermissions, expiry time.Time, o *GetSASURLOptions) (string, error) { + if s.sharedKey() == nil { + return "", fileerror.MissingSharedKeyCredential + } + st := o.format() + + urlParts, err := sas.ParseURL(s.URL()) + if err != nil { + return "", err + } + + t, err := time.Parse(sas.SnapshotTimeFormat, urlParts.ShareSnapshot) + if err != nil { + t = time.Time{} + } + + qps, err := sas.SignatureValues{ + Version: sas.Version, + Protocol: sas.ProtocolHTTPS, + ShareName: urlParts.ShareName, + SnapshotTime: t, + Permissions: permissions.String(), + StartTime: st, + ExpiryTime: expiry.UTC(), + }.SignWithSharedKey(s.sharedKey()) + if err != nil { + return "", err + } + + endpoint := s.URL() + "?" + qps.Encode() + + return endpoint, nil +} diff --git a/sdk/storage/azfile/share/client_test.go b/sdk/storage/azfile/share/client_test.go new file mode 100644 index 000000000000..44940a537d27 --- /dev/null +++ b/sdk/storage/azfile/share/client_test.go @@ -0,0 +1,1460 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package share_test + +import ( + "context" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/recording" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/fileerror" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/testcommon" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/sas" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/service" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/share" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "strconv" + "testing" + "time" +) + +func Test(t *testing.T) { + recordMode := recording.GetRecordMode() + t.Logf("Running share Tests in %s mode\n", recordMode) + if recordMode == recording.LiveMode { + suite.Run(t, &ShareRecordedTestsSuite{}) + suite.Run(t, &ShareUnrecordedTestsSuite{}) + } else if recordMode == recording.PlaybackMode { + suite.Run(t, &ShareRecordedTestsSuite{}) + } else if recordMode == recording.RecordingMode { + suite.Run(t, &ShareRecordedTestsSuite{}) + } +} + +func (s *ShareRecordedTestsSuite) BeforeTest(suite string, test string) { + testcommon.BeforeTest(s.T(), suite, test) +} + +func (s *ShareRecordedTestsSuite) AfterTest(suite string, test string) { + testcommon.AfterTest(s.T(), suite, test) +} + +func (s *ShareUnrecordedTestsSuite) BeforeTest(suite string, test string) { + +} + +func (s *ShareUnrecordedTestsSuite) AfterTest(suite string, test string) { + +} + +type ShareRecordedTestsSuite struct { + suite.Suite +} + +type ShareUnrecordedTestsSuite struct { + suite.Suite +} + +func (s *ShareRecordedTestsSuite) TestShareCreateRootDirectoryURL() { + _require := require.New(s.T()) + testName := s.T().Name() + + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer 
testcommon.DeleteShare(context.Background(), _require, shareClient) + + rootDirClient := shareClient.NewRootDirectoryClient() + _require.Equal(shareClient.URL(), rootDirClient.URL()) +} + +func (s *ShareRecordedTestsSuite) TestShareCreateDirectoryURL() { + _require := require.New(s.T()) + testName := s.T().Name() + + accountName, _ := testcommon.GetGenericAccountInfo(testcommon.TestAccountDefault) + _require.Greater(len(accountName), 0) + + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName, dirName := testcommon.GenerateShareName(testName), testcommon.GenerateDirectoryName(testName) + shareClient := svcClient.NewShareClient(shareName) + _require.NoError(err) + dirClient := shareClient.NewDirectoryClient(dirName) + _require.NoError(err) + + correctURL := "https://" + accountName + ".file.core.windows.net/" + shareName + "/" + dirName + _require.Equal(dirClient.URL(), correctURL) +} + +func (s *ShareRecordedTestsSuite) TestShareCreateUsingSharedKey() { + _require := require.New(s.T()) + testName := s.T().Name() + + cred, err := testcommon.GetGenericSharedKeyCredential(testcommon.TestAccountDefault) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareURL := "https://" + cred.AccountName() + ".file.core.windows.net/" + shareName + options := &share.ClientOptions{} + testcommon.SetClientOptions(s.T(), &options.ClientOptions) + shareClient, err := share.NewClientWithSharedKeyCredential(shareURL, cred, options) + _require.NoError(err) + + resp, err := shareClient.Create(context.Background(), nil) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + _require.NoError(err) + _require.NotNil(resp.ETag) + _require.NotNil(resp.RequestID) +} + +func (s *ShareRecordedTestsSuite) TestShareCreateUsingConnectionString() { + _require := require.New(s.T()) + testName := s.T().Name() + + connString, err := 
testcommon.GetGenericConnectionString(testcommon.TestAccountDefault)
	_require.NoError(err)

	shareName := testcommon.GenerateShareName(testName)
	options := &share.ClientOptions{}
	testcommon.SetClientOptions(s.T(), &options.ClientOptions)
	shareClient, err := share.NewClientFromConnectionString(*connString, shareName, options)
	_require.NoError(err)

	// Cleanup is deferred before the error check so the share is removed even
	// if a later assertion fails.
	resp, err := shareClient.Create(context.Background(), nil)
	defer testcommon.DeleteShare(context.Background(), _require, shareClient)

	_require.NoError(err)
	_require.NotNil(resp.ETag)
	_require.NotNil(resp.RequestID)
}

// TestShareClientUsingSAS verifies a share-scoped SAS: share-level GetProperties
// is denied, while directory/file creation and root listing are permitted.
func (s *ShareUnrecordedTestsSuite) TestShareClientUsingSAS() {
	_require := require.New(s.T())
	testName := s.T().Name()

	svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil)
	_require.NoError(err)

	shareName := testcommon.GenerateShareName(testName)
	shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient)
	defer testcommon.DeleteShare(context.Background(), _require, shareClient)

	sasPerms := sas.SharePermissions{
		Read:   true,
		Write:  true,
		Delete: true,
		List:   true,
		Create: true,
	}
	expiry := time.Now().Add(time.Hour)

	shareSASURL, err := shareClient.GetSASURL(sasPerms, expiry, nil)
	_require.NoError(err)

	shareSASClient, err := share.NewClientWithNoCredential(shareSASURL, nil)
	_require.NoError(err)

	// A share SAS does not authorize the share GetProperties operation.
	_, err = shareSASClient.GetProperties(context.Background(), nil)
	_require.Error(err)
	testcommon.ValidateFileErrorCode(_require, err, fileerror.AuthorizationFailure)

	dirName1 := testcommon.GenerateDirectoryName(testName) + "1"
	_, err = shareSASClient.NewDirectoryClient(dirName1).Create(context.Background(), nil)
	_require.NoError(err)

	dirName2 := testcommon.GenerateDirectoryName(testName) + "2"
	_, err = shareSASClient.NewDirectoryClient(dirName2).Create(context.Background(), nil)
	_require.NoError(err)

	fileName1 := testcommon.GenerateFileName(testName) + "1"
	_, err = shareSASClient.NewRootDirectoryClient().NewFileClient(fileName1).Create(context.Background(), 1024, nil)
	_require.NoError(err)

	fileName2 := testcommon.GenerateFileName(testName) + "2"
	_, err = shareSASClient.NewDirectoryClient(dirName2).NewFileClient(fileName2).Create(context.Background(), 1024, nil)
	_require.NoError(err)

	// The root listing should show the two directories plus the one root-level file
	// (fileName2 lives inside dirName2 and so is not listed at the root).
	dirCtr, fileCtr := 0, 0
	pager := shareSASClient.NewRootDirectoryClient().NewListFilesAndDirectoriesPager(nil)
	for pager.More() {
		page, err := pager.NextPage(context.Background())
		_require.NoError(err)
		dirCtr += len(page.Segment.Directories)
		fileCtr += len(page.Segment.Files)
	}
	_require.Equal(dirCtr, 2)
	_require.Equal(fileCtr, 1)
}

// TestShareCreateDeleteNonDefault creates a share with explicit tier, quota and
// metadata, confirms the listing reflects them, then deletes the share.
func (s *ShareRecordedTestsSuite) TestShareCreateDeleteNonDefault() {
	_require := require.New(s.T())
	testName := s.T().Name()
	svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil)
	_require.NoError(err)

	shareName := testcommon.GenerateShareName(testName)
	shareClient := svcClient.NewShareClient(shareName)

	quota := int32(1000)

	cResp, err := shareClient.Create(context.Background(), &share.CreateOptions{
		AccessTier: to.Ptr(share.AccessTierCool),
		Quota:      to.Ptr(quota),
		Metadata:   testcommon.BasicMetadata})

	_require.NoError(err)
	_require.Equal(cResp.Date.IsZero(), false)
	_require.NotNil(cResp.ETag)
	_require.NotNil(cResp.LastModified)
	_require.NotNil(cResp.RequestID)
	_require.NotNil(cResp.Version)

	pager := svcClient.NewListSharesPager(&service.ListSharesOptions{
		Prefix:  to.Ptr(shareName),
		Include: service.ListSharesInclude{Metadata: true},
	})

	// The listing filtered on our prefix must contain exactly the new share,
	// carrying the metadata, tier and quota we set at creation.
	for pager.More() {
		page, err := pager.NextPage(context.Background())
		_require.NoError(err)
		_require.Len(page.Shares, 1)
		_require.Equal(*page.Shares[0].Name, shareName)
		_require.NotNil(page.Shares[0].Metadata)
		_require.EqualValues(page.Shares[0].Metadata, testcommon.BasicMetadata)
		_require.Equal(*page.Shares[0].Properties.AccessTier, string(share.AccessTierCool))
		_require.Equal(*page.Shares[0].Properties.Quota, quota)
	}

	dResp, err := shareClient.Delete(context.Background(), nil)
	_require.NoError(err)
	_require.NotNil(dResp.Date)
	_require.NotNil(dResp.RequestID)
	_require.NotNil(dResp.Version)

	// After deletion the same prefix-filtered listing must be empty.
	pager1 := svcClient.NewListSharesPager(&service.ListSharesOptions{
		Prefix:  to.Ptr(shareName),
		Include: service.ListSharesInclude{Metadata: true},
	})
	for pager1.More() {
		page, err := pager1.NextPage(context.Background())
		_require.NoError(err)
		_require.Len(page.Shares, 0)
	}
}

// TestShareCreateNilMetadata verifies that creating a share with default
// options yields an empty metadata map on GetProperties.
func (s *ShareRecordedTestsSuite) TestShareCreateNilMetadata() {
	_require := require.New(s.T())
	testName := s.T().Name()
	svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil)
	_require.NoError(err)

	shareName := testcommon.GenerateShareName(testName)
	shareClient := svcClient.NewShareClient(shareName)

	_, err = shareClient.Create(context.Background(), nil)
	defer testcommon.DeleteShare(context.Background(), _require, shareClient)
	_require.NoError(err)

	response, err := shareClient.GetProperties(context.Background(), nil)
	_require.NoError(err)
	_require.Len(response.Metadata, 0)
}

// TestShareCreateNegativeInvalidName verifies the service rejects a share name
// containing a space.
func (s *ShareRecordedTestsSuite) TestShareCreateNegativeInvalidName() {
	_require := require.New(s.T())
	svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil)
	_require.NoError(err)

	shareClient := svcClient.NewShareClient("foo bar")

	_, err = shareClient.Create(context.Background(), nil)

	testcommon.ValidateFileErrorCode(_require, err, fileerror.InvalidResourceName)
}
testcommon.GenerateShareName(testName) + shareClient := svcClient.NewShareClient(shareName) + + _, err = shareClient.Create(context.Background(), &share.CreateOptions{ + Metadata: map[string]*string{"!@#$%^&*()": to.Ptr("!@#$%^&*()")}, + Quota: to.Ptr(int32(0)), + }) + _require.Error(err) +} + +func (s *ShareRecordedTestsSuite) TestShareDeleteNegativeNonExistent() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := svcClient.NewShareClient(shareName) + + _, err = shareClient.Delete(context.Background(), nil) + testcommon.ValidateFileErrorCode(_require, err, fileerror.ShareNotFound) +} + +func (s *ShareRecordedTestsSuite) TestShareGetSetPropertiesNonDefault() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + newQuota := int32(1234) + + sResp, err := shareClient.SetProperties(context.Background(), &share.SetPropertiesOptions{ + Quota: to.Ptr(newQuota), + AccessTier: to.Ptr(share.AccessTierHot), + }) + _require.NoError(err) + _require.NotNil(sResp.ETag) + _require.Equal(sResp.LastModified.IsZero(), false) + _require.NotNil(sResp.RequestID) + _require.NotNil(sResp.Version) + _require.Equal(sResp.Date.IsZero(), false) + + props, err := shareClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.NotNil(props.ETag) + _require.Equal(props.LastModified.IsZero(), false) + _require.NotNil(props.RequestID) + _require.NotNil(props.Version) + _require.Equal(props.Date.IsZero(), false) + 
_require.Equal(*props.Quota, newQuota) + _require.Equal(*props.AccessTier, string(share.AccessTierHot)) +} + +func (s *ShareRecordedTestsSuite) TestShareGetSetPropertiesDefault() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + sResp, err := shareClient.SetProperties(context.Background(), nil) + _require.NoError(err) + _require.NotNil(sResp.ETag) + _require.Equal(sResp.LastModified.IsZero(), false) + _require.NotNil(sResp.RequestID) + _require.NotNil(sResp.Version) + _require.Equal(sResp.Date.IsZero(), false) + + props, err := shareClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.NotNil(props.ETag) + _require.Equal(props.LastModified.IsZero(), false) + _require.NotNil(props.RequestID) + _require.NotNil(props.Version) + _require.Equal(props.Date.IsZero(), false) + _require.Greater(*props.Quota, int32(0)) // When using service default quota, it could be any value +} + +func (s *ShareRecordedTestsSuite) TestShareSetQuotaNegative() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + _, err = shareClient.SetProperties(context.Background(), &share.SetPropertiesOptions{Quota: to.Ptr(int32(-1))}) + _require.Error(err) + testcommon.ValidateFileErrorCode(_require, err, fileerror.InvalidHeaderValue) +} + +func (s 
*ShareRecordedTestsSuite) TestShareGetPropertiesNegative() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.GetShareClient(shareName, svcClient) + + _, err = shareClient.GetProperties(context.Background(), nil) + _require.Error(err) + testcommon.ValidateFileErrorCode(_require, err, fileerror.ShareNotFound) +} + +func (s *ShareRecordedTestsSuite) TestSharePutAndGetPermission() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + // Create a permission and check that it's not empty. + createResp, err := shareClient.CreatePermission(context.Background(), testcommon.SampleSDDL, nil) + _require.NoError(err) + _require.NotEqual(*createResp.FilePermissionKey, "") + + getResp, err := shareClient.GetPermission(context.Background(), *createResp.FilePermissionKey, nil) + _require.NoError(err) + // Rather than checking against the original, we check for emptiness, as Azure Files has set a nil-ness flag on SACLs + // and converted our well-known SID. 
+ /* + Expected :string = "O:S-1-5-32-548G:S-1-5-21-397955417-626881126-188441444-512D:(A;;RPWPCCDCLCSWRCWDWOGA;;;S-1-0-0)" + Actual :string = "O:AOG:S-1-5-21-397955417-626881126-188441444-512D:(A;;CCDCLCSWRPWPRCWDWOGA;;;S-1-0-0)S:NO_ACCESS_CONTROL" + */ + _require.NotNil(getResp.Permission) + _require.NotEmpty(*getResp.Permission) +} + +func (s *ShareRecordedTestsSuite) TestShareGetSetAccessPolicyNonDefault() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + currTime, err := time.Parse(time.UnixDate, "Fri Mar 31 20:00:00 GMT 2023") + _require.NoError(err) + now := currTime.UTC().Truncate(10000 * time.Millisecond) // Enough resolution + expiryTIme := now.Add(5 * time.Minute).UTC() + pS := share.AccessPolicyPermission{ + Read: true, + Write: true, + Create: true, + Delete: true, + List: true, + } + pS2 := &share.AccessPolicyPermission{} + err = pS2.Parse("ldcwr") + _require.NoError(err) + _require.EqualValues(*pS2, pS) + + permission := pS.String() + permissions := []*share.SignedIdentifier{ + { + ID: to.Ptr("MTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTI="), + AccessPolicy: &share.AccessPolicy{ + Start: &now, + Expiry: &expiryTIme, + Permission: &permission, + }, + }} + + sResp, err := shareClient.SetAccessPolicy(context.Background(), &share.SetAccessPolicyOptions{ + ShareACL: permissions, + }) + _require.NoError(err) + _require.Equal(sResp.Date.IsZero(), false) + _require.NotNil(sResp.ETag) + _require.Equal(sResp.LastModified.IsZero(), false) + _require.NotNil(sResp.RequestID) + _require.NotNil(sResp.Version) + + gResp, err := shareClient.GetAccessPolicy(context.Background(), nil) + _require.NoError(err) + 
_require.Equal(gResp.Date.IsZero(), false) + _require.NotNil(gResp.ETag) + _require.Equal(gResp.LastModified.IsZero(), false) + _require.NotNil(gResp.RequestID) + _require.NotNil(gResp.Version) + _require.Len(gResp.SignedIdentifiers, 1) + _require.EqualValues(*(gResp.SignedIdentifiers[0]), *permissions[0]) +} + +func (s *ShareRecordedTestsSuite) TestShareGetSetAccessPolicyNonDefaultMultiple() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + currTime, err := time.Parse(time.UnixDate, "Fri Mar 31 20:00:00 GMT 2023") + _require.NoError(err) + now := currTime.UTC().Truncate(10000 * time.Millisecond) // Enough resolution + expiryTIme := now.Add(5 * time.Minute).UTC() + permission := share.AccessPolicyPermission{ + Read: true, + Write: true, + }.String() + + permissions := []*share.SignedIdentifier{ + { + ID: to.Ptr("MTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTI="), + AccessPolicy: &share.AccessPolicy{ + Start: &now, + Expiry: &expiryTIme, + Permission: &permission, + }, + }, + { + ID: to.Ptr("2"), + AccessPolicy: &share.AccessPolicy{ + Start: &now, + Expiry: &expiryTIme, + Permission: &permission, + }, + }} + + sResp, err := shareClient.SetAccessPolicy(context.Background(), &share.SetAccessPolicyOptions{ + ShareACL: permissions, + }) + _require.NoError(err) + _require.Equal(sResp.Date.IsZero(), false) + _require.NotNil(sResp.ETag) + _require.Equal(sResp.LastModified.IsZero(), false) + _require.NotNil(sResp.RequestID) + _require.NotNil(sResp.Version) + + gResp, err := shareClient.GetAccessPolicy(context.Background(), nil) + _require.NoError(err) + _require.Equal(gResp.Date.IsZero(), false) + _require.NotNil(gResp.ETag) 
+ _require.Equal(gResp.LastModified.IsZero(), false) + _require.NotNil(gResp.RequestID) + _require.NotNil(gResp.Version) + _require.Len(gResp.SignedIdentifiers, 2) + _require.EqualValues(gResp.SignedIdentifiers[0], permissions[0]) + _require.EqualValues(gResp.SignedIdentifiers[1], permissions[1]) +} + +func (s *ShareRecordedTestsSuite) TestShareSetAccessPolicyMoreThanFive() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + currTime, err := time.Parse(time.UnixDate, "Fri Mar 31 20:00:00 GMT 2023") + _require.NoError(err) + now := currTime.UTC().Truncate(10000 * time.Millisecond) // Enough resolution + expiryTIme := now.Add(5 * time.Minute).UTC() + permission := share.AccessPolicyPermission{ + Read: true, + Create: true, + Write: true, + Delete: true, + List: true, + }.String() + + var permissions []*share.SignedIdentifier + for i := 0; i <= len(permission); i++ { + p := permission + if i < len(permission) { + p = string(permission[i]) + } + permissions = append(permissions, &share.SignedIdentifier{ + ID: to.Ptr(fmt.Sprintf("%v", i)), + AccessPolicy: &share.AccessPolicy{ + Start: &now, + Expiry: &expiryTIme, + Permission: &p, + }, + }) + } + _require.Len(permissions, 6) + + _, err = shareClient.SetAccessPolicy(context.Background(), &share.SetAccessPolicyOptions{ + ShareACL: permissions, + }) + _require.Error(err) + testcommon.ValidateFileErrorCode(_require, err, fileerror.InvalidXMLDocument) +} + +func (s *ShareRecordedTestsSuite) TestShareGetSetAccessPolicyDefault() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, 
nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + sResp, err := shareClient.SetAccessPolicy(context.Background(), nil) + _require.NoError(err) + _require.Equal(sResp.Date.IsZero(), false) + _require.NotNil(sResp.ETag) + _require.Equal(sResp.LastModified.IsZero(), false) + _require.NotNil(sResp.RequestID) + _require.NotNil(sResp.Version) + + gResp, err := shareClient.GetAccessPolicy(context.Background(), nil) + _require.NoError(err) + _require.Equal(gResp.Date.IsZero(), false) + _require.NotNil(gResp.ETag) + _require.Equal(gResp.LastModified.IsZero(), false) + _require.NotNil(gResp.RequestID) + _require.NotNil(gResp.Version) + _require.Len(gResp.SignedIdentifiers, 0) +} + +func (s *ShareRecordedTestsSuite) TestShareGetAccessPolicyNegative() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.GetShareClient(shareName, svcClient) + + _, err = shareClient.GetAccessPolicy(context.Background(), nil) + _require.Error(err) + testcommon.ValidateFileErrorCode(_require, err, fileerror.ShareNotFound) +} + +func (s *ShareRecordedTestsSuite) TestShareSetAccessPolicyNonDefaultDeleteAndModifyACL() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + currTime, err := time.Parse(time.UnixDate, "Thu Mar 30 20:00:00 GMT 
2023") + _require.NoError(err) + start := currTime.UTC().Truncate(10000 * time.Millisecond) + expiry := start.Add(5 * time.Minute).UTC() + accessPermission := share.AccessPolicyPermission{List: true}.String() + permissions := make([]*share.SignedIdentifier, 2) + for i := 0; i < 2; i++ { + permissions[i] = &share.SignedIdentifier{ + ID: to.Ptr("000" + strconv.Itoa(i)), + AccessPolicy: &share.AccessPolicy{ + Start: &start, + Expiry: &expiry, + Permission: &accessPermission, + }, + } + } + + _, err = shareClient.SetAccessPolicy(context.Background(), &share.SetAccessPolicyOptions{ + ShareACL: permissions, + }) + _require.NoError(err) + + resp, err := shareClient.GetAccessPolicy(context.Background(), nil) + _require.NoError(err) + _require.Len(resp.SignedIdentifiers, len(permissions)) + _require.EqualValues(resp.SignedIdentifiers, permissions) + + permissions = resp.SignedIdentifiers[:1] // Delete the second policy by removing it from the slice + permissions[0].ID = to.Ptr("0004") // Modify the remaining policy which is at index 0 in the new slice + _, err = shareClient.SetAccessPolicy(context.Background(), &share.SetAccessPolicyOptions{ + ShareACL: permissions, + }) + _require.NoError(err) + + resp, err = shareClient.GetAccessPolicy(context.Background(), nil) + _require.NoError(err) + _require.Len(resp.SignedIdentifiers, 1) + _require.EqualValues(resp.SignedIdentifiers, permissions) +} + +func (s *ShareRecordedTestsSuite) TestShareSetAccessPolicyDeleteAllPolicies() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + currTime, err := time.Parse(time.UnixDate, "Fri Mar 31 20:00:00 GMT 2023") + _require.NoError(err) + 
start := currTime.UTC() + expiry := start.Add(5 * time.Minute).UTC() + accessPermission := share.AccessPolicyPermission{List: true}.String() + permissions := make([]*share.SignedIdentifier, 2) + for i := 0; i < 2; i++ { + permissions[i] = &share.SignedIdentifier{ + ID: to.Ptr("000" + strconv.Itoa(i)), + AccessPolicy: &share.AccessPolicy{ + Start: &start, + Expiry: &expiry, + Permission: &accessPermission, + }, + } + } + + _, err = shareClient.SetAccessPolicy(context.Background(), &share.SetAccessPolicyOptions{ + ShareACL: permissions, + }) + _require.NoError(err) + + resp1, err := shareClient.GetAccessPolicy(context.Background(), nil) + _require.NoError(err) + _require.Len(resp1.SignedIdentifiers, 2) + + _, err = shareClient.SetAccessPolicy(context.Background(), nil) + _require.NoError(err) + + resp2, err := shareClient.GetAccessPolicy(context.Background(), nil) + _require.NoError(err) + _require.Len(resp2.SignedIdentifiers, 0) +} + +func (s *ShareRecordedTestsSuite) TestShareSetPermissionsNegativeInvalidPolicyTimes() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + // Swap start and expiry + currTime, err := time.Parse(time.UnixDate, "Fri Mar 31 20:00:00 GMT 2023") + _require.NoError(err) + expiry := currTime.UTC() + start := expiry.Add(5 * time.Minute).UTC() + accessPermission := share.AccessPolicyPermission{List: true}.String() + permissions := make([]*share.SignedIdentifier, 2) + for i := 0; i < 2; i++ { + permissions[i] = &share.SignedIdentifier{ + ID: to.Ptr("000" + strconv.Itoa(i)), + AccessPolicy: &share.AccessPolicy{ + Start: &start, + Expiry: &expiry, + Permission: &accessPermission, + }, + } + } + + 
_, err = shareClient.SetAccessPolicy(context.Background(), &share.SetAccessPolicyOptions{ + ShareACL: permissions, + }) + _require.NoError(err) + + resp, err := shareClient.GetAccessPolicy(context.Background(), nil) + _require.NoError(err) + _require.Len(resp.SignedIdentifiers, len(permissions)) + _require.EqualValues(resp.SignedIdentifiers, permissions) +} + +// SignedIdentifier ID too long +func (s *ShareRecordedTestsSuite) TestShareSetPermissionsNegative() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + id := "" + for i := 0; i < 65; i++ { + id += "a" + } + currTime, err := time.Parse(time.UnixDate, "Wed Mar 29 20:00:00 GMT 2023") + _require.NoError(err) + expiry := currTime.UTC() + start := expiry.Add(5 * time.Minute).UTC() + accessPermission := share.AccessPolicyPermission{List: true}.String() + permissions := make([]*share.SignedIdentifier, 2) + for i := 0; i < 2; i++ { + permissions[i] = &share.SignedIdentifier{ + ID: to.Ptr(id), + AccessPolicy: &share.AccessPolicy{ + Start: &start, + Expiry: &expiry, + Permission: &accessPermission, + }, + } + } + + _, err = shareClient.SetAccessPolicy(context.Background(), &share.SetAccessPolicyOptions{ + ShareACL: permissions, + }) + _require.Error(err) + testcommon.ValidateFileErrorCode(_require, err, fileerror.InvalidXMLDocument) +} + +func (s *ShareRecordedTestsSuite) TestShareGetSetMetadataDefault() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := 
testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + sResp, err := shareClient.SetMetadata(context.Background(), &share.SetMetadataOptions{ + Metadata: map[string]*string{}, + }) + _require.NoError(err) + _require.Equal(sResp.Date.IsZero(), false) + _require.NotNil(sResp.ETag) + _require.Equal(sResp.LastModified.IsZero(), false) + _require.NotNil(sResp.RequestID) + _require.NotNil(sResp.Version) + + gResp, err := shareClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(gResp.Date.IsZero(), false) + _require.NotNil(gResp.ETag) + _require.Equal(gResp.LastModified.IsZero(), false) + _require.NotNil(gResp.RequestID) + _require.NotNil(gResp.Version) + _require.Len(gResp.Metadata, 0) +} + +func (s *ShareRecordedTestsSuite) TestShareGetSetMetadataNonDefault() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + md := map[string]*string{ + "Foo": to.Ptr("FooValuE"), + "Bar": to.Ptr("bArvaLue"), + } + sResp, err := shareClient.SetMetadata(context.Background(), &share.SetMetadataOptions{ + Metadata: md, + }) + _require.NoError(err) + _require.Equal(sResp.Date.IsZero(), false) + _require.NotNil(sResp.ETag) + _require.Equal(sResp.LastModified.IsZero(), false) + _require.NotNil(sResp.RequestID) + _require.NotNil(sResp.Version) + + gResp, err := shareClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(gResp.Date.IsZero(), false) + _require.NotNil(gResp.ETag) + _require.Equal(gResp.LastModified.IsZero(), false) + _require.NotNil(gResp.RequestID) + 
_require.NotNil(gResp.Version) + _require.EqualValues(gResp.Metadata, md) +} + +func (s *ShareRecordedTestsSuite) TestShareSetMetadataNegative() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + md := map[string]*string{ + "!@#$%^&*()": to.Ptr("!@#$%^&*()"), + } + _, err = shareClient.SetMetadata(context.Background(), &share.SetMetadataOptions{ + Metadata: md, + }) + _require.Error(err) +} + +func (s *ShareRecordedTestsSuite) TestShareGetStats() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + newQuota := int32(300) + + // In order to test and get LastModified property. + _, err = shareClient.SetProperties(context.Background(), &share.SetPropertiesOptions{Quota: to.Ptr(newQuota)}) + _require.NoError(err) + + gResp, err := shareClient.GetStatistics(context.Background(), nil) + _require.NoError(err) + _require.Equal(gResp.Date.IsZero(), false) + // _require.NotEqual(*gResp.ETag, "") // TODO: The ETag would be "" + // _require.Equal(gResp.LastModified.IsZero(), false) // TODO: Even share is once updated, no LastModified would be returned. 
+ _require.NotNil(gResp.RequestID) + _require.NotNil(gResp.Version) + _require.Equal(*gResp.ShareUsageBytes, int64(0)) +} + +func (s *ShareRecordedTestsSuite) TestShareGetStatsNegative() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.GetShareClient(shareName, svcClient) + + _, err = shareClient.GetStatistics(context.Background(), nil) + _require.Error(err) + testcommon.ValidateFileErrorCode(_require, err, fileerror.ShareNotFound) +} + +func (s *ShareRecordedTestsSuite) TestSetAndGetStatistics() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.GetShareClient(shareName, svcClient) + + _, err = shareClient.Create(context.Background(), &share.CreateOptions{Quota: to.Ptr(int32(1024))}) + _require.NoError(err) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirClient := shareClient.NewDirectoryClient("testdir") + _, err = dirClient.Create(context.Background(), nil) + _require.NoError(err) + + fileClient := dirClient.NewFileClient("testfile") + _, err = fileClient.Create(context.Background(), int64(1024*1024*1024*1024), nil) + _require.NoError(err) + + getStats, err := shareClient.GetStatistics(context.Background(), nil) + _require.NoError(err) + _require.Equal(*getStats.ShareUsageBytes, int64(1024*1024*1024*1024)) +} + +func deleteShare(ctx context.Context, _require *require.Assertions, shareClient *share.Client, o *share.DeleteOptions) { + _, err := shareClient.Delete(ctx, o) + _require.NoError(err) +} + +func (s *ShareRecordedTestsSuite) TestShareCreateSnapshotNonDefault() { + _require := require.New(s.T()) + testName 
:= s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer deleteShare(context.Background(), _require, shareClient, &share.DeleteOptions{DeleteSnapshots: to.Ptr(share.DeleteSnapshotsOptionTypeInclude)}) + + cResp, err := shareClient.CreateSnapshot(context.Background(), &share.CreateSnapshotOptions{Metadata: testcommon.BasicMetadata}) + _require.NoError(err) + _require.Equal(cResp.Date.IsZero(), false) + _require.NotNil(cResp.ETag) + _require.NotEqual(*cResp.ETag, "") + _require.Equal(cResp.LastModified.IsZero(), false) + _require.NotNil(cResp.RequestID) + _require.NotNil(cResp.Version) + _require.NotNil(cResp.Snapshot) + _require.NotEqual(*cResp.Snapshot, "") + + cSnapshot := *cResp.Snapshot + + pager := svcClient.NewListSharesPager(&service.ListSharesOptions{ + Include: service.ListSharesInclude{Metadata: true, Snapshots: true}, + Prefix: &shareName, + }) + + foundSnapshot := false + for pager.More() { + lResp, err := pager.NextPage(context.Background()) + _require.NoError(err) + _require.Len(lResp.Shares, 2) + + for _, s := range lResp.Shares { + if s.Snapshot != nil { + foundSnapshot = true + _require.Equal(*s.Snapshot, cSnapshot) + _require.NotNil(s.Metadata) + _require.EqualValues(s.Metadata, testcommon.BasicMetadata) + } else { + _require.Len(s.Metadata, 0) + } + } + } + _require.True(foundSnapshot) +} + +func (s *ShareUnrecordedTestsSuite) TestShareCreateSnapshotDefault() { + _require := require.New(s.T()) + testName := s.T().Name() + + cred, err := testcommon.GetGenericSharedKeyCredential(testcommon.TestAccountDefault) + _require.NoError(err) + + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := 
svcClient.NewShareClient(shareName) + + _, err = shareClient.Create(context.Background(), nil) + _require.NoError(err) + defer deleteShare(context.Background(), _require, shareClient, &share.DeleteOptions{DeleteSnapshots: to.Ptr(share.DeleteSnapshotsOptionTypeInclude)}) + + // create a file in the base share. + dirClient := shareClient.NewRootDirectoryClient() + _require.NoError(err) + + fClient := dirClient.NewFileClient("myfile") + _, err = fClient.Create(context.Background(), 0, nil) + _require.NoError(err) + + // Create share snapshot, the snapshot contains the create file. + snapshotShare, err := shareClient.CreateSnapshot(context.Background(), nil) + _require.NoError(err) + + // Delete file in base share. + _, err = fClient.Delete(context.Background(), nil) + _require.NoError(err) + + // To produce a share SAS (as opposed to a file SAS), assign to FilePermissions using + // ShareSASPermissions and make sure the DirectoryAndFilePath field is "" (the default). + perms := sas.SharePermissions{Read: true, Write: true} + + // Restore file from share snapshot. + // Create a SAS. + sasQueryParams, err := sas.SignatureValues{ + Protocol: sas.ProtocolHTTPS, // Users MUST use HTTPS (not HTTP) + ExpiryTime: time.Now().UTC().Add(48 * time.Hour), // 48-hours before expiration + ShareName: shareName, + Permissions: perms.String(), + }.SignWithSharedKey(cred) + _require.NoError(err) + + // Build a file snapshot URL. + fileParts, err := sas.ParseURL(fClient.URL()) + _require.NoError(err) + fileParts.ShareSnapshot = *snapshotShare.Snapshot + fileParts.SAS = sasQueryParams + sourceURL := fileParts.String() + + // Before restore + _, err = fClient.GetProperties(context.Background(), nil) + _require.Error(err) + testcommon.ValidateFileErrorCode(_require, err, fileerror.ResourceNotFound) + + // Do restore. 
+ _, err = fClient.StartCopyFromURL(context.Background(), sourceURL, nil) + _require.NoError(err) + + time.Sleep(2 * time.Second) + + // After restore + _, err = fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + + _, err = shareClient.Delete(context.Background(), &share.DeleteOptions{ + ShareSnapshot: snapshotShare.Snapshot, + }) + _require.NoError(err) +} + +func (s *ShareRecordedTestsSuite) TestShareCreateSnapshotNegativeShareNotExist() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.GetShareClient(shareName, svcClient) + + _, err = shareClient.CreateSnapshot(context.Background(), &share.CreateSnapshotOptions{Metadata: map[string]*string{}}) + _require.Error(err) + testcommon.ValidateFileErrorCode(_require, err, fileerror.ShareNotFound) +} + +func (s *ShareRecordedTestsSuite) TestShareDeleteSnapshot() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer deleteShare(context.Background(), _require, shareClient, &share.DeleteOptions{DeleteSnapshots: to.Ptr(share.DeleteSnapshotsOptionTypeInclude)}) + + resp1, err := shareClient.CreateSnapshot(context.Background(), nil) + _require.NoError(err) + _require.NotNil(resp1.Snapshot) + _require.NotEmpty(*resp1.Snapshot) + + resp2, err := shareClient.CreateSnapshot(context.Background(), nil) + _require.NoError(err) + _require.NotNil(resp2.Snapshot) + _require.NotEmpty(*resp2.Snapshot) + + pager := svcClient.NewListSharesPager(&service.ListSharesOptions{ + Include: service.ListSharesInclude{Snapshots: true}, + 
Prefix: &shareName, + }) + + snapshotsCtr := 0 + for pager.More() { + lResp, err := pager.NextPage(context.Background()) + _require.NoError(err) + _require.Len(lResp.Shares, 3) // 2 snapshots and 1 share + + for _, s := range lResp.Shares { + if s.Snapshot != nil { + snapshotsCtr++ + } + } + } + _require.Equal(snapshotsCtr, 2) + + snapClient, err := shareClient.WithSnapshot(*resp1.Snapshot) + _require.NoError(err) + + _, err = snapClient.Delete(context.Background(), nil) + _require.NoError(err) + + pager = svcClient.NewListSharesPager(&service.ListSharesOptions{ + Include: service.ListSharesInclude{Snapshots: true}, + Prefix: &shareName, + }) + + snapshotsCtr = 0 + for pager.More() { + lResp, err := pager.NextPage(context.Background()) + _require.NoError(err) + _require.Len(lResp.Shares, 2) + + for _, s := range lResp.Shares { + if s.Snapshot != nil { + snapshotsCtr++ + _require.Equal(*s.Snapshot, *resp2.Snapshot) + } + } + } + _require.Equal(snapshotsCtr, 1) +} + +func (s *ShareRecordedTestsSuite) TestShareCreateSnapshotNegativeMetadataInvalid() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + _, err = shareClient.CreateSnapshot(context.Background(), &share.CreateSnapshotOptions{Metadata: map[string]*string{"!@#$%^&*()": to.Ptr("!@#$%^&*()")}}) + _require.Error(err) +} + +func (s *ShareRecordedTestsSuite) TestShareCreateSnapshotNegativeSnapshotOfSnapshot() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := 
testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer deleteShare(context.Background(), _require, shareClient, &share.DeleteOptions{DeleteSnapshots: to.Ptr(share.DeleteSnapshotsOptionTypeInclude)}) + + snapTime, err := time.Parse(time.UnixDate, "Fri Mar 31 20:00:00 GMT 2023") + _require.NoError(err) + + snapshotClient, err := shareClient.WithSnapshot(snapTime.UTC().String()) + _require.NoError(err) + + cResp, err := snapshotClient.CreateSnapshot(context.Background(), nil) + _require.NoError(err) //Note: this would not fail, snapshot would be ignored. + _require.NotNil(cResp) + _require.NotEmpty(*cResp.Snapshot) + + snapshotRecursiveClient, err := shareClient.WithSnapshot(*cResp.Snapshot) + _require.NoError(err) + _, err = snapshotRecursiveClient.CreateSnapshot(context.Background(), nil) + _require.NoError(err) //Note: this would not fail, snapshot would be ignored. +} + +func (s *ShareRecordedTestsSuite) TestShareDeleteSnapshotsInclude() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + + _, err = shareClient.CreateSnapshot(context.Background(), nil) + _require.NoError(err) + + pager := svcClient.NewListSharesPager(&service.ListSharesOptions{ + Include: service.ListSharesInclude{Snapshots: true}, + Prefix: &shareName, + }) + + for pager.More() { + resp, err := pager.NextPage(context.Background()) + _require.NoError(err) + _require.Len(resp.Shares, 2) + } + + _, err = shareClient.Delete(context.Background(), &share.DeleteOptions{DeleteSnapshots: to.Ptr(share.DeleteSnapshotsOptionTypeInclude)}) + _require.NoError(err) + + pager = svcClient.NewListSharesPager(&service.ListSharesOptions{ + Include: service.ListSharesInclude{Snapshots: true}, + Prefix: 
&shareName, + }) + + for pager.More() { + resp, err := pager.NextPage(context.Background()) + _require.NoError(err) + _require.Len(resp.Shares, 0) + } +} + +func (s *ShareRecordedTestsSuite) TestShareDeleteSnapshotsNoneWithSnapshots() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer deleteShare(context.Background(), _require, shareClient, &share.DeleteOptions{DeleteSnapshots: to.Ptr(share.DeleteSnapshotsOptionTypeInclude)}) + + _, err = shareClient.CreateSnapshot(context.Background(), nil) + _require.NoError(err) + + _, err = shareClient.Delete(context.Background(), nil) + _require.Error(err) + testcommon.ValidateFileErrorCode(_require, err, fileerror.ShareHasSnapshots) +} + +func (s *ShareRecordedTestsSuite) TestShareRestoreSuccess() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountSoftDelete, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + _, err = shareClient.Delete(context.Background(), nil) + _require.NoError(err) + + // wait for share deletion + time.Sleep(60 * time.Second) + + pager := svcClient.NewListSharesPager(&service.ListSharesOptions{ + Include: service.ListSharesInclude{Deleted: true}, + Prefix: &shareName, + }) + + shareVersion := "" + shareCtr := 0 + for pager.More() { + resp, err := pager.NextPage(context.Background()) + _require.NoError(err) + + for _, s := range resp.Shares { + if s.Deleted != nil && *s.Deleted { + shareVersion = *s.Version + } else { + shareCtr++ 
+ } + } + } + _require.NotEmpty(shareVersion) + _require.Equal(shareCtr, 0) + + rResp, err := shareClient.Restore(context.Background(), shareVersion, nil) + _require.NoError(err) + _require.NotNil(rResp.ETag) + _require.NotNil(rResp.RequestID) + _require.NotNil(rResp.Version) + + pager = svcClient.NewListSharesPager(&service.ListSharesOptions{ + Prefix: &shareName, + }) + + shareCtr = 0 + for pager.More() { + resp, err := pager.NextPage(context.Background()) + _require.NoError(err) + shareCtr += len(resp.Shares) + } + _require.Equal(shareCtr, 1) +} + +func (s *ShareRecordedTestsSuite) TestShareRestoreFailures() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountSoftDelete, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + _, err = shareClient.Restore(context.Background(), "", nil) + _require.Error(err) + testcommon.ValidateFileErrorCode(_require, err, fileerror.MissingRequiredHeader) +} + +func (s *ShareRecordedTestsSuite) TestShareRestoreWithSnapshotsAgain() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountSoftDelete, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer deleteShare(context.Background(), _require, shareClient, &share.DeleteOptions{DeleteSnapshots: to.Ptr(share.DeleteSnapshotsOptionTypeInclude)}) + + cResp, err := shareClient.CreateSnapshot(context.Background(), nil) + _require.NoError(err) + _require.NotNil(cResp.Snapshot) + + _, err = shareClient.Delete(context.Background(), &share.DeleteOptions{ + DeleteSnapshots: 
to.Ptr(share.DeleteSnapshotsOptionTypeInclude), + }) + _require.NoError(err) + + // wait for share deletion + time.Sleep(60 * time.Second) + + pager := svcClient.NewListSharesPager(&service.ListSharesOptions{ + Include: service.ListSharesInclude{Deleted: true}, + Prefix: &shareName, + }) + + shareVersion := "" + shareCtr := 0 + for pager.More() { + resp, err := pager.NextPage(context.Background()) + _require.NoError(err) + + for _, s := range resp.Shares { + if s.Deleted != nil && *s.Deleted { + shareVersion = *s.Version + } else { + shareCtr++ + } + } + } + _require.NotEmpty(shareVersion) + _require.Equal(shareCtr, 0) + + rResp, err := shareClient.Restore(context.Background(), shareVersion, nil) + _require.NoError(err) + _require.NotNil(rResp.ETag) + _require.NotNil(rResp.RequestID) + _require.NotNil(rResp.Version) + + pager = svcClient.NewListSharesPager(&service.ListSharesOptions{ + Include: service.ListSharesInclude{Snapshots: true}, + Prefix: &shareName, + }) + + shareCtr = 0 + for pager.More() { + resp, err := pager.NextPage(context.Background()) + _require.NoError(err) + shareCtr += len(resp.Shares) + for _, s := range resp.Shares { + if s.Snapshot != nil { + _require.Equal(*s.Snapshot, *cResp.Snapshot) + } + } + } + _require.Equal(shareCtr, 2) // 1 share and 1 snapshot +} + +func (s *ShareRecordedTestsSuite) TestSASShareClientNoKey() { + _require := require.New(s.T()) + accountName, _ := testcommon.GetGenericAccountInfo(testcommon.TestAccountDefault) + _require.Greater(len(accountName), 0) + + testName := s.T().Name() + shareName := testcommon.GenerateShareName(testName) + shareClient, err := share.NewClientWithNoCredential(fmt.Sprintf("https://%s.file.core.windows.net/%v", accountName, shareName), nil) + _require.NoError(err) + + permissions := sas.SharePermissions{ + Read: true, + Write: true, + Delete: true, + List: true, + Create: true, + } + expiry := time.Now().Add(time.Hour) + + _, err = shareClient.GetSASURL(permissions, expiry, nil) + 
_require.Equal(err, fileerror.MissingSharedKeyCredential) +} + +func (s *ShareRecordedTestsSuite) TestSASShareClientSignNegative() { + _require := require.New(s.T()) + accountName, accountKey := testcommon.GetGenericAccountInfo(testcommon.TestAccountDefault) + _require.Greater(len(accountName), 0) + _require.Greater(len(accountKey), 0) + + cred, err := share.NewSharedKeyCredential(accountName, accountKey) + _require.NoError(err) + + testName := s.T().Name() + shareName := testcommon.GenerateShareName(testName) + shareClient, err := share.NewClientWithSharedKeyCredential(fmt.Sprintf("https://%s.file.core.windows.net/%v", accountName, shareName), cred, nil) + _require.NoError(err) + + permissions := sas.SharePermissions{ + Read: true, + Write: true, + Delete: true, + List: true, + Create: true, + } + expiry := time.Time{} + + // zero expiry time + _, err = shareClient.GetSASURL(permissions, expiry, &share.GetSASURLOptions{StartTime: to.Ptr(time.Now())}) + _require.Equal(err.Error(), "service SAS is missing at least one of these: ExpiryTime or Permissions") + + // zero start and expiry time + _, err = shareClient.GetSASURL(permissions, expiry, &share.GetSASURLOptions{}) + _require.Equal(err.Error(), "service SAS is missing at least one of these: ExpiryTime or Permissions") + + // empty permissions + _, err = shareClient.GetSASURL(sas.SharePermissions{}, expiry, nil) + _require.Equal(err.Error(), "service SAS is missing at least one of these: ExpiryTime or Permissions") +} diff --git a/sdk/storage/azfile/share/constants.go b/sdk/storage/azfile/share/constants.go new file mode 100644 index 000000000000..231ab9e27e09 --- /dev/null +++ b/sdk/storage/azfile/share/constants.go @@ -0,0 +1,50 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 

package share

import "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/generated"

// AccessTier defines values for the access tier of the share.
type AccessTier = generated.ShareAccessTier

// Supported access tiers for a file share.
const (
	AccessTierCool                 AccessTier = generated.ShareAccessTierCool
	AccessTierHot                  AccessTier = generated.ShareAccessTierHot
	AccessTierTransactionOptimized AccessTier = generated.ShareAccessTierTransactionOptimized
)

// PossibleAccessTierValues returns the possible values for the AccessTier const type.
func PossibleAccessTierValues() []AccessTier {
	return generated.PossibleShareAccessTierValues()
}

// RootSquash defines values for the root squashing behavior on the share when NFS is enabled. If it's not specified, the default is NoRootSquash.
type RootSquash = generated.ShareRootSquash

// Supported root-squash behaviors for NFS shares.
const (
	RootSquashNoRootSquash RootSquash = generated.ShareRootSquashNoRootSquash
	RootSquashRootSquash   RootSquash = generated.ShareRootSquashRootSquash
	RootSquashAllSquash    RootSquash = generated.ShareRootSquashAllSquash
)

// PossibleRootSquashValues returns the possible values for the RootSquash const type.
func PossibleRootSquashValues() []RootSquash {
	return generated.PossibleShareRootSquashValues()
}

// DeleteSnapshotsOptionType defines values for DeleteSnapshotsOptionType.
type DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionType

// Options for deleting a share's snapshots together with the share itself.
const (
	DeleteSnapshotsOptionTypeInclude       DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionTypeInclude
	DeleteSnapshotsOptionTypeIncludeLeased DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionTypeIncludeLeased
)

// PossibleDeleteSnapshotsOptionTypeValues returns the possible values for the DeleteSnapshotsOptionType const type.
+func PossibleDeleteSnapshotsOptionTypeValues() []DeleteSnapshotsOptionType { + return generated.PossibleDeleteSnapshotsOptionTypeValues() +} diff --git a/sdk/storage/azfile/share/examples_test.go b/sdk/storage/azfile/share/examples_test.go new file mode 100644 index 000000000000..bb4739e9b151 --- /dev/null +++ b/sdk/storage/azfile/share/examples_test.go @@ -0,0 +1,464 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package share_test + +import ( + "context" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/sas" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/service" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/share" + "log" + "os" + "time" +) + +func handleError(err error) { + if err != nil { + log.Fatal(err.Error()) + } +} + +func Example_share_Client_NewClient() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + + shareName := "testshare" + shareURL := fmt.Sprintf("https://%s.file.core.windows.net/%s", accountName, shareName) + + cred, err := service.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + + shareClient, err := share.NewClientWithSharedKeyCredential(shareURL, cred, nil) + handleError(err) + + fmt.Println(shareClient.URL()) +} + +func Example_share_Client_NewClientFromConnectionString() { + // Your connection string can be obtained from the Azure Portal. 
+ connectionString, ok := os.LookupEnv("AZURE_STORAGE_CONNECTION_STRING") + if !ok { + log.Fatal("the environment variable 'AZURE_STORAGE_CONNECTION_STRING' could not be found") + } + + shareName := "testshare" + shareClient, err := share.NewClientFromConnectionString(connectionString, shareName, nil) + handleError(err) + + fmt.Println(shareClient.URL()) +} + +func Example_share_Client_NewDirectoryClient() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + + shareName := "testshare" + shareURL := fmt.Sprintf("https://%s.file.core.windows.net/%s", accountName, shareName) + + cred, err := service.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + + shareClient, err := share.NewClientWithSharedKeyCredential(shareURL, cred, nil) + handleError(err) + + dirName := "testdirectory" + dirClient := shareClient.NewDirectoryClient(dirName) + + fmt.Println(dirClient.URL()) +} + +func Example_share_Client_NewRootDirectoryClient() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + + shareName := "testshare" + shareURL := fmt.Sprintf("https://%s.file.core.windows.net/%s", accountName, shareName) + + cred, err := service.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + + shareClient, err := share.NewClientWithSharedKeyCredential(shareURL, cred, nil) + handleError(err) + + dirClient := shareClient.NewRootDirectoryClient() + + fmt.Println(dirClient.URL()) +} + +func Example_share_Client_CreateSnapshot() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + 
panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + + shareName := "testshare" + shareURL := fmt.Sprintf("https://%s.file.core.windows.net/%s", accountName, shareName) + + cred, err := service.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + + shareClient, err := share.NewClientWithSharedKeyCredential(shareURL, cred, nil) + handleError(err) + + snapResp, err := shareClient.CreateSnapshot(context.TODO(), nil) + handleError(err) + shareSnapshot := *snapResp.Snapshot + + snapshotShareClient, err := shareClient.WithSnapshot(shareSnapshot) + handleError(err) + + fmt.Println(snapshotShareClient.URL()) + + _, err = snapshotShareClient.GetProperties(context.TODO(), nil) + handleError(err) +} + +func Example_share_Client_Create() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + + shareName := "testshare" + shareURL := fmt.Sprintf("https://%s.file.core.windows.net/%s", accountName, shareName) + + cred, err := service.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + + shareClient, err := share.NewClientWithSharedKeyCredential(shareURL, cred, nil) + handleError(err) + + _, err = shareClient.Create(context.TODO(), nil) + handleError(err) +} + +func Example_share_Client_Delete() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + + shareName := "testshare" + shareURL := fmt.Sprintf("https://%s.file.core.windows.net/%s", accountName, shareName) + + cred, err := 
service.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + + shareClient, err := share.NewClientWithSharedKeyCredential(shareURL, cred, nil) + handleError(err) + + _, err = shareClient.Delete(context.TODO(), nil) + handleError(err) +} + +func Example_share_Client_Restore() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + + shareName := "testshare" + serviceURL := fmt.Sprintf("https://%s.file.core.windows.net/", accountName) + + cred, err := service.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + + svcClient, err := service.NewClientWithSharedKeyCredential(serviceURL, cred, nil) + handleError(err) + + shareClient := svcClient.NewShareClient(shareName) + + // get share version for restore operation + pager := svcClient.NewListSharesPager(&service.ListSharesOptions{ + Include: service.ListSharesInclude{Deleted: true}, // Include deleted shares in the result + }) + + for pager.More() { + resp, err := pager.NextPage(context.Background()) + handleError(err) + for _, s := range resp.Shares { + if s.Deleted != nil && *s.Deleted { + _, err = shareClient.Restore(context.TODO(), *s.Version, nil) + handleError(err) + } + } + } +} + +func Example_share_Client_GetProperties() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + + shareName := "testshare" + shareURL := fmt.Sprintf("https://%s.file.core.windows.net/%s", accountName, shareName) + + cred, err := service.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + + shareClient, err := share.NewClientWithSharedKeyCredential(shareURL, 
cred, nil) + handleError(err) + + _, err = shareClient.GetProperties(context.TODO(), nil) + handleError(err) +} + +func Example_share_Client_SetProperties() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + + shareName := "testshare" + shareURL := fmt.Sprintf("https://%s.file.core.windows.net/%s", accountName, shareName) + + cred, err := service.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + + shareClient, err := share.NewClientWithSharedKeyCredential(shareURL, cred, nil) + handleError(err) + + _, err = shareClient.SetProperties(context.TODO(), &share.SetPropertiesOptions{ + Quota: to.Ptr(int32(1000)), + AccessTier: to.Ptr(share.AccessTierHot), + }) + handleError(err) +} + +func Example_share_Client_AccessPolicy() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + + shareName := "testshare" + shareURL := fmt.Sprintf("https://%s.file.core.windows.net/%s", accountName, shareName) + + cred, err := service.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + + shareClient, err := share.NewClientWithSharedKeyCredential(shareURL, cred, nil) + handleError(err) + + permission := share.AccessPolicyPermission{Read: true, Write: true, Create: true, Delete: true, List: true}.String() + permissions := []*share.SignedIdentifier{ + { + ID: to.Ptr("1"), + AccessPolicy: &share.AccessPolicy{ + Start: to.Ptr(time.Now()), + Expiry: to.Ptr(time.Now().Add(time.Hour)), + Permission: &permission, + }, + }} + + _, err = shareClient.SetAccessPolicy(context.TODO(), &share.SetAccessPolicyOptions{ + ShareACL: 
permissions, + }) + handleError(err) + + resp, err := shareClient.GetAccessPolicy(context.TODO(), nil) + handleError(err) + + fmt.Println(*resp.SignedIdentifiers[0].AccessPolicy.Permission) +} + +func Example_share_Client_CreateGetPermission() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + + shareName := "testshare" + shareURL := fmt.Sprintf("https://%s.file.core.windows.net/%s", accountName, shareName) + + cred, err := service.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + + shareClient, err := share.NewClientWithSharedKeyCredential(shareURL, cred, nil) + handleError(err) + + testSDDL := `O:S-1-5-32-548G:S-1-5-21-397955417-626881126-188441444-512D:(A;;RPWPCCDCLCSWRCWDWOGA;;;S-1-0-0)` + createResp, err := shareClient.CreatePermission(context.TODO(), testSDDL, nil) + handleError(err) + + getResp, err := shareClient.GetPermission(context.TODO(), *createResp.FilePermissionKey, nil) + handleError(err) + fmt.Println(*getResp.Permission) +} + +func Example_share_Client_SetMetadata() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + + shareName := "testshare" + shareURL := fmt.Sprintf("https://%s.file.core.windows.net/%s", accountName, shareName) + + cred, err := service.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + + shareClient, err := share.NewClientWithSharedKeyCredential(shareURL, cred, nil) + handleError(err) + + md := map[string]*string{ + "Foo": to.Ptr("FooValuE"), + "Bar": to.Ptr("bArvaLue"), + } + _, err = shareClient.SetMetadata(context.TODO(), &share.SetMetadataOptions{ + 
Metadata: md, + }) + handleError(err) + + resp, err := shareClient.GetProperties(context.TODO(), nil) + handleError(err) + for k, v := range resp.Metadata { + fmt.Printf("%v : %v\n", k, *v) + } +} + +func Example_share_Client_GetStatistics() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + + shareName := "testshare" + shareURL := fmt.Sprintf("https://%s.file.core.windows.net/%s", accountName, shareName) + + cred, err := service.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + + shareClient, err := share.NewClientWithSharedKeyCredential(shareURL, cred, nil) + handleError(err) + + getStats, err := shareClient.GetStatistics(context.Background(), nil) + handleError(err) + fmt.Println(*getStats.ShareUsageBytes) +} + +func Example_share_Client_GetSASURL() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + + shareName := "testshare" + shareURL := fmt.Sprintf("https://%s.file.core.windows.net/%s", accountName, shareName) + + cred, err := service.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + + shareClient, err := share.NewClientWithSharedKeyCredential(shareURL, cred, nil) + handleError(err) + + permissions := sas.SharePermissions{ + Read: true, + Write: true, + Delete: true, + List: true, + Create: true, + } + expiry := time.Now().Add(time.Hour) + + shareSASURL, err := shareClient.GetSASURL(permissions, expiry, nil) + handleError(err) + + fmt.Println("SAS URL: ", shareSASURL) + + shareSASClient, err := share.NewClientWithNoCredential(shareSASURL, nil) + handleError(err) + + var dirs, files 
[]string + pager := shareSASClient.NewRootDirectoryClient().NewListFilesAndDirectoriesPager(nil) + for pager.More() { + resp, err := pager.NextPage(context.Background()) + handleError(err) + + for _, d := range resp.Segment.Directories { + dirs = append(dirs, *d.Name) + } + for _, f := range resp.Segment.Files { + files = append(files, *f.Name) + } + } + + fmt.Println("Directories:") + for _, d := range dirs { + fmt.Println(d) + } + + fmt.Println("Files:") + for _, f := range files { + fmt.Println(f) + } +} diff --git a/sdk/storage/azfile/share/models.go b/sdk/storage/azfile/share/models.go new file mode 100644 index 000000000000..5b200ce9429b --- /dev/null +++ b/sdk/storage/azfile/share/models.go @@ -0,0 +1,312 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package share + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/generated" + "time" +) + +// SharedKeyCredential contains an account's name and its primary or secondary key. +type SharedKeyCredential = exported.SharedKeyCredential + +// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the +// storage account's name and either its primary or secondary key. +func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) { + return exported.NewSharedKeyCredential(accountName, accountKey) +} + +// --------------------------------------------------------------------------------------------------------------------- + +// CreateOptions contains the optional parameters for the Client.Create method. +type CreateOptions struct { + // Specifies the access tier of the share. + AccessTier *AccessTier + // Protocols to enable on the share. 
+ EnabledProtocols *string + // A name-value pair to associate with a file storage object. + Metadata map[string]*string + // Specifies the maximum size of the share, in gigabytes. + Quota *int32 + // Root squash to set on the share. Only valid for NFS shares. + RootSquash *RootSquash +} + +func (o *CreateOptions) format() *generated.ShareClientCreateOptions { + if o == nil { + return nil + } + + return &generated.ShareClientCreateOptions{ + AccessTier: o.AccessTier, + EnabledProtocols: o.EnabledProtocols, + Metadata: o.Metadata, + Quota: o.Quota, + RootSquash: o.RootSquash, + } +} + +// --------------------------------------------------------------------------------------------------------------------- + +// DeleteOptions contains the optional parameters for the Client.Delete method. +type DeleteOptions struct { + // Specifies the option include to delete the base share and all of its snapshots. + DeleteSnapshots *DeleteSnapshotsOptionType + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query. + ShareSnapshot *string + // LeaseAccessConditions contains optional parameters to access leased entity. + LeaseAccessConditions *LeaseAccessConditions +} + +func (o *DeleteOptions) format() (*generated.ShareClientDeleteOptions, *LeaseAccessConditions) { + if o == nil { + return nil, nil + } + + return &generated.ShareClientDeleteOptions{ + DeleteSnapshots: o.DeleteSnapshots, + Sharesnapshot: o.ShareSnapshot, + }, o.LeaseAccessConditions +} + +// LeaseAccessConditions contains optional parameters to access leased entity. +type LeaseAccessConditions = generated.LeaseAccessConditions + +// --------------------------------------------------------------------------------------------------------------------- + +// RestoreOptions contains the optional parameters for the Client.Restore method. 
+type RestoreOptions struct { + // placeholder for future options +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetPropertiesOptions contains the optional parameters for the Client.GetProperties method. +type GetPropertiesOptions struct { + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query. + ShareSnapshot *string + // LeaseAccessConditions contains optional parameters to access leased entity. + LeaseAccessConditions *LeaseAccessConditions +} + +func (o *GetPropertiesOptions) format() (*generated.ShareClientGetPropertiesOptions, *LeaseAccessConditions) { + if o == nil { + return nil, nil + } + + return &generated.ShareClientGetPropertiesOptions{ + Sharesnapshot: o.ShareSnapshot, + }, o.LeaseAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// SetPropertiesOptions contains the optional parameters for the Client.SetProperties method. +type SetPropertiesOptions struct { + // Specifies the access tier of the share. + AccessTier *AccessTier + // Specifies the maximum size of the share, in gigabytes. + Quota *int32 + // Root squash to set on the share. Only valid for NFS shares. + RootSquash *RootSquash + // LeaseAccessConditions contains optional parameters to access leased entity. 
+ LeaseAccessConditions *LeaseAccessConditions +} + +func (o *SetPropertiesOptions) format() (*generated.ShareClientSetPropertiesOptions, *LeaseAccessConditions) { + if o == nil { + return nil, nil + } + + return &generated.ShareClientSetPropertiesOptions{ + AccessTier: o.AccessTier, + Quota: o.Quota, + RootSquash: o.RootSquash, + }, o.LeaseAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// CreateSnapshotOptions contains the optional parameters for the Client.CreateSnapshot method. +type CreateSnapshotOptions struct { + // A name-value pair to associate with a file storage object. + Metadata map[string]*string +} + +func (o *CreateSnapshotOptions) format() *generated.ShareClientCreateSnapshotOptions { + if o == nil { + return nil + } + + return &generated.ShareClientCreateSnapshotOptions{ + Metadata: o.Metadata, + } +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetAccessPolicyOptions contains the optional parameters for the Client.GetAccessPolicy method. +type GetAccessPolicyOptions struct { + // LeaseAccessConditions contains optional parameters to access leased entity. + LeaseAccessConditions *LeaseAccessConditions +} + +func (o *GetAccessPolicyOptions) format() (*generated.ShareClientGetAccessPolicyOptions, *LeaseAccessConditions) { + if o == nil { + return nil, nil + } + + return nil, o.LeaseAccessConditions +} + +// SignedIdentifier - Signed identifier. +type SignedIdentifier = generated.SignedIdentifier + +// AccessPolicy - An Access policy. +type AccessPolicy = generated.AccessPolicy + +// AccessPolicyPermission type simplifies creating the permissions string for a share's access policy. +// Initialize an instance of this type and then call its String method to set AccessPolicy's permission field. 
+type AccessPolicyPermission = exported.AccessPolicyPermission + +// --------------------------------------------------------------------------------------------------------------------- + +// SetAccessPolicyOptions contains the optional parameters for the Client.SetAccessPolicy method. +type SetAccessPolicyOptions struct { + // Specifies the ACL for the share. + ShareACL []*SignedIdentifier + // LeaseAccessConditions contains optional parameters to access leased entity. + LeaseAccessConditions *LeaseAccessConditions +} + +func (o *SetAccessPolicyOptions) format() (*generated.ShareClientSetAccessPolicyOptions, []*SignedIdentifier, *LeaseAccessConditions, error) { + if o == nil { + return nil, nil, nil, nil + } + + if o.ShareACL != nil { + for _, si := range o.ShareACL { + err := formatTime(si) + if err != nil { + return nil, nil, nil, err + } + } + } + + return nil, o.ShareACL, o.LeaseAccessConditions, nil +} + +func formatTime(si *SignedIdentifier) error { + if si.AccessPolicy == nil { + return nil + } + + if si.AccessPolicy.Start != nil { + st, err := time.Parse(time.RFC3339, si.AccessPolicy.Start.UTC().Format(time.RFC3339)) + if err != nil { + return err + } + si.AccessPolicy.Start = &st + } + if si.AccessPolicy.Expiry != nil { + et, err := time.Parse(time.RFC3339, si.AccessPolicy.Expiry.UTC().Format(time.RFC3339)) + if err != nil { + return err + } + si.AccessPolicy.Expiry = &et + } + + return nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// CreatePermissionOptions contains the optional parameters for the Client.CreatePermission method. 
+type CreatePermissionOptions struct { + // placeholder for future options +} + +func (o *CreatePermissionOptions) format(sharePermission string) (Permission, *generated.ShareClientCreatePermissionOptions) { + return Permission{ + Permission: &sharePermission, + }, nil +} + +// Permission - A permission (a security descriptor) at the share level. +type Permission = generated.SharePermission + +// --------------------------------------------------------------------------------------------------------------------- + +// GetPermissionOptions contains the optional parameters for the Client.GetPermission method. +type GetPermissionOptions struct { + // placeholder for future options +} + +func (o *GetPermissionOptions) format() *generated.ShareClientGetPermissionOptions { + return nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// SetMetadataOptions contains the optional parameters for the Client.SetMetadata method. +type SetMetadataOptions struct { + // A name-value pair to associate with a file storage object. + Metadata map[string]*string + // LeaseAccessConditions contains optional parameters to access leased entity. + LeaseAccessConditions *LeaseAccessConditions +} + +func (o *SetMetadataOptions) format() (*generated.ShareClientSetMetadataOptions, *LeaseAccessConditions) { + if o == nil { + return nil, nil + } + + return &generated.ShareClientSetMetadataOptions{ + Metadata: o.Metadata, + }, o.LeaseAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetStatisticsOptions contains the optional parameters for the Client.GetStatistics method. +type GetStatisticsOptions struct { + // LeaseAccessConditions contains optional parameters to access leased entity. 
+ LeaseAccessConditions *LeaseAccessConditions +} + +func (o *GetStatisticsOptions) format() (*generated.ShareClientGetStatisticsOptions, *LeaseAccessConditions) { + if o == nil { + return nil, nil + } + + return nil, o.LeaseAccessConditions +} + +// Stats - Stats for the share. +type Stats = generated.ShareStats + +// --------------------------------------------------------------------------------------------------------------------- + +// GetSASURLOptions contains the optional parameters for the Client.GetSASURL method. +type GetSASURLOptions struct { + StartTime *time.Time +} + +func (o *GetSASURLOptions) format() time.Time { + if o == nil { + return time.Time{} + } + + var st time.Time + if o.StartTime != nil { + st = o.StartTime.UTC() + } else { + st = time.Time{} + } + return st +} diff --git a/sdk/storage/azfile/share/responses.go b/sdk/storage/azfile/share/responses.go new file mode 100644 index 000000000000..2932e7ec93a9 --- /dev/null +++ b/sdk/storage/azfile/share/responses.go @@ -0,0 +1,45 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package share + +import "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/generated" + +// CreateResponse contains the response from method Client.Create. +type CreateResponse = generated.ShareClientCreateResponse + +// DeleteResponse contains the response from method Client.Delete. +type DeleteResponse = generated.ShareClientDeleteResponse + +// RestoreResponse contains the response from method Client.Restore. +type RestoreResponse = generated.ShareClientRestoreResponse + +// GetPropertiesResponse contains the response from method Client.GetProperties. +type GetPropertiesResponse = generated.ShareClientGetPropertiesResponse + +// SetPropertiesResponse contains the response from method Client.SetProperties. 
type SetPropertiesResponse = generated.ShareClientSetPropertiesResponse

// CreateSnapshotResponse contains the response from method Client.CreateSnapshot.
type CreateSnapshotResponse = generated.ShareClientCreateSnapshotResponse

// GetAccessPolicyResponse contains the response from method Client.GetAccessPolicy.
type GetAccessPolicyResponse = generated.ShareClientGetAccessPolicyResponse

// SetAccessPolicyResponse contains the response from method Client.SetAccessPolicy.
type SetAccessPolicyResponse = generated.ShareClientSetAccessPolicyResponse

// CreatePermissionResponse contains the response from method Client.CreatePermission.
type CreatePermissionResponse = generated.ShareClientCreatePermissionResponse

// GetPermissionResponse contains the response from method Client.GetPermission.
type GetPermissionResponse = generated.ShareClientGetPermissionResponse

// SetMetadataResponse contains the response from method Client.SetMetadata.
type SetMetadataResponse = generated.ShareClientSetMetadataResponse

// GetStatisticsResponse contains the response from method Client.GetStatistics.
type GetStatisticsResponse = generated.ShareClientGetStatisticsResponse
diff --git a/sdk/storage/azfile/test-resources.json b/sdk/storage/azfile/test-resources.json
new file mode 100644
index 000000000000..c6259f7ab02f
--- /dev/null
+++ b/sdk/storage/azfile/test-resources.json
{
  "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
  "contentVersion": "1.0.0.0",
  "parameters": {
    "baseName": {
      "type": "String"
    },
    "tenantId": {
      "type": "string",
      "defaultValue": "72f988bf-86f1-41af-91ab-2d7cd011db47",
      "metadata": {
        "description": "The tenant ID to which the application and resources belong."
      }
    },
    "testApplicationOid": {
      "type": "string",
      "metadata": {
        "description": "The principal to assign the role to. This is application object id."
+ } + } + }, + "variables": { + "mgmtApiVersion": "2022-09-01", + "authorizationApiVersion": "2018-09-01-preview", + "blobDataContributorRoleId": "[concat('/subscriptions/', subscription().subscriptionId, '/providers/Microsoft.Authorization/roleDefinitions/ba92f5b4-2d11-453d-a403-e96b0029c9fe')]", + "contributorRoleId": "[concat('/subscriptions/', subscription().subscriptionId, '/providers/Microsoft.Authorization/roleDefinitions/b24988ac-6180-42a0-ab88-20f7382dd24c')]", + "blobDataOwnerRoleId": "[concat('/subscriptions/', subscription().subscriptionId, '/providers/Microsoft.Authorization/roleDefinitions/b7e6dc6d-f1e8-4753-8033-0f276bb0955b')]", + "primaryAccountName": "[concat(parameters('baseName'), 'prim')]", + "immutableAccountName": "[concat(parameters('baseName'), 'imm')]", + "primaryEncryptionScopeName": "encryptionScope", + "primaryEncryptionScope": "[concat(parameters('baseName'), 'prim', concat('/', variables('primaryEncryptionScopeName')))]", + "secondaryAccountName": "[concat(parameters('baseName'), 'sec')]", + "premiumAccountName": "[concat(parameters('baseName'), 'prem')]", + "dataLakeAccountName": "[concat(parameters('baseName'), 'dtlk')]", + "softDeleteAccountName": "[concat(parameters('baseName'), 'sftdl')]", + "premiumFileAccountName": "[concat(parameters('baseName'), 'pfile')]", + "webjobsPrimaryAccountName": "[concat(parameters('baseName'), 'wjprim')]", + "webjobsSecondaryAccountName": "[concat(parameters('baseName'), 'wjsec')]", + "location": "[resourceGroup().location]", + "resourceGroupName": "[resourceGroup().name]", + "subscriptionId": "[subscription().subscriptionId]", + "encryption": { + "services": { + "file": { + "enabled": true + }, + "blob": { + "enabled": true + } + }, + "keySource": "Microsoft.Storage" + }, + "networkAcls": { + "bypass": "AzureServices", + "virtualNetworkRules": [], + "ipRules": [], + "defaultAction": "Allow" + } + }, + "resources": [ + { + "type": "Microsoft.Authorization/roleAssignments", + "apiVersion": 
"[variables('authorizationApiVersion')]", + "name": "[guid(concat('dataContributorRoleId', resourceGroup().id))]", + "properties": { + "roleDefinitionId": "[variables('blobDataContributorRoleId')]", + "principalId": "[parameters('testApplicationOid')]" + } + }, + { + "type": "Microsoft.Authorization/roleAssignments", + "apiVersion": "[variables('authorizationApiVersion')]", + "name": "[guid(concat('contributorRoleId', resourceGroup().id))]", + "properties": { + "roleDefinitionId": "[variables('contributorRoleId')]", + "principalId": "[parameters('testApplicationOid')]" + } + }, + { + "type": "Microsoft.Authorization/roleAssignments", + "apiVersion": "[variables('authorizationApiVersion')]", + "name": "[guid(concat('blobDataOwnerRoleId', resourceGroup().id))]", + "properties": { + "roleDefinitionId": "[variables('blobDataOwnerRoleId')]", + "principalId": "[parameters('testApplicationOid')]" + } + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('primaryAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts/blobServices", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[concat(variables('primaryAccountName'), '/default')]", + "properties": { + "isVersioningEnabled": true, + "lastAccessTimeTrackingPolicy": { + "enable": true, + "name": "AccessTimeTracking", + "trackingGranularityInDays": 1, + "blobType": [ + "blockBlob" + ] + } + }, + "dependsOn": [ + "[variables('primaryAccountName')]" + ] + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('immutableAccountName')]", + "location": 
"[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot", + "immutableStorageWithVersioning": { + "enabled": true + } + } + }, + { + "type": "Microsoft.Storage/storageAccounts/blobServices", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[concat(variables('immutableAccountName'), '/default')]", + "properties": { + "isVersioningEnabled": true, + "lastAccessTimeTrackingPolicy": { + "enable": true, + "name": "AccessTimeTracking", + "trackingGranularityInDays": 1, + "blobType": [ + "blockBlob" + ] + } + }, + "dependsOn": [ + "[variables('immutableAccountName')]" + ] + }, + { + "type": "Microsoft.Storage/storageAccounts/encryptionScopes", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('primaryEncryptionScope')]", + "properties": { + "source": "Microsoft.Storage", + "state": "Enabled" + }, + "dependsOn": [ + "[variables('primaryAccountName')]" + ] + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('secondaryAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('premiumAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Premium_LRS", + "tier": "Premium" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + 
"accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('dataLakeAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "isHnsEnabled": true, + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts/blobServices", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[concat(variables('dataLakeAccountName'), '/default')]", + "properties": { + "containerDeleteRetentionPolicy": { + "enabled": true, + "days": 1 + } + }, + "dependsOn": [ + "[variables('dataLakeAccountName')]" + ] + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('softDeleteAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts/blobServices", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[concat(variables('softDeleteAccountName'), '/default')]", + "properties": { + "isVersioningEnabled": true, + "deleteRetentionPolicy": { + "allowPermanentDelete": true, + "enabled": true, + "days": 1 + }, + "containerDeleteRetentionPolicy": { + "enabled": true, + "days": 1 + } + }, + "dependsOn": [ + "[variables('softDeleteAccountName')]" + ] + }, + { + "type": "Microsoft.Storage/storageAccounts/fileServices", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[concat(variables('softDeleteAccountName'), '/default')]", + "properties": { + 
"shareDeleteRetentionPolicy": { + "enabled": true, + "days": 1 + } + }, + "dependsOn": [ + "[variables('softDeleteAccountName')]" + ] + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('premiumFileAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Premium_LRS", + "tier": "Premium" + }, + "kind": "FileStorage", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('webjobsPrimaryAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('webjobsSecondaryAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + } + ], + "functions": [ + { + "namespace": "url", + "members": { + "serviceEndpointSuffix": { + "parameters": [ + { + "name": "endpoint", + "type": "string" + } + ], + "output": { + "type": "string", + "value": "[substring(parameters('endpoint'), add(indexOf(parameters('endpoint'), '.'),1), sub(length(parameters('endpoint')), add(indexOf(parameters('endpoint'), '.'),2)))]" + } + } + } + }, + { + "namespace": "connectionString", + "members": { + "create": { + "parameters": [ + { + 
"name": "accountName", + "type": "string" + }, + { + "name": "accountKey", + "type": "string" + }, + { + "name": "blobEndpoint", + "type": "string" + }, + { + "name": "queueEndpoint", + "type": "string" + }, + { + "name": "fileEndpoint", + "type": "string" + }, + { + "name": "tableEndpoint", + "type": "string" + } + ], + "output": { + "type": "string", + "value": "[concat('DefaultEndpointsProtocol=https;AccountName=', parameters('accountName'), ';AccountKey=', parameters('accountKey'), ';BlobEndpoint=', parameters('blobEndpoint'), ';QueueEndpoint=', parameters('queueEndpoint'), ';FileEndpoint=', parameters('fileEndpoint'), ';TableEndpoint=', parameters('tableEndpoint'))]" + } + } + } + } + ], + "outputs": { + "AZURE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('primaryAccountName')]" + }, + "AZURE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "PRIMARY_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]" + }, + "PRIMARY_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]" + }, + "PRIMARY_STORAGE_ACCOUNT_QUEUE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue)]" + }, + "PRIMARY_STORAGE_ACCOUNT_TABLE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', 
variables('primaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, + "SECONDARY_AZURE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('secondaryAccountName')]" + }, + "SECONDARY_AZURE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "SECONDARY_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]" + }, + "SECONDARY_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]" + }, + "SECONDARY_STORAGE_ACCOUNT_QUEUE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue)]" + }, + "SECONDARY_STORAGE_ACCOUNT_TABLE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, + "BLOB_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('secondaryAccountName')]" + }, + "BLOB_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "PREMIUM_AZURE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('premiumAccountName')]" + }, + "PREMIUM_AZURE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": 
"[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('premiumAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "PREMIUM_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('premiumAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]" + }, + "DATALAKE_AZURE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('dataLakeAccountName')]" + }, + "DATALAKE_AZURE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "DATALAKE_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]" + }, + "DATALAKE_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]" + }, + "DATALAKE_STORAGE_ACCOUNT_QUEUE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue)]" + }, + "DATALAKE_STORAGE_ACCOUNT_TABLE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, + "IMMUTABLE_AZURE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('immutableAccountName')]" + }, + "IMMUTABLE_AZURE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": 
"[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('immutableAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "IMMUTABLE_AZURE_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('immutableAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]" + }, + "IMMUTABLE_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('immutableAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]" + }, + "IMMUTABLE_AZURE_STORAGE_ACCOUNT_QUEUE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('immutableAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue)]" + }, + "IMMUTABLE_AZURE_STORAGE_ACCOUNT_TABLE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('immutableAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, + "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('softDeleteAccountName')]" + }, + "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('softDeleteAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('softDeleteAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]" + }, + "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": { + "type": "string", + "value": 
"[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('softDeleteAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]" + }, + "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_QUEUE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('softDeleteAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue)]" + }, + "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_TABLE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('softDeleteAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, + "PREMIUM_FILE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('premiumFileAccountName')]" + }, + "PREMIUM_FILE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('premiumFileAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "PREMIUM_FILE_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('premiumFileAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]" + }, + "AZUREWEBJOBSSTORAGE": { + "type": "string", + "value": "[connectionString.create(variables('webjobsPrimaryAccountName'), listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsPrimaryAccountName')), variables('mgmtApiVersion')).keys[0].value, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsPrimaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsPrimaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue, reference(resourceId('Microsoft.Storage/storageAccounts', 
variables('webjobsPrimaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsPrimaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, + "AZUREWEBJOBSSECONDARYSTORAGE": { + "type": "string", + "value": "[connectionString.create(variables('webjobsSecondaryAccountName'), listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsSecondaryAccountName')), variables('mgmtApiVersion')).keys[0].value, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsSecondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsSecondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsSecondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsSecondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, + "RESOURCE_GROUP_NAME": { + "type": "string", + "value": "[variables('resourceGroupName')]" + }, + "SUBSCRIPTION_ID": { + "type": "string", + "value": "[variables('subscriptionId')]" + }, + "LOCATION": { + "type": "string", + "value": "[variables('location')]" + }, + "AZURE_STORAGE_ENCRYPTION_SCOPE": { + "type": "string", + "value": "[variables('primaryEncryptionScopeName')]" + } + } + } + \ No newline at end of file