diff --git a/sdk/storage/azdatalake/assets.json b/sdk/storage/azdatalake/assets.json new file mode 100644 index 000000000000..cc6130dbb776 --- /dev/null +++ b/sdk/storage/azdatalake/assets.json @@ -0,0 +1,6 @@ +{ + "AssetsRepo": "Azure/azure-sdk-assets", + "AssetsRepoPrefixPath": "go", + "TagPrefix": "go/storage/azdatalake", + "Tag": "go/storage/azdatalake_e05ec93d89" +} \ No newline at end of file diff --git a/sdk/storage/azdatalake/common.go b/sdk/storage/azdatalake/common.go index fb79dcc0dc7a..fc67050f51ee 100644 --- a/sdk/storage/azdatalake/common.go +++ b/sdk/storage/azdatalake/common.go @@ -7,17 +7,30 @@ package azdatalake import ( - "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/sas" ) -// AccessConditions identifies container-specific access conditions which you optionally set. -type AccessConditions struct { - ModifiedAccessConditions *ModifiedAccessConditions - LeaseAccessConditions *LeaseAccessConditions +// SharedKeyCredential contains an account's name and its primary or secondary key. +type SharedKeyCredential = exported.SharedKeyCredential + +// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the +// storage account's name and either its primary or secondary key. +func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) { + return exported.NewSharedKeyCredential(accountName, accountKey) } -// LeaseAccessConditions contains optional parameters to access leased entity. -type LeaseAccessConditions = generated.LeaseAccessConditions +// URLParts object represents the components that make up an Azure Storage Container/Blob URL. +// NOTE: Changing any SAS-related field requires computing a new SAS signature. 
+type URLParts = sas.URLParts + +// ParseURL parses a URL initializing URLParts' fields including any SAS-related & snapshot query parameters. Any other +// query parameters remain in the UnparsedParams field. This method overwrites all fields in the URLParts object. +func ParseURL(u string) (URLParts, error) { + return sas.ParseURL(u) +} -// ModifiedAccessConditions contains a group of parameters for specifying access conditions. -type ModifiedAccessConditions = generated.ModifiedAccessConditions +// HTTPRange defines a range of bytes within an HTTP resource, starting at offset and +// ending at offset+count. A zero-value HTTPRange indicates the entire resource. An HTTPRange +// which has an offset but no zero value count indicates from the offset to the resource's end. +type HTTPRange = exported.HTTPRange diff --git a/sdk/storage/azdatalake/datalakeerror/error_codes.go b/sdk/storage/azdatalake/datalakeerror/error_codes.go new file mode 100644 index 000000000000..2acc2e79fdc6 --- /dev/null +++ b/sdk/storage/azdatalake/datalakeerror/error_codes.go @@ -0,0 +1,181 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package datalakeerror + +import ( + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" +) + +// HasCode returns true if the provided error is an *azcore.ResponseError +// with its ErrorCode field equal to one of the specified Codes. 
+func HasCode(err error, codes ...Code) bool { + var respErr *azcore.ResponseError + if !errors.As(err, &respErr) { + return false + } + + for _, code := range codes { + if respErr.ErrorCode == string(code) { + return true + } + } + + return false +} + +type StorageErrorCode string + +// Code - Error codes returned by the service +type Code = bloberror.Code + +const ( + ContentLengthMustBeZero StorageErrorCode = "ContentLengthMustBeZero" + PathAlreadyExists StorageErrorCode = "PathAlreadyExists" + InvalidFlushPosition StorageErrorCode = "InvalidFlushPosition" + InvalidPropertyName StorageErrorCode = "InvalidPropertyName" + InvalidSourceURI StorageErrorCode = "InvalidSourceUri" + UnsupportedRestVersion StorageErrorCode = "UnsupportedRestVersion" + FileSystemNotFound StorageErrorCode = "FilesystemNotFound" + PathNotFound StorageErrorCode = "PathNotFound" + RenameDestinationParentPathNotFound StorageErrorCode = "RenameDestinationParentPathNotFound" + SourcePathNotFound StorageErrorCode = "SourcePathNotFound" + DestinationPathIsBeingDeleted StorageErrorCode = "DestinationPathIsBeingDeleted" + FileSystemAlreadyExists StorageErrorCode = "FilesystemAlreadyExists" + FileSystemBeingDeleted StorageErrorCode = "FilesystemBeingDeleted" + InvalidDestinationPath StorageErrorCode = "InvalidDestinationPath" + InvalidRenameSourcePath StorageErrorCode = "InvalidRenameSourcePath" + InvalidSourceOrDestinationResourceType StorageErrorCode = "InvalidSourceOrDestinationResourceType" + LeaseIsAlreadyBroken StorageErrorCode = "LeaseIsAlreadyBroken" + LeaseNameMismatch StorageErrorCode = "LeaseNameMismatch" + PathConflict StorageErrorCode = "PathConflict" + SourcePathIsBeingDeleted StorageErrorCode = "SourcePathIsBeingDeleted" +) + +const ( + AccountAlreadyExists Code = "AccountAlreadyExists" + AccountBeingCreated Code = "AccountBeingCreated" + AccountIsDisabled Code = "AccountIsDisabled" + AppendPositionConditionNotMet Code = "AppendPositionConditionNotMet" + AuthenticationFailed Code = 
"AuthenticationFailed" + AuthorizationFailure Code = "AuthorizationFailure" + AuthorizationPermissionMismatch Code = "AuthorizationPermissionMismatch" + AuthorizationProtocolMismatch Code = "AuthorizationProtocolMismatch" + AuthorizationResourceTypeMismatch Code = "AuthorizationResourceTypeMismatch" + AuthorizationServiceMismatch Code = "AuthorizationServiceMismatch" + AuthorizationSourceIPMismatch Code = "AuthorizationSourceIPMismatch" + BlobAlreadyExists Code = "BlobAlreadyExists" + PathArchived Code = "BlobArchived" + PathBeingRehydrated Code = "BlobBeingRehydrated" + PathImmutableDueToPolicy Code = "BlobImmutableDueToPolicy" + PathNotArchived Code = "BlobNotArchived" + BlobNotFound Code = "BlobNotFound" + PathOverwritten Code = "BlobOverwritten" + PathTierInadequateForContentLength Code = "BlobTierInadequateForContentLength" + PathUsesCustomerSpecifiedEncryption Code = "BlobUsesCustomerSpecifiedEncryption" + BlockCountExceedsLimit Code = "BlockCountExceedsLimit" + BlockListTooLong Code = "BlockListTooLong" + CannotChangeToLowerTier Code = "CannotChangeToLowerTier" + CannotVerifyCopySource Code = "CannotVerifyCopySource" + ConditionHeadersNotSupported Code = "ConditionHeadersNotSupported" + ConditionNotMet Code = "ConditionNotMet" + FilesystemAlreadyExists Code = "ContainerAlreadyExists" + ContainerBeingDeleted Code = "ContainerBeingDeleted" + ContainerDisabled Code = "ContainerDisabled" + ContainerNotFound Code = "ContainerNotFound" + ContentLengthLargerThanTierLimit Code = "ContentLengthLargerThanTierLimit" + CopyAcrossAccountsNotSupported Code = "CopyAcrossAccountsNotSupported" + CopyIDMismatch Code = "CopyIdMismatch" + EmptyMetadataKey Code = "EmptyMetadataKey" + FeatureVersionMismatch Code = "FeatureVersionMismatch" + IncrementalCopyPathMismatch Code = "IncrementalCopyBlobMismatch" + IncrementalCopyOfEralierVersionSnapshotNotAllowed Code = "IncrementalCopyOfEralierVersionSnapshotNotAllowed" + IncrementalCopySourceMustBeSnapshot Code = 
"IncrementalCopySourceMustBeSnapshot" + InfiniteLeaseDurationRequired Code = "InfiniteLeaseDurationRequired" + InsufficientAccountPermissions Code = "InsufficientAccountPermissions" + InternalError Code = "InternalError" + InvalidAuthenticationInfo Code = "InvalidAuthenticationInfo" + InvalidBlobOrBlock Code = "InvalidBlobOrBlock" + InvalidPathTier Code = "InvalidBlobTier" + InvalidPathType Code = "InvalidBlobType" + InvalidBlockID Code = "InvalidBlockId" + InvalidBlockList Code = "InvalidBlockList" + InvalidHTTPVerb Code = "InvalidHttpVerb" + InvalidHeaderValue Code = "InvalidHeaderValue" + InvalidInput Code = "InvalidInput" + InvalidMD5 Code = "InvalidMd5" + InvalidMetadata Code = "InvalidMetadata" + InvalidOperation Code = "InvalidOperation" + InvalidPageRange Code = "InvalidPageRange" + InvalidQueryParameterValue Code = "InvalidQueryParameterValue" + InvalidRange Code = "InvalidRange" + InvalidResourceName Code = "InvalidResourceName" + InvalidSourcePathType Code = "InvalidSourceBlobType" + InvalidSourcePathURL Code = "InvalidSourceBlobUrl" + InvalidURI Code = "InvalidUri" + InvalidVersionForPageBlobOperation Code = "InvalidVersionForPageBlobOperation" + InvalidXMLDocument Code = "InvalidXmlDocument" + InvalidXMLNodeValue Code = "InvalidXmlNodeValue" + LeaseAlreadyBroken Code = "LeaseAlreadyBroken" + LeaseAlreadyPresent Code = "LeaseAlreadyPresent" + LeaseIDMismatchWithBlobOperation Code = "LeaseIdMismatchWithBlobOperation" + LeaseIDMismatchWithContainerOperation Code = "LeaseIdMismatchWithContainerOperation" + LeaseIDMismatchWithLeaseOperation Code = "LeaseIdMismatchWithLeaseOperation" + LeaseIDMissing Code = "LeaseIdMissing" + LeaseIsBreakingAndCannotBeAcquired Code = "LeaseIsBreakingAndCannotBeAcquired" + LeaseIsBreakingAndCannotBeChanged Code = "LeaseIsBreakingAndCannotBeChanged" + LeaseIsBrokenAndCannotBeRenewed Code = "LeaseIsBrokenAndCannotBeRenewed" + LeaseLost Code = "LeaseLost" + LeaseNotPresentWithBlobOperation Code = 
"LeaseNotPresentWithBlobOperation" + LeaseNotPresentWithContainerOperation Code = "LeaseNotPresentWithContainerOperation" + LeaseNotPresentWithLeaseOperation Code = "LeaseNotPresentWithLeaseOperation" + MD5Mismatch Code = "Md5Mismatch" + CRC64Mismatch Code = "Crc64Mismatch" + MaxBlobSizeConditionNotMet Code = "MaxBlobSizeConditionNotMet" + MetadataTooLarge Code = "MetadataTooLarge" + MissingContentLengthHeader Code = "MissingContentLengthHeader" + MissingRequiredHeader Code = "MissingRequiredHeader" + MissingRequiredQueryParameter Code = "MissingRequiredQueryParameter" + MissingRequiredXMLNode Code = "MissingRequiredXmlNode" + MultipleConditionHeadersNotSupported Code = "MultipleConditionHeadersNotSupported" + NoAuthenticationInformation Code = "NoAuthenticationInformation" + NoPendingCopyOperation Code = "NoPendingCopyOperation" + OperationNotAllowedOnIncrementalCopyBlob Code = "OperationNotAllowedOnIncrementalCopyBlob" + OperationTimedOut Code = "OperationTimedOut" + OutOfRangeInput Code = "OutOfRangeInput" + OutOfRangeQueryParameterValue Code = "OutOfRangeQueryParameterValue" + PendingCopyOperation Code = "PendingCopyOperation" + PreviousSnapshotCannotBeNewer Code = "PreviousSnapshotCannotBeNewer" + PreviousSnapshotNotFound Code = "PreviousSnapshotNotFound" + PreviousSnapshotOperationNotSupported Code = "PreviousSnapshotOperationNotSupported" + RequestBodyTooLarge Code = "RequestBodyTooLarge" + RequestURLFailedToParse Code = "RequestUrlFailedToParse" + ResourceAlreadyExists Code = "ResourceAlreadyExists" + ResourceNotFound Code = "ResourceNotFound" + ResourceTypeMismatch Code = "ResourceTypeMismatch" + SequenceNumberConditionNotMet Code = "SequenceNumberConditionNotMet" + SequenceNumberIncrementTooLarge Code = "SequenceNumberIncrementTooLarge" + ServerBusy Code = "ServerBusy" + SnapshotCountExceeded Code = "SnapshotCountExceeded" + SnapshotOperationRateExceeded Code = "SnapshotOperationRateExceeded" + SnapshotsPresent Code = "SnapshotsPresent" + 
SourceConditionNotMet Code = "SourceConditionNotMet" + SystemInUse Code = "SystemInUse" + TargetConditionNotMet Code = "TargetConditionNotMet" + UnauthorizedBlobOverwrite Code = "UnauthorizedBlobOverwrite" + UnsupportedHTTPVerb Code = "UnsupportedHttpVerb" + UnsupportedHeader Code = "UnsupportedHeader" + UnsupportedQueryParameter Code = "UnsupportedQueryParameter" + UnsupportedXMLNode Code = "UnsupportedXmlNode" +) + +var ( + // MissingSharedKeyCredential - Error is returned when SAS URL is being created without SharedKeyCredential. + MissingSharedKeyCredential = bloberror.MissingSharedKeyCredential +) diff --git a/sdk/storage/azdatalake/directory/client.go b/sdk/storage/azdatalake/directory/client.go index 6aaf8bf63138..1cdb330d3d0e 100644 --- a/sdk/storage/azdatalake/directory/client.go +++ b/sdk/storage/azdatalake/directory/client.go @@ -25,11 +25,41 @@ type ClientOptions base.ClientOptions // Client represents a URL to the Azure Datalake Storage service. type Client base.CompositeClient[generated.PathClient, generated.PathClient, blob.Client] -//TODO: NewClient() +// NewClient creates an instance of Client with the specified values. +// - directoryURL - the URL of the directory e.g. 
https://.dfs.core.windows.net/fs/dir +// - cred - an Azure AD credential, typically obtained via the azidentity module +// - options - client options; pass nil to accept the default values +func NewClient(directoryURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { + blobURL := strings.Replace(directoryURL, ".dfs.", ".blob.", 1) + directoryURL = strings.Replace(directoryURL, ".blob.", ".dfs.", 1) + + authPolicy := shared.NewStorageChallengePolicy(cred) + conOptions := shared.GetClientOptions(options) + plOpts := runtime.PipelineOptions{ + PerRetry: []policy.Policy{authPolicy}, + } + base.SetPipelineOptions((*base.ClientOptions)(conOptions), &plOpts) + + azClient, err := azcore.NewClient(shared.FileClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + + if options == nil { + options = &ClientOptions{} + } + blobClientOpts := blob.ClientOptions{ + ClientOptions: options.ClientOptions, + } + blobClient, _ := blob.NewClient(blobURL, cred, &blobClientOpts) + dirClient := base.NewPathClient(directoryURL, blobURL, blobClient, azClient, nil, (*base.ClientOptions)(conOptions)) + + return (*Client)(dirClient), nil +} // NewClientWithNoCredential creates an instance of Client with the specified values. // This is used to anonymously access a storage account or with a shared access signature (SAS) token. -// - serviceURL - the URL of the storage account e.g. https://.dfs.core.windows.net/? +// - directoryURL - the URL of the storage account e.g. https://.dfs.core.windows.net/fs/dir? 
// - options - client options; pass nil to accept the default values func NewClientWithNoCredential(directoryURL string, options *ClientOptions) (*Client, error) { blobURL := strings.Replace(directoryURL, ".dfs.", ".blob.", 1) @@ -44,6 +74,9 @@ func NewClientWithNoCredential(directoryURL string, options *ClientOptions) (*Cl return nil, err } + if options == nil { + options = &ClientOptions{} + } blobClientOpts := blob.ClientOptions{ ClientOptions: options.ClientOptions, } @@ -54,7 +87,7 @@ func NewClientWithNoCredential(directoryURL string, options *ClientOptions) (*Cl } // NewClientWithSharedKeyCredential creates an instance of Client with the specified values. -// - serviceURL - the URL of the storage account e.g. https://.dfs.core.windows.net/ +// - directoryURL - the URL of the storage account e.g. https://.dfs.core.windows.net/fs/dir // - cred - a SharedKeyCredential created with the matching storage account and access key // - options - client options; pass nil to accept the default values func NewClientWithSharedKeyCredential(directoryURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) { @@ -73,6 +106,9 @@ func NewClientWithSharedKeyCredential(directoryURL string, cred *SharedKeyCreden return nil, err } + if options == nil { + options = &ClientOptions{} + } blobClientOpts := blob.ClientOptions{ ClientOptions: options.ClientOptions, } @@ -103,13 +139,13 @@ func NewClientFromConnectionString(connectionString string, options *ClientOptio return NewClientWithNoCredential(parsed.ServiceURL, options) } -func (d *Client) generatedFSClientWithDFS() *generated.PathClient { +func (d *Client) generatedDirClientWithDFS() *generated.PathClient { //base.SharedKeyComposite((*base.CompositeClient[generated.BlobClient, generated.BlockBlobClient])(bb)) dirClientWithDFS, _, _ := base.InnerClients((*base.CompositeClient[generated.PathClient, generated.PathClient, blob.Client])(d)) return dirClientWithDFS } -func (d *Client) 
generatedFSClientWithBlob() *generated.PathClient { +func (d *Client) generatedDirClientWithBlob() *generated.PathClient { _, dirClientWithBlob, _ := base.InnerClients((*base.CompositeClient[generated.PathClient, generated.PathClient, blob.Client])(d)) return dirClientWithBlob } @@ -123,9 +159,14 @@ func (d *Client) sharedKey() *exported.SharedKeyCredential { return base.SharedKeyComposite((*base.CompositeClient[generated.PathClient, generated.PathClient, blob.Client])(d)) } -// URL returns the URL endpoint used by the Client object. -func (d *Client) URL() string { - return "s.generated().Endpoint()" +// DFSURL returns the URL endpoint used by the Client object. +func (d *Client) DFSURL() string { + return d.generatedDirClientWithDFS().Endpoint() +} + +// BlobURL returns the URL endpoint used by the Client object. +func (d *Client) BlobURL() string { + return d.generatedDirClientWithBlob().Endpoint() } // Create creates a new directory (dfs1). @@ -190,3 +231,8 @@ func (d *Client) SetHTTPHeaders(ctx context.Context, httpHeaders HTTPHeaders, op // TODO: call into blob return SetHTTPHeadersResponse{}, nil } + +// UndeletePath restores the specified path that was previously deleted. (dfs op/blob2). 
+func (d *Client) UndeletePath(ctx context.Context, path string, options *UndeletePathOptions) (UndeletePathResponse, error) { + return UndeletePathResponse{}, nil +} diff --git a/sdk/storage/azdatalake/directory/models.go b/sdk/storage/azdatalake/directory/models.go index e349e0478da8..d8ad23d234f9 100644 --- a/sdk/storage/azdatalake/directory/models.go +++ b/sdk/storage/azdatalake/directory/models.go @@ -8,17 +8,15 @@ package directory import ( "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake" "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/generated" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/shared" "time" ) // CreateOptions contains the optional parameters when calling the Create operation. dfs endpoint type CreateOptions struct { // AccessConditions contains parameters for accessing the file. - AccessConditions *azdatalake.AccessConditions + AccessConditions *AccessConditions // Metadata is a map of name-value pairs to associate with the file storage object. Metadata map[string]*string // CPKInfo contains a group of parameters for client provided encryption key. 
@@ -44,7 +42,7 @@ type CreateOptions struct { func (o *CreateOptions) format() (*generated.LeaseAccessConditions, *generated.ModifiedAccessConditions, *generated.PathHTTPHeaders, error) { // TODO: add all other required options for the create operation, we don't need sourceModAccCond since this is not rename - leaseAccessConditions, modifiedAccessConditions := shared.FormatPathAccessConditions(o.AccessConditions) + leaseAccessConditions, modifiedAccessConditions := exported.FormatPathAccessConditions(o.AccessConditions) httpHeaders := &generated.PathHTTPHeaders{ CacheControl: o.HTTPHeaders.CacheControl, ContentDisposition: o.HTTPHeaders.ContentDisposition, @@ -60,11 +58,11 @@ func (o *CreateOptions) format() (*generated.LeaseAccessConditions, *generated.M // DeleteOptions contains the optional parameters when calling the Delete operation. dfs endpoint type DeleteOptions struct { // AccessConditions specifies parameters for accessing the directory - AccessConditions *azdatalake.AccessConditions + AccessConditions *AccessConditions } func (o *DeleteOptions) format() (*generated.LeaseAccessConditions, *generated.ModifiedAccessConditions, error) { - leaseAccessConditions, modifiedAccessConditions := shared.FormatPathAccessConditions(o.AccessConditions) + leaseAccessConditions, modifiedAccessConditions := exported.FormatPathAccessConditions(o.AccessConditions) return leaseAccessConditions, modifiedAccessConditions, nil } @@ -72,12 +70,12 @@ type RenameOptions struct { // SourceModifiedAccessConditions specifies parameters for accessing the source directory SourceModifiedAccessConditions *SourceModifiedAccessConditions // AccessConditions specifies parameters for accessing the destination directory - AccessConditions *azdatalake.AccessConditions + AccessConditions *AccessConditions } // GetPropertiesOptions contains the optional parameters for the Client.GetProperties method type GetPropertiesOptions struct { - AccessConditions *azdatalake.AccessConditions + 
AccessConditions *AccessConditions CPKInfo *CPKInfo } @@ -85,7 +83,7 @@ func (o *GetPropertiesOptions) format() *blob.GetPropertiesOptions { if o == nil { return nil } - accessConditions := shared.FormatBlobAccessConditions(o.AccessConditions) + accessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) return &blob.GetPropertiesOptions{ AccessConditions: accessConditions, CPKInfo: &blob.CPKInfo{ @@ -109,7 +107,7 @@ type SetAccessControlOptions struct { // Permissions is the octal representation of the permissions for user, group and mask. Permissions *string // AccessConditions contains parameters for accessing the path. - AccessConditions *azdatalake.AccessConditions + AccessConditions *AccessConditions } func (o *SetAccessControlOptions) format() (*generated.PathClientSetAccessControlOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions, error) { @@ -117,7 +115,7 @@ func (o *SetAccessControlOptions) format() (*generated.PathClientSetAccessContro return nil, nil, nil, nil } // call path formatter since we're hitting dfs in this operation - leaseAccessConditions, modifiedAccessConditions := shared.FormatPathAccessConditions(o.AccessConditions) + leaseAccessConditions, modifiedAccessConditions := exported.FormatPathAccessConditions(o.AccessConditions) return &generated.PathClientSetAccessControlOptions{ Owner: o.Owner, Group: o.Group, @@ -131,7 +129,7 @@ type GetAccessControlOptions struct { // UPN is the user principal name. UPN *bool // AccessConditions contains parameters for accessing the path. 
- AccessConditions *azdatalake.AccessConditions + AccessConditions *AccessConditions } func (o *GetAccessControlOptions) format() (*generated.PathClientGetPropertiesOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions, error) { @@ -142,7 +140,7 @@ func (o *GetAccessControlOptions) format() (*generated.PathClientGetPropertiesOp }, nil, nil, nil } // call path formatter since we're hitting dfs in this operation - leaseAccessConditions, modifiedAccessConditions := shared.FormatPathAccessConditions(o.AccessConditions) + leaseAccessConditions, modifiedAccessConditions := exported.FormatPathAccessConditions(o.AccessConditions) return &generated.PathClientGetPropertiesOptions{ Upn: o.UPN, Action: &action, @@ -208,14 +206,14 @@ func (o *RemoveAccessControlRecursiveOptions) format() (*generated.PathClientSet // SetHTTPHeadersOptions contains the optional parameters for the Client.SetHTTPHeaders method. type SetHTTPHeadersOptions struct { - AccessConditions *azdatalake.AccessConditions + AccessConditions *AccessConditions } func (o *SetHTTPHeadersOptions) format() *blob.SetHTTPHeadersOptions { if o == nil { return nil } - accessConditions := shared.FormatBlobAccessConditions(o.AccessConditions) + accessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) return &blob.SetHTTPHeadersOptions{ AccessConditions: accessConditions, } @@ -273,7 +271,7 @@ func (o *HTTPHeaders) formatPathHTTPHeaders() (*generated.PathHTTPHeaders, error // SetMetadataOptions provides set of configurations for Set Metadata on path operation type SetMetadataOptions struct { - AccessConditions *azdatalake.AccessConditions + AccessConditions *AccessConditions CPKInfo *CPKInfo CPKScopeInfo *CPKScopeInfo } @@ -282,7 +280,7 @@ func (o *SetMetadataOptions) format() *blob.SetMetadataOptions { if o == nil { return nil } - accessConditions := shared.FormatBlobAccessConditions(o.AccessConditions) + accessConditions := 
exported.FormatBlobAccessConditions(o.AccessConditions) return &blob.SetMetadataOptions{ AccessConditions: accessConditions, CPKInfo: &blob.CPKInfo{ @@ -308,8 +306,29 @@ type CPKScopeInfo struct { EncryptionScope *string } +// UndeletePathOptions contains the optional parameters for the Filesystem.UndeletePath operation. +type UndeletePathOptions struct { + // placeholder +} + +func (o *UndeletePathOptions) format() *UndeletePathOptions { + if o == nil { + return nil + } + return &UndeletePathOptions{} +} + // SourceModifiedAccessConditions identifies the source path access conditions. type SourceModifiedAccessConditions = generated.SourceModifiedAccessConditions // SharedKeyCredential contains an account's name and its primary or secondary key. type SharedKeyCredential = exported.SharedKeyCredential + +// AccessConditions identifies blob-specific access conditions which you optionally set. +type AccessConditions = exported.AccessConditions + +// LeaseAccessConditions contains optional parameters to access leased entity. +type LeaseAccessConditions = exported.LeaseAccessConditions + +// ModifiedAccessConditions contains a group of parameters for specifying access conditions. +type ModifiedAccessConditions = exported.ModifiedAccessConditions diff --git a/sdk/storage/azdatalake/directory/responses.go b/sdk/storage/azdatalake/directory/responses.go index 3327f41360ce..6a4f34714df9 100644 --- a/sdk/storage/azdatalake/directory/responses.go +++ b/sdk/storage/azdatalake/directory/responses.go @@ -43,3 +43,6 @@ type SetMetadataResponse = blob.SetMetadataResponse // SetHTTPHeadersResponse contains the response fields for the SetHTTPHeaders operation. type SetHTTPHeadersResponse = blob.SetHTTPHeadersResponse + +// UndeletePathResponse contains the response from method FilesystemClient.UndeletePath. 
+type UndeletePathResponse = generated.PathClientUndeleteResponse diff --git a/sdk/storage/azdatalake/file/client.go b/sdk/storage/azdatalake/file/client.go index f76101a3c5b3..f6fd6f09e6b3 100644 --- a/sdk/storage/azdatalake/file/client.go +++ b/sdk/storage/azdatalake/file/client.go @@ -25,11 +25,41 @@ type ClientOptions base.ClientOptions // Client represents a URL to the Azure Datalake Storage service. type Client base.CompositeClient[generated.PathClient, generated.PathClient, blob.Client] -//TODO: NewClient() +// NewClient creates an instance of Client with the specified values. +// - fileURL - the URL of the blob e.g. https://.dfs.core.windows.net/fs/file.txt +// - cred - an Azure AD credential, typically obtained via the azidentity module +// - options - client options; pass nil to accept the default values +func NewClient(fileURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { + blobURL := strings.Replace(fileURL, ".dfs.", ".blob.", 1) + fileURL = strings.Replace(fileURL, ".blob.", ".dfs.", 1) + + authPolicy := shared.NewStorageChallengePolicy(cred) + conOptions := shared.GetClientOptions(options) + plOpts := runtime.PipelineOptions{ + PerRetry: []policy.Policy{authPolicy}, + } + base.SetPipelineOptions((*base.ClientOptions)(conOptions), &plOpts) + + azClient, err := azcore.NewClient(shared.FileClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + + if options == nil { + options = &ClientOptions{} + } + blobClientOpts := blob.ClientOptions{ + ClientOptions: options.ClientOptions, + } + blobClient, _ := blob.NewClient(blobURL, cred, &blobClientOpts) + fileClient := base.NewPathClient(fileURL, blobURL, blobClient, azClient, nil, (*base.ClientOptions)(conOptions)) + + return (*Client)(fileClient), nil +} // NewClientWithNoCredential creates an instance of Client with the specified values. 
// This is used to anonymously access a storage account or with a shared access signature (SAS) token. -// - serviceURL - the URL of the storage account e.g. https://.dfs.core.windows.net/? +// - fileURL - the URL of the storage account e.g. https://.dfs.core.windows.net/fs/file.txt? // - options - client options; pass nil to accept the default values func NewClientWithNoCredential(fileURL string, options *ClientOptions) (*Client, error) { blobURL := strings.Replace(fileURL, ".dfs.", ".blob.", 1) @@ -44,6 +74,9 @@ func NewClientWithNoCredential(fileURL string, options *ClientOptions) (*Client, return nil, err } + if options == nil { + options = &ClientOptions{} + } blobClientOpts := blob.ClientOptions{ ClientOptions: options.ClientOptions, } @@ -54,7 +87,7 @@ func NewClientWithNoCredential(fileURL string, options *ClientOptions) (*Client, } // NewClientWithSharedKeyCredential creates an instance of Client with the specified values. -// - serviceURL - the URL of the storage account e.g. https://.dfs.core.windows.net/ +// - fileURL - the URL of the storage account e.g. 
https://.dfs.core.windows.net/fs/file.txt // - cred - a SharedKeyCredential created with the matching storage account and access key // - options - client options; pass nil to accept the default values func NewClientWithSharedKeyCredential(fileURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) { @@ -73,6 +106,9 @@ func NewClientWithSharedKeyCredential(fileURL string, cred *SharedKeyCredential, return nil, err } + if options == nil { + options = &ClientOptions{} + } blobClientOpts := blob.ClientOptions{ ClientOptions: options.ClientOptions, } @@ -103,13 +139,13 @@ func NewClientFromConnectionString(connectionString string, options *ClientOptio return NewClientWithNoCredential(parsed.ServiceURL, options) } -func (f *Client) generatedFSClientWithDFS() *generated.PathClient { +func (f *Client) generatedFileClientWithDFS() *generated.PathClient { //base.SharedKeyComposite((*base.CompositeClient[generated.BlobClient, generated.BlockBlobClient])(bb)) dirClientWithDFS, _, _ := base.InnerClients((*base.CompositeClient[generated.PathClient, generated.PathClient, blob.Client])(f)) return dirClientWithDFS } -func (f *Client) generatedFSClientWithBlob() *generated.PathClient { +func (f *Client) generatedFileClientWithBlob() *generated.PathClient { _, dirClientWithBlob, _ := base.InnerClients((*base.CompositeClient[generated.PathClient, generated.PathClient, blob.Client])(f)) return dirClientWithBlob } @@ -123,9 +159,14 @@ func (f *Client) sharedKey() *exported.SharedKeyCredential { return base.SharedKeyComposite((*base.CompositeClient[generated.PathClient, generated.PathClient, blob.Client])(f)) } -// URL returns the URL endpoint used by the Client object. -func (f *Client) URL() string { - return "s.generated().Endpoint()" +// DFSURL returns the URL endpoint used by the Client object. +func (f *Client) DFSURL() string { + return f.generatedFileClientWithDFS().Endpoint() +} + +// BlobURL returns the URL endpoint used by the Client object. 
+func (f *Client) BlobURL() string { + return f.generatedFileClientWithBlob().Endpoint() } // Create creates a new file (dfs1). @@ -218,3 +259,8 @@ func (f *Client) SetHTTPHeaders(ctx context.Context, httpHeaders HTTPHeaders, op // TODO: call into blob return SetHTTPHeadersResponse{}, nil } + +// UndeletePath restores the specified path that was previously deleted. (dfs op/blob2). +func (f *Client) UndeletePath(ctx context.Context, path string, options *UndeletePathOptions) (UndeletePathResponse, error) { + return UndeletePathResponse{}, nil +} diff --git a/sdk/storage/azdatalake/file/models.go b/sdk/storage/azdatalake/file/models.go index 18551b86a14d..d05d0953d430 100644 --- a/sdk/storage/azdatalake/file/models.go +++ b/sdk/storage/azdatalake/file/models.go @@ -8,17 +8,15 @@ package file import ( "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake" "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/generated" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/shared" "time" ) // CreateOptions contains the optional parameters when calling the Create operation. dfs endpoint. TODO: Design formatter type CreateOptions struct { // AccessConditions contains parameters for accessing the file. - AccessConditions *azdatalake.AccessConditions + AccessConditions *AccessConditions // Metadata is a map of name-value pairs to associate with the file storage object. Metadata map[string]*string // CPKInfo contains a group of parameters for client provided encryption key. 
@@ -46,7 +44,7 @@ type CreateOptions struct { func (o *CreateOptions) format() (*generated.LeaseAccessConditions, *generated.ModifiedAccessConditions, *generated.PathHTTPHeaders, error) { // TODO: add all other required options for the create operation, we don't need sourceModAccCond since this is not rename - leaseAccessConditions, modifiedAccessConditions := shared.FormatPathAccessConditions(o.AccessConditions) + leaseAccessConditions, modifiedAccessConditions := exported.FormatPathAccessConditions(o.AccessConditions) httpHeaders := &generated.PathHTTPHeaders{ CacheControl: o.HTTPHeaders.CacheControl, ContentDisposition: o.HTTPHeaders.ContentDisposition, @@ -62,11 +60,11 @@ func (o *CreateOptions) format() (*generated.LeaseAccessConditions, *generated.M // DeleteOptions contains the optional parameters when calling the Delete operation. dfs endpoint type DeleteOptions struct { // AccessConditions contains parameters for accessing the file. - AccessConditions *azdatalake.AccessConditions + AccessConditions *AccessConditions } func (o *DeleteOptions) format() (*generated.LeaseAccessConditions, *generated.ModifiedAccessConditions, error) { - leaseAccessConditions, modifiedAccessConditions := shared.FormatPathAccessConditions(o.AccessConditions) + leaseAccessConditions, modifiedAccessConditions := exported.FormatPathAccessConditions(o.AccessConditions) return leaseAccessConditions, modifiedAccessConditions, nil } @@ -75,12 +73,12 @@ type RenameOptions struct { // SourceModifiedAccessConditions identifies the source path access conditions. SourceModifiedAccessConditions *SourceModifiedAccessConditions // AccessConditions contains parameters for accessing the file. 
- AccessConditions *azdatalake.AccessConditions + AccessConditions *AccessConditions } // GetPropertiesOptions contains the optional parameters for the Client.GetProperties method type GetPropertiesOptions struct { - AccessConditions *azdatalake.AccessConditions + AccessConditions *AccessConditions CPKInfo *CPKInfo } @@ -88,7 +86,7 @@ func (o *GetPropertiesOptions) format() *blob.GetPropertiesOptions { if o == nil { return nil } - accessConditions := shared.FormatBlobAccessConditions(o.AccessConditions) + accessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) return &blob.GetPropertiesOptions{ AccessConditions: accessConditions, CPKInfo: &blob.CPKInfo{ @@ -111,7 +109,7 @@ type SetAccessControlOptions struct { // Permissions is the octal representation of the permissions for user, group and mask. Permissions *string // AccessConditions contains parameters for accessing the path. - AccessConditions *azdatalake.AccessConditions + AccessConditions *AccessConditions } func (o *SetAccessControlOptions) format() (*generated.PathClientSetAccessControlOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions, error) { @@ -119,7 +117,7 @@ func (o *SetAccessControlOptions) format() (*generated.PathClientSetAccessContro return nil, nil, nil, nil } // call path formatter since we're hitting dfs in this operation - leaseAccessConditions, modifiedAccessConditions := shared.FormatPathAccessConditions(o.AccessConditions) + leaseAccessConditions, modifiedAccessConditions := exported.FormatPathAccessConditions(o.AccessConditions) return &generated.PathClientSetAccessControlOptions{ Owner: o.Owner, Group: o.Group, @@ -133,7 +131,7 @@ type GetAccessControlOptions struct { // UPN is the user principal name. UPN *bool // AccessConditions contains parameters for accessing the path. 
- AccessConditions *azdatalake.AccessConditions + AccessConditions *AccessConditions } func (o *GetAccessControlOptions) format() (*generated.PathClientGetPropertiesOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions, error) { @@ -144,7 +142,7 @@ func (o *GetAccessControlOptions) format() (*generated.PathClientGetPropertiesOp }, nil, nil, nil } // call path formatter since we're hitting dfs in this operation - leaseAccessConditions, modifiedAccessConditions := shared.FormatPathAccessConditions(o.AccessConditions) + leaseAccessConditions, modifiedAccessConditions := exported.FormatPathAccessConditions(o.AccessConditions) return &generated.PathClientGetPropertiesOptions{ Upn: o.UPN, Action: &action, @@ -210,14 +208,14 @@ func (o *RemoveAccessControlRecursiveOptions) format() (*generated.PathClientSet // SetHTTPHeadersOptions contains the optional parameters for the Client.SetHTTPHeaders method. type SetHTTPHeadersOptions struct { - AccessConditions *azdatalake.AccessConditions + AccessConditions *AccessConditions } func (o *SetHTTPHeadersOptions) format() *blob.SetHTTPHeadersOptions { if o == nil { return nil } - accessConditions := shared.FormatBlobAccessConditions(o.AccessConditions) + accessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) return &blob.SetHTTPHeadersOptions{ AccessConditions: accessConditions, } @@ -275,7 +273,7 @@ func (o *HTTPHeaders) formatPathHTTPHeaders() (*generated.PathHTTPHeaders, error // SetMetadataOptions provides set of configurations for Set Metadata on path operation type SetMetadataOptions struct { - AccessConditions *azdatalake.AccessConditions + AccessConditions *AccessConditions CPKInfo *CPKInfo CPKScopeInfo *CPKScopeInfo } @@ -284,7 +282,7 @@ func (o *SetMetadataOptions) format() *blob.SetMetadataOptions { if o == nil { return nil } - accessConditions := shared.FormatBlobAccessConditions(o.AccessConditions) + accessConditions := 
exported.FormatBlobAccessConditions(o.AccessConditions) return &blob.SetMetadataOptions{ AccessConditions: accessConditions, CPKInfo: &blob.CPKInfo{ @@ -310,6 +308,18 @@ type CPKScopeInfo struct { EncryptionScope *string } +// UndeletePathOptions contains the optional parameters for the Filesystem.UndeletePath operation. +type UndeletePathOptions struct { + // placeholder +} + +func (o *UndeletePathOptions) format() *UndeletePathOptions { + if o == nil { + return nil + } + return &UndeletePathOptions{} +} + // SourceModifiedAccessConditions identifies the source path access conditions. type SourceModifiedAccessConditions = generated.SourceModifiedAccessConditions @@ -333,3 +343,12 @@ type ExpiryTypeNever = exported.ExpiryTypeNever // SetExpiryOptions contains the optional parameters for the Client.SetExpiry method. type SetExpiryOptions = exported.SetExpiryOptions + +// AccessConditions identifies blob-specific access conditions which you optionally set. +type AccessConditions = exported.AccessConditions + +// LeaseAccessConditions contains optional parameters to access leased entity. +type LeaseAccessConditions = exported.LeaseAccessConditions + +// ModifiedAccessConditions contains a group of parameters for specifying access conditions. +type ModifiedAccessConditions = exported.ModifiedAccessConditions diff --git a/sdk/storage/azdatalake/file/responses.go b/sdk/storage/azdatalake/file/responses.go index e7ace65c52ee..9222c5c042c3 100644 --- a/sdk/storage/azdatalake/file/responses.go +++ b/sdk/storage/azdatalake/file/responses.go @@ -46,3 +46,6 @@ type SetHTTPHeadersResponse = blob.SetHTTPHeadersResponse // RenameResponse contains the response fields for the Create operation. type RenameResponse = generated.PathClientCreateResponse + +// UndeletePathResponse contains the response from method FilesystemClient.UndeletePath. 
+type UndeletePathResponse = generated.PathClientUndeleteResponse diff --git a/sdk/storage/azdatalake/filesystem/client.go b/sdk/storage/azdatalake/filesystem/client.go index 06f76dd537c7..151ca389efea 100644 --- a/sdk/storage/azdatalake/filesystem/client.go +++ b/sdk/storage/azdatalake/filesystem/client.go @@ -13,11 +13,16 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/datalakeerror" "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/base" "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/generated" "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/sas" + "net/http" "strings" + "time" ) // ClientOptions contains the optional parameters when creating a Client. @@ -26,11 +31,41 @@ type ClientOptions base.ClientOptions // Client represents a URL to the Azure Datalake Storage service. type Client base.CompositeClient[generated.FileSystemClient, generated.FileSystemClient, container.Client] -//TODO: NewClient() +// NewClient creates an instance of Client with the specified values. +// - filesystemURL - the URL of the blob e.g. 
https://.dfs.core.windows.net/fs +// - cred - an Azure AD credential, typically obtained via the azidentity module +// - options - client options; pass nil to accept the default values +func NewClient(filesystemURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { + containerURL := strings.Replace(filesystemURL, ".dfs.", ".blob.", 1) + filesystemURL = strings.Replace(filesystemURL, ".blob.", ".dfs.", 1) + + authPolicy := shared.NewStorageChallengePolicy(cred) + conOptions := shared.GetClientOptions(options) + plOpts := runtime.PipelineOptions{ + PerRetry: []policy.Policy{authPolicy}, + } + base.SetPipelineOptions((*base.ClientOptions)(conOptions), &plOpts) + + azClient, err := azcore.NewClient(shared.FilesystemClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + + if options == nil { + options = &ClientOptions{} + } + containerClientOpts := container.ClientOptions{ + ClientOptions: options.ClientOptions, + } + blobContainerClient, _ := container.NewClient(containerURL, cred, &containerClientOpts) + fsClient := base.NewFilesystemClient(filesystemURL, containerURL, blobContainerClient, azClient, nil, (*base.ClientOptions)(conOptions)) + + return (*Client)(fsClient), nil +} // NewClientWithNoCredential creates an instance of Client with the specified values. // This is used to anonymously access a storage account or with a shared access signature (SAS) token. -// - serviceURL - the URL of the storage account e.g. https://.dfs.core.windows.net/? +// - filesystemURL - the URL of the storage account e.g. https://.dfs.core.windows.net/fs? 
// - options - client options; pass nil to accept the default values func NewClientWithNoCredential(filesystemURL string, options *ClientOptions) (*Client, error) { containerURL := strings.Replace(filesystemURL, ".dfs.", ".blob.", 1) @@ -45,6 +80,9 @@ func NewClientWithNoCredential(filesystemURL string, options *ClientOptions) (*C return nil, err } + if options == nil { + options = &ClientOptions{} + } containerClientOpts := container.ClientOptions{ ClientOptions: options.ClientOptions, } @@ -55,7 +93,7 @@ func NewClientWithNoCredential(filesystemURL string, options *ClientOptions) (*C } // NewClientWithSharedKeyCredential creates an instance of Client with the specified values. -// - serviceURL - the URL of the storage account e.g. https://.dfs.core.windows.net/ +// - filesystemURL - the URL of the storage account e.g. https://.dfs.core.windows.net/fs // - cred - a SharedKeyCredential created with the matching storage account and access key // - options - client options; pass nil to accept the default values func NewClientWithSharedKeyCredential(filesystemURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) { @@ -74,6 +112,9 @@ func NewClientWithSharedKeyCredential(filesystemURL string, cred *SharedKeyCrede return nil, err } + if options == nil { + options = &ClientOptions{} + } containerClientOpts := container.ClientOptions{ ClientOptions: options.ClientOptions, } @@ -124,58 +165,150 @@ func (fs *Client) sharedKey() *exported.SharedKeyCredential { return base.SharedKeyComposite((*base.CompositeClient[generated.FileSystemClient, generated.FileSystemClient, container.Client])(fs)) } -// URL returns the URL endpoint used by the Client object. -func (fs *Client) URL() string { - return "s.generated().Endpoint()" +// DFSURL returns the URL endpoint used by the Client object. +func (fs *Client) DFSURL() string { + return fs.generatedFSClientWithDFS().Endpoint() +} + +// BlobURL returns the URL endpoint used by the Client object. 
+func (fs *Client) BlobURL() string { + return fs.generatedFSClientWithBlob().Endpoint() } // Create creates a new filesystem under the specified account. (blob3). func (fs *Client) Create(ctx context.Context, options *CreateOptions) (CreateResponse, error) { - return CreateResponse{}, nil + opts := options.format() + return fs.containerClient().Create(ctx, opts) } // Delete deletes the specified filesystem and any files or directories it contains. (blob3). func (fs *Client) Delete(ctx context.Context, options *DeleteOptions) (DeleteResponse, error) { - return DeleteResponse{}, nil + opts := options.format() + return fs.containerClient().Delete(ctx, opts) } // GetProperties returns all user-defined metadata, standard HTTP properties, and system properties for the filesystem. (blob3). func (fs *Client) GetProperties(ctx context.Context, options *GetPropertiesOptions) (GetPropertiesResponse, error) { - // TODO: format blob response to fs response - return GetPropertiesResponse{}, nil + opts := options.format() + newResp := GetPropertiesResponse{} + resp, err := fs.containerClient().GetProperties(ctx, opts) + // TODO: find a cleaner way to not use lease from blob package + formatFilesystemProperties(&newResp, &resp) + return newResp, err } // SetMetadata sets one or more user-defined name-value pairs for the specified filesystem. (blob3). func (fs *Client) SetMetadata(ctx context.Context, options *SetMetadataOptions) (SetMetadataResponse, error) { - return SetMetadataResponse{}, nil + opts := options.format() + return fs.containerClient().SetMetadata(ctx, opts) } // SetAccessPolicy sets the permissions for the specified filesystem or the files and directories under it. (blob3). 
func (fs *Client) SetAccessPolicy(ctx context.Context, options *SetAccessPolicyOptions) (SetAccessPolicyResponse, error) { - return SetAccessPolicyResponse{}, nil + opts := options.format() + return fs.containerClient().SetAccessPolicy(ctx, opts) } // GetAccessPolicy returns the permissions for the specified filesystem or the files and directories under it. (blob3). func (fs *Client) GetAccessPolicy(ctx context.Context, options *GetAccessPolicyOptions) (GetAccessPolicyResponse, error) { - return GetAccessPolicyResponse{}, nil + opts := options.format() + newResp := GetAccessPolicyResponse{} + resp, err := fs.containerClient().GetAccessPolicy(ctx, opts) + formatGetAccessPolicyResponse(&newResp, &resp) + return newResp, err } -// UndeletePath restores the specified path that was previously deleted. (dfs op/blob2). -func (fs *Client) UndeletePath(ctx context.Context, path string, options *UndeletePathOptions) (UndeletePathResponse, error) { - return UndeletePathResponse{}, nil -} +// TODO: implement undelete path in fs client as well // NewListPathsPager operation returns a pager of the shares under the specified account. 
(dfs1) // For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/list-shares func (fs *Client) NewListPathsPager(recursive bool, options *ListPathsOptions) *runtime.Pager[ListPathsSegmentResponse] { //TODO: look into possibility of using blob endpoint like list deleted paths is - //TODO: will use ListPathsCreateRequest - return nil + listOptions := options.format() + return runtime.NewPager(runtime.PagingHandler[ListPathsSegmentResponse]{ + More: func(page ListPathsSegmentResponse) bool { + return page.Continuation != nil && len(*page.Continuation) > 0 + }, + Fetcher: func(ctx context.Context, page *ListPathsSegmentResponse) (ListPathsSegmentResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = fs.generatedFSClientWithDFS().ListPathsCreateRequest(ctx, recursive, &listOptions) + } else { + listOptions.Continuation = page.Continuation + req, err = fs.generatedFSClientWithDFS().ListPathsCreateRequest(ctx, recursive, &listOptions) + } + if err != nil { + return ListPathsSegmentResponse{}, err + } + resp, err := fs.generatedFSClientWithDFS().InternalClient().Pipeline().Do(req) + if err != nil { + return ListPathsSegmentResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ListPathsSegmentResponse{}, runtime.NewResponseError(resp) + } + return fs.generatedFSClientWithDFS().ListPathsHandleResponse(resp) + }, + }) } // NewListDeletedPathsPager operation returns a pager of the shares under the specified account. (dfs op/blob2). 
// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/list-shares func (fs *Client) NewListDeletedPathsPager(options *ListDeletedPathsOptions) *runtime.Pager[ListDeletedPathsSegmentResponse] { - //TODO: will use ListBlobHierarchySegmentCreateRequest - return nil + listOptions := options.format() + return runtime.NewPager(runtime.PagingHandler[ListDeletedPathsSegmentResponse]{ + More: func(page ListDeletedPathsSegmentResponse) bool { + return page.NextMarker != nil && len(*page.NextMarker) > 0 + }, + Fetcher: func(ctx context.Context, page *ListDeletedPathsSegmentResponse) (ListDeletedPathsSegmentResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = fs.generatedFSClientWithDFS().ListBlobHierarchySegmentCreateRequest(ctx, &listOptions) + } else { + listOptions.Marker = page.NextMarker + req, err = fs.generatedFSClientWithDFS().ListBlobHierarchySegmentCreateRequest(ctx, &listOptions) + } + if err != nil { + return ListDeletedPathsSegmentResponse{}, err + } + resp, err := fs.generatedFSClientWithDFS().InternalClient().Pipeline().Do(req) + if err != nil { + return ListDeletedPathsSegmentResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ListDeletedPathsSegmentResponse{}, runtime.NewResponseError(resp) + } + return fs.generatedFSClientWithDFS().ListBlobHierarchySegmentHandleResponse(resp) + }, + }) +} + +// GetSASURL is a convenience method for generating a SAS token for the currently pointed at container. +// It can only be used if the credential supplied during creation was a SharedKeyCredential. 
+func (fs *Client) GetSASURL(permissions sas.FilesystemPermissions, expiry time.Time, o *GetSASURLOptions) (string, error) { + if fs.sharedKey() == nil { + return "", datalakeerror.MissingSharedKeyCredential + } + st := o.format() + urlParts, err := azdatalake.ParseURL(fs.BlobURL()) + if err != nil { + return "", err + } + qps, err := sas.DatalakeSignatureValues{ + Version: sas.Version, + Protocol: sas.ProtocolHTTPS, + FilesystemName: urlParts.FilesystemName, + Permissions: permissions.String(), + StartTime: st, + ExpiryTime: expiry.UTC(), + }.SignWithSharedKey(fs.sharedKey()) + if err != nil { + return "", err + } + + endpoint := fs.BlobURL() + "?" + qps.Encode() + + return endpoint, nil } diff --git a/sdk/storage/azdatalake/filesystem/client_test.go b/sdk/storage/azdatalake/filesystem/client_test.go new file mode 100644 index 000000000000..fa78bef404ad --- /dev/null +++ b/sdk/storage/azdatalake/filesystem/client_test.go @@ -0,0 +1,1205 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package filesystem_test + +import ( + "context" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/recording" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/datalakeerror" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/filesystem" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/testcommon" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/sas" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "strconv" + "strings" + "testing" + "time" +) + +func Test(t *testing.T) { + recordMode := recording.GetRecordMode() + t.Logf("Running datalake Tests in %s mode\n", recordMode) + if recordMode == recording.LiveMode { + suite.Run(t, &RecordedTestSuite{}) + suite.Run(t, &UnrecordedTestSuite{}) + } else if recordMode == recording.PlaybackMode { + suite.Run(t, &RecordedTestSuite{}) + } else if recordMode == recording.RecordingMode { + suite.Run(t, &RecordedTestSuite{}) + } +} + +func (s *RecordedTestSuite) BeforeTest(suite string, test string) { + testcommon.BeforeTest(s.T(), suite, test) +} + +func (s *RecordedTestSuite) AfterTest(suite string, test string) { + testcommon.AfterTest(s.T(), suite, test) +} + +func (s *UnrecordedTestSuite) BeforeTest(suite string, test string) { + +} + +func (s *UnrecordedTestSuite) AfterTest(suite string, test string) { + +} + +type RecordedTestSuite struct { + suite.Suite +} + +type UnrecordedTestSuite struct { + suite.Suite +} + +func validateFilesystemDeleted(_require *require.Assertions, filesystemClient *filesystem.Client) { + _, err := filesystemClient.GetAccessPolicy(context.Background(), nil) + _require.NotNil(err) + + testcommon.ValidateBlobErrorCode(_require, err, datalakeerror.ContainerNotFound) +} + +func (s *RecordedTestSuite) TestCreateFilesystem() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFilesystemName(testName) + fsClient, err := 
testcommon.GetFilesystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFilesystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) +} + +func (s *RecordedTestSuite) TestCreateFilesystemWithOptions() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFilesystemName(testName) + testStr := "hello" + metadata := map[string]*string{"foo": &testStr, "bar": &testStr} + access := filesystem.Filesystem + opts := filesystem.CreateOptions{ + Metadata: metadata, + Access: &access, + } + fsClient, err := testcommon.GetFilesystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFilesystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), &opts) + _require.Nil(err) + + props, err := fsClient.GetProperties(context.Background(), nil) + _require.NotNil(props.Metadata) + _require.Equal(*props.PublicAccess, filesystem.Filesystem) +} + +func (s *RecordedTestSuite) TestCreateFilesystemWithFileAccess() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFilesystemName(testName) + testStr := "hello" + metadata := map[string]*string{"foo": &testStr, "bar": &testStr} + access := filesystem.File + opts := filesystem.CreateOptions{ + Metadata: metadata, + Access: &access, + } + fsClient, err := testcommon.GetFilesystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFilesystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), &opts) + _require.Nil(err) + props, err := fsClient.GetProperties(context.Background(), nil) + _require.NotNil(props.Metadata) + _require.Equal(*props.PublicAccess, filesystem.File) +} + +func (s *RecordedTestSuite) 
TestCreateFilesystemEmptyMetadata() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFilesystemName(testName) + metadata := map[string]*string{"foo": nil, "bar": nil} + access := filesystem.Filesystem + opts := filesystem.CreateOptions{ + Metadata: metadata, + Access: &access, + } + fsClient, err := testcommon.GetFilesystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFilesystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), &opts) + _require.Nil(err) + + props, err := fsClient.GetProperties(context.Background(), nil) + _require.Nil(props.Metadata) + _require.Equal(*props.PublicAccess, filesystem.Filesystem) + +} + +func (s *RecordedTestSuite) TestFilesystemCreateInvalidName() { + _require := require.New(s.T()) + + fsClient, err := testcommon.GetFilesystemClient("foo bar", s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = fsClient.Create(context.Background(), nil) + _require.NotNil(err) + testcommon.ValidateBlobErrorCode(_require, err, datalakeerror.InvalidResourceName) +} + +func (s *RecordedTestSuite) TestFilesystemCreateNameCollision() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFilesystemName(testName) + fsClient, err := testcommon.GetFilesystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFilesystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + _, err = fsClient.Create(context.Background(), nil) + _require.NotNil(err) + testcommon.ValidateBlobErrorCode(_require, err, datalakeerror.FilesystemAlreadyExists) +} + +func (s *RecordedTestSuite) TestFilesystemGetProperties() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := 
testcommon.GenerateFilesystemName(testName) + fsClient, err := testcommon.GetFilesystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + defer testcommon.DeleteFilesystem(context.Background(), _require, fsClient) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + resp, err := fsClient.GetProperties(context.Background(), nil) + _require.Nil(err) + _require.NotNil(resp.ETag) + _require.Nil(resp.Metadata) +} + +func (s *RecordedTestSuite) TestFilesystemDelete() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFilesystemName(testName) + fsClient, err := testcommon.GetFilesystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + + _, err = fsClient.Delete(context.Background(), nil) + _require.Nil(err) + + validateFilesystemDeleted(_require, fsClient) +} + +func (s *RecordedTestSuite) TestFilesystemDeleteNonExistent() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFilesystemName(testName) + fsClient, err := testcommon.GetFilesystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = fsClient.Delete(context.Background(), nil) + _require.NotNil(err) + + testcommon.ValidateBlobErrorCode(_require, err, datalakeerror.ContainerNotFound) +} + +func (s *RecordedTestSuite) TestFilesystemDeleteIfModifiedSinceTrue() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFilesystemName(testName) + fsClient, err := testcommon.GetFilesystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fsClient.Create(context.Background(), nil) + _require.Nil(err) + + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, -10) + + 
deleteFilesystemOptions := filesystem.DeleteOptions{ + AccessConditions: &filesystem.AccessConditions{ + ModifiedAccessConditions: &filesystem.ModifiedAccessConditions{ + IfModifiedSince: ¤tTime, + }, + }, + } + _, err = fsClient.Delete(context.Background(), &deleteFilesystemOptions) + _require.Nil(err) + validateFilesystemDeleted(_require, fsClient) +} + +func (s *RecordedTestSuite) TestFilesystemDeleteIfModifiedSinceFalse() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFilesystemName(testName) + fsClient, err := testcommon.GetFilesystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFilesystem(context.Background(), _require, fsClient) + + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, 10) + + deleteFilesystemOptions := filesystem.DeleteOptions{ + AccessConditions: &filesystem.AccessConditions{ + ModifiedAccessConditions: &filesystem.ModifiedAccessConditions{ + IfModifiedSince: ¤tTime, + }, + }, + } + _, err = fsClient.Delete(context.Background(), &deleteFilesystemOptions) + _require.NotNil(err) + testcommon.ValidateBlobErrorCode(_require, err, datalakeerror.ConditionNotMet) +} + +func (s *RecordedTestSuite) TestFilesystemDeleteIfUnModifiedSinceTrue() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFilesystemName(testName) + fsClient, err := testcommon.GetFilesystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fsClient.Create(context.Background(), nil) + _require.Nil(err) + + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, 10) + + deleteFilesystemOptions := filesystem.DeleteOptions{ + AccessConditions: &filesystem.AccessConditions{ + ModifiedAccessConditions: &filesystem.ModifiedAccessConditions{ + IfUnmodifiedSince: 
¤tTime, + }, + }, + } + _, err = fsClient.Delete(context.Background(), &deleteFilesystemOptions) + _require.Nil(err) + + validateFilesystemDeleted(_require, fsClient) +} + +func (s *RecordedTestSuite) TestFilesystemDeleteIfUnModifiedSinceFalse() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFilesystemName(testName) + fsClient, err := testcommon.GetFilesystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFilesystem(context.Background(), _require, fsClient) + + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, -10) + + deleteFilesystemOptions := filesystem.DeleteOptions{ + AccessConditions: &filesystem.AccessConditions{ + ModifiedAccessConditions: &filesystem.ModifiedAccessConditions{ + IfUnmodifiedSince: ¤tTime, + }, + }, + } + _, err = fsClient.Delete(context.Background(), &deleteFilesystemOptions) + _require.NotNil(err) + + testcommon.ValidateBlobErrorCode(_require, err, datalakeerror.ConditionNotMet) +} + +func (s *RecordedTestSuite) TestFilesystemSetMetadataNonEmpty() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFilesystemName(testName) + fsClient, err := testcommon.GetFilesystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFilesystem(context.Background(), _require, fsClient) + + opts := filesystem.SetMetadataOptions{ + Metadata: testcommon.BasicMetadata, + } + _, err = fsClient.SetMetadata(context.Background(), &opts) + _require.Nil(err) + + resp1, err := fsClient.GetProperties(context.Background(), nil) + _require.Nil(err) + + for k, v := range testcommon.BasicMetadata { + _require.Equal(v, resp1.Metadata[k]) + } +} + +func (s 
*RecordedTestSuite) TestFilesystemSetMetadataEmpty() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFilesystemName(testName) + fsClient, err := testcommon.GetFilesystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFilesystem(context.Background(), _require, fsClient) + + opts := filesystem.SetMetadataOptions{ + Metadata: map[string]*string{}, + } + + _, err = fsClient.SetMetadata(context.Background(), &opts) + _require.Nil(err) + + resp1, err := fsClient.GetProperties(context.Background(), nil) + _require.Nil(err) + _require.Nil(resp1.Metadata) +} + +func (s *RecordedTestSuite) TestFilesystemSetMetadataNil() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFilesystemName(testName) + fsClient, err := testcommon.GetFilesystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFilesystem(context.Background(), _require, fsClient) + + _, err = fsClient.SetMetadata(context.Background(), nil) + _require.Nil(err) + + resp1, err := fsClient.GetProperties(context.Background(), nil) + _require.Nil(err) + _require.Nil(resp1.Metadata) +} + +func (s *RecordedTestSuite) TestFilesystemSetMetadataInvalidField() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFilesystemName(testName) + fsClient, err := testcommon.GetFilesystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFilesystem(context.Background(), _require, fsClient) + + opts := filesystem.SetMetadataOptions{ + Metadata: 
map[string]*string{"!nval!d Field!@#%": to.Ptr("value")}, + } + _, err = fsClient.SetMetadata(context.Background(), &opts) + _require.NotNil(err) + _require.Equal(strings.Contains(err.Error(), testcommon.InvalidHeaderErrorSubstring), true) +} + +func (s *RecordedTestSuite) TestFilesystemSetMetadataNonExistent() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFilesystemName(testName) + fsClient, err := testcommon.GetFilesystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = fsClient.SetMetadata(context.Background(), nil) + _require.NotNil(err) + + testcommon.ValidateBlobErrorCode(_require, err, datalakeerror.ContainerNotFound) +} + +func (s *RecordedTestSuite) TestSetEmptyAccessPolicy() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFilesystemName(testName) + fsClient, err := testcommon.GetFilesystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFilesystem(context.Background(), _require, fsClient) + + _, err = fsClient.SetAccessPolicy(context.Background(), &filesystem.SetAccessPolicyOptions{}) + _require.Nil(err) +} + +func (s *RecordedTestSuite) TestSetNilAccessPolicy() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFilesystemName(testName) + fsClient, err := testcommon.GetFilesystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFilesystem(context.Background(), _require, fsClient) + + _, err = fsClient.SetAccessPolicy(context.Background(), nil) + _require.Nil(err) +} + +func (s *RecordedTestSuite) TestSetAccessPolicy() { + _require := require.New(s.T()) + 
testName := s.T().Name() + + filesystemName := testcommon.GenerateFilesystemName(testName) + fsClient, err := testcommon.GetFilesystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFilesystem(context.Background(), _require, fsClient) + + start := time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC) + expiration := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + permission := "r" + id := "1" + + signedIdentifiers := make([]*filesystem.SignedIdentifier, 0) + + signedIdentifiers = append(signedIdentifiers, &filesystem.SignedIdentifier{ + AccessPolicy: &filesystem.AccessPolicy{ + Expiry: &expiration, + Start: &start, + Permission: &permission, + }, + ID: &id, + }) + options := filesystem.SetAccessPolicyOptions{FilesystemACL: signedIdentifiers} + _, err = fsClient.SetAccessPolicy(context.Background(), &options) + _require.Nil(err) +} + +func (s *RecordedTestSuite) TestSetMultipleAccessPolicies() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFilesystemName(testName) + fsClient, err := testcommon.GetFilesystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFilesystem(context.Background(), _require, fsClient) + + id := "empty" + + signedIdentifiers := make([]*filesystem.SignedIdentifier, 0) + signedIdentifiers = append(signedIdentifiers, &filesystem.SignedIdentifier{ + ID: &id, + }) + + permission2 := "r" + id2 := "partial" + + signedIdentifiers = append(signedIdentifiers, &filesystem.SignedIdentifier{ + ID: &id2, + AccessPolicy: &filesystem.AccessPolicy{ + Permission: &permission2, + }, + }) + + id3 := "full" + permission3 := "r" + start := time.Date(2021, 6, 8, 2, 10, 9, 0, time.UTC) + expiry := time.Date(2021, 6, 8, 2, 10, 9, 0, time.UTC) + 
+ signedIdentifiers = append(signedIdentifiers, &filesystem.SignedIdentifier{ + ID: &id3, + AccessPolicy: &filesystem.AccessPolicy{ + Start: &start, + Expiry: &expiry, + Permission: &permission3, + }, + }) + options := filesystem.SetAccessPolicyOptions{FilesystemACL: signedIdentifiers} + _, err = fsClient.SetAccessPolicy(context.Background(), &options) + _require.Nil(err) + + // Make a Get to assert two access policies + resp, err := fsClient.GetAccessPolicy(context.Background(), nil) + _require.Nil(err) + _require.Len(resp.SignedIdentifiers, 3) +} + +func (s *RecordedTestSuite) TestSetNullAccessPolicy() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFilesystemName(testName) + fsClient, err := testcommon.GetFilesystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFilesystem(context.Background(), _require, fsClient) + + id := "null" + + signedIdentifiers := make([]*filesystem.SignedIdentifier, 0) + signedIdentifiers = append(signedIdentifiers, &filesystem.SignedIdentifier{ + ID: &id, + }) + options := filesystem.SetAccessPolicyOptions{FilesystemACL: signedIdentifiers} + _, err = fsClient.SetAccessPolicy(context.Background(), &options) + _require.Nil(err) + + resp, err := fsClient.GetAccessPolicy(context.Background(), nil) + _require.Nil(err) + _require.Equal(len(resp.SignedIdentifiers), 1) +} + +func (s *RecordedTestSuite) TestFilesystemGetSetPermissionsMultiplePolicies() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFilesystemName(testName) + fsClient, err := testcommon.GetFilesystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFilesystem(context.Background(), 
_require, fsClient) + + // Define the policies + start, err := time.Parse(time.UnixDate, "Fri Jun 11 20:00:00 UTC 2021") + _require.Nil(err) + expiry := start.Add(5 * time.Minute) + expiry2 := start.Add(time.Minute) + readWrite := to.Ptr(filesystem.AccessPolicyPermission{Read: true, Write: true}).String() + readOnly := to.Ptr(filesystem.AccessPolicyPermission{Read: true}).String() + id1, id2 := "0000", "0001" + permissions := []*filesystem.SignedIdentifier{ + {ID: &id1, + AccessPolicy: &filesystem.AccessPolicy{ + Start: &start, + Expiry: &expiry, + Permission: &readWrite, + }, + }, + {ID: &id2, + AccessPolicy: &filesystem.AccessPolicy{ + Start: &start, + Expiry: &expiry2, + Permission: &readOnly, + }, + }, + } + options := filesystem.SetAccessPolicyOptions{FilesystemACL: permissions} + _, err = fsClient.SetAccessPolicy(context.Background(), &options) + + _require.Nil(err) + + resp, err := fsClient.GetAccessPolicy(context.Background(), nil) + _require.Nil(err) + _require.EqualValues(resp.SignedIdentifiers, permissions) +} + +func (s *RecordedTestSuite) TestFilesystemGetPermissionsPublicAccessNotNone() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFilesystemName(testName) + fsClient, err := testcommon.GetFilesystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + access := filesystem.File + createContainerOptions := filesystem.CreateOptions{ + Access: &access, + } + _, err = fsClient.Create(context.Background(), &createContainerOptions) // We create the container explicitly so we can be sure the access policy is not empty + _require.Nil(err) + defer testcommon.DeleteFilesystem(context.Background(), _require, fsClient) + + resp, err := fsClient.GetAccessPolicy(context.Background(), nil) + + _require.Nil(err) + _require.Equal(*resp.PublicAccess, filesystem.File) +} + +// TODO: TestFilesystemSetPermissionsPublicAccessNone() + +func (s *RecordedTestSuite) 
TestFilesystemSetPermissionsPublicAccessTypeFile() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFilesystemName(testName) + fsClient, err := testcommon.GetFilesystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFilesystem(context.Background(), _require, fsClient) + setAccessPolicyOptions := filesystem.SetAccessPolicyOptions{ + Access: to.Ptr(filesystem.File), + } + _, err = fsClient.SetAccessPolicy(context.Background(), &setAccessPolicyOptions) + _require.Nil(err) + + resp, err := fsClient.GetAccessPolicy(context.Background(), nil) + _require.Nil(err) + _require.Equal(*resp.PublicAccess, filesystem.File) +} + +func (s *RecordedTestSuite) TestFilesystemSetPermissionsPublicAccessFilesystem() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFilesystemName(testName) + fsClient, err := testcommon.GetFilesystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFilesystem(context.Background(), _require, fsClient) + + setAccessPolicyOptions := filesystem.SetAccessPolicyOptions{ + Access: to.Ptr(filesystem.Filesystem), + } + _, err = fsClient.SetAccessPolicy(context.Background(), &setAccessPolicyOptions) + _require.Nil(err) + + resp, err := fsClient.GetAccessPolicy(context.Background(), nil) + _require.Nil(err) + _require.Equal(*resp.PublicAccess, filesystem.Filesystem) +} + +func (s *RecordedTestSuite) TestFilesystemSetPermissionsACLMoreThanFive() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFilesystemName(testName) + fsClient, err := testcommon.GetFilesystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + 
_require.NoError(err) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFilesystem(context.Background(), _require, fsClient) + + start, err := time.Parse(time.UnixDate, "Fri Jun 11 20:00:00 UTC 2021") + _require.Nil(err) + expiry, err := time.Parse(time.UnixDate, "Fri Jun 11 20:00:00 UTC 2049") + _require.Nil(err) + permissions := make([]*filesystem.SignedIdentifier, 6) + listOnly := to.Ptr(filesystem.AccessPolicyPermission{Read: true}).String() + for i := 0; i < 6; i++ { + id := "000" + strconv.Itoa(i) + permissions[i] = &filesystem.SignedIdentifier{ + ID: &id, + AccessPolicy: &filesystem.AccessPolicy{ + Start: &start, + Expiry: &expiry, + Permission: &listOnly, + }, + } + } + + access := filesystem.File + setAccessPolicyOptions := filesystem.SetAccessPolicyOptions{ + Access: &access, + } + setAccessPolicyOptions.FilesystemACL = permissions + _, err = fsClient.SetAccessPolicy(context.Background(), &setAccessPolicyOptions) + _require.NotNil(err) + + testcommon.ValidateBlobErrorCode(_require, err, datalakeerror.InvalidXMLDocument) +} + +func (s *RecordedTestSuite) TestFilesystemSetPermissionsDeleteAndModifyACL() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFilesystemName(testName) + fsClient, err := testcommon.GetFilesystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFilesystem(context.Background(), _require, fsClient) + + start, err := time.Parse(time.UnixDate, "Fri Jun 11 20:00:00 UTC 2021") + _require.Nil(err) + expiry, err := time.Parse(time.UnixDate, "Fri Jun 11 20:00:00 UTC 2049") + _require.Nil(err) + listOnly := to.Ptr(filesystem.AccessPolicyPermission{Read: true}).String() + permissions := make([]*filesystem.SignedIdentifier, 2) + for i := 0; i < 2; i++ { + id := "000" + strconv.Itoa(i) + permissions[i] = 
&filesystem.SignedIdentifier{ + ID: &id, + AccessPolicy: &filesystem.AccessPolicy{ + Start: &start, + Expiry: &expiry, + Permission: &listOnly, + }, + } + } + + access := filesystem.File + setAccessPolicyOptions := filesystem.SetAccessPolicyOptions{ + Access: &access, + } + setAccessPolicyOptions.FilesystemACL = permissions + _, err = fsClient.SetAccessPolicy(context.Background(), &setAccessPolicyOptions) + _require.Nil(err) + + resp, err := fsClient.GetAccessPolicy(context.Background(), nil) + _require.Nil(err) + _require.EqualValues(resp.SignedIdentifiers, permissions) + + permissions = resp.SignedIdentifiers[:1] // Delete the first policy by removing it from the slice + newId := "0004" + permissions[0].ID = &newId // Modify the remaining policy which is at index 0 in the new slice + setAccessPolicyOptions1 := filesystem.SetAccessPolicyOptions{ + Access: &access, + } + setAccessPolicyOptions1.FilesystemACL = permissions + _, err = fsClient.SetAccessPolicy(context.Background(), &setAccessPolicyOptions1) + _require.Nil(err) + + resp, err = fsClient.GetAccessPolicy(context.Background(), nil) + _require.Nil(err) + _require.Len(resp.SignedIdentifiers, 1) + _require.EqualValues(resp.SignedIdentifiers, permissions) +} + +func (s *RecordedTestSuite) TestFilesystemSetPermissionsDeleteAllPolicies() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFilesystemName(testName) + fsClient, err := testcommon.GetFilesystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFilesystem(context.Background(), _require, fsClient) + + start, err := time.Parse(time.UnixDate, "Fri Jun 11 20:00:00 UTC 2021") + _require.Nil(err) + expiry, err := time.Parse(time.UnixDate, "Fri Jun 11 20:00:00 UTC 2049") + _require.Nil(err) + permissions := make([]*filesystem.SignedIdentifier, 2) + listOnly := 
to.Ptr(filesystem.AccessPolicyPermission{Read: true}).String() + for i := 0; i < 2; i++ { + id := "000" + strconv.Itoa(i) + permissions[i] = &filesystem.SignedIdentifier{ + ID: &id, + AccessPolicy: &filesystem.AccessPolicy{ + Start: &start, + Expiry: &expiry, + Permission: &listOnly, + }, + } + } + + setAccessPolicyOptions := filesystem.SetAccessPolicyOptions{ + Access: to.Ptr(filesystem.File), + } + setAccessPolicyOptions.FilesystemACL = permissions + _, err = fsClient.SetAccessPolicy(context.Background(), &setAccessPolicyOptions) + _require.Nil(err) + + resp, err := fsClient.GetAccessPolicy(context.Background(), nil) + _require.Nil(err) + _require.Len(resp.SignedIdentifiers, len(permissions)) + _require.EqualValues(resp.SignedIdentifiers, permissions) + + setAccessPolicyOptions = filesystem.SetAccessPolicyOptions{ + Access: to.Ptr(filesystem.File), + } + setAccessPolicyOptions.FilesystemACL = []*filesystem.SignedIdentifier{} + _, err = fsClient.SetAccessPolicy(context.Background(), &setAccessPolicyOptions) + _require.Nil(err) + + resp, err = fsClient.GetAccessPolicy(context.Background(), nil) + _require.Nil(err) + _require.Nil(resp.SignedIdentifiers) +} + +func (s *RecordedTestSuite) TestFilesystemSetPermissionsInvalidPolicyTimes() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFilesystemName(testName) + fsClient, err := testcommon.GetFilesystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFilesystem(context.Background(), _require, fsClient) + + // Swap start and expiry + expiry, err := time.Parse(time.UnixDate, "Fri Jun 11 20:00:00 UTC 2021") + _require.Nil(err) + start, err := time.Parse(time.UnixDate, "Fri Jun 11 20:00:00 UTC 2049") + _require.Nil(err) + permissions := make([]*filesystem.SignedIdentifier, 2) + listOnly := 
to.Ptr(filesystem.AccessPolicyPermission{Read: true}).String() + for i := 0; i < 2; i++ { + id := "000" + strconv.Itoa(i) + permissions[i] = &filesystem.SignedIdentifier{ + ID: &id, + AccessPolicy: &filesystem.AccessPolicy{ + Start: &start, + Expiry: &expiry, + Permission: &listOnly, + }, + } + } + + setAccessPolicyOptions := filesystem.SetAccessPolicyOptions{ + Access: to.Ptr(filesystem.File), + } + setAccessPolicyOptions.FilesystemACL = permissions + _, err = fsClient.SetAccessPolicy(context.Background(), &setAccessPolicyOptions) + _require.Nil(err) +} + +func (s *RecordedTestSuite) TestFilesystemSetPermissionsNilPolicySlice() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFilesystemName(testName) + fsClient, err := testcommon.GetFilesystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFilesystem(context.Background(), _require, fsClient) + + _, err = fsClient.SetAccessPolicy(context.Background(), nil) + _require.Nil(err) +} + +func (s *RecordedTestSuite) TestFilesystemSetPermissionsSignedIdentifierTooLong() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFilesystemName(testName) + fsClient, err := testcommon.GetFilesystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFilesystem(context.Background(), _require, fsClient) + + id := "" + for i := 0; i < 65; i++ { + id += "a" + } + expiry, err := time.Parse(time.UnixDate, "Fri Jun 11 20:00:00 UTC 2021") + _require.Nil(err) + start := expiry.Add(5 * time.Minute).UTC() + permissions := make([]*filesystem.SignedIdentifier, 2) + listOnly := to.Ptr(filesystem.AccessPolicyPermission{Read: true}).String() + for i := 0; i < 2; i++ { 
+ permissions[i] = &filesystem.SignedIdentifier{ + ID: &id, + AccessPolicy: &filesystem.AccessPolicy{ + Start: &start, + Expiry: &expiry, + Permission: &listOnly, + }, + } + } + + setAccessPolicyOptions := filesystem.SetAccessPolicyOptions{ + Access: to.Ptr(filesystem.File), + } + setAccessPolicyOptions.FilesystemACL = permissions + _, err = fsClient.SetAccessPolicy(context.Background(), &setAccessPolicyOptions) + _require.NotNil(err) + + testcommon.ValidateBlobErrorCode(_require, err, datalakeerror.InvalidXMLDocument) +} + +func (s *RecordedTestSuite) TestFilesystemSetPermissionsIfModifiedSinceTrue() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFilesystemName(testName) + fsClient, err := testcommon.GetFilesystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFilesystem(context.Background(), _require, fsClient) + + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, -10) + + setAccessPolicyOptions := filesystem.SetAccessPolicyOptions{ + AccessConditions: &filesystem.AccessConditions{ + ModifiedAccessConditions: &filesystem.ModifiedAccessConditions{IfModifiedSince: &currentTime}, + }, + } + _, err = fsClient.SetAccessPolicy(context.Background(), &setAccessPolicyOptions) + _require.Nil(err) + + resp1, err := fsClient.GetAccessPolicy(context.Background(), nil) + _require.Nil(err) + _require.Nil(resp1.PublicAccess) +} + +func (s *RecordedTestSuite) TestFilesystemSetPermissionsIfModifiedSinceFalse() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFilesystemName(testName) + fsClient, err := testcommon.GetFilesystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer 
testcommon.DeleteFilesystem(context.Background(), _require, fsClient) + + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, 10) + + setAccessPolicyOptions := filesystem.SetAccessPolicyOptions{ + AccessConditions: &filesystem.AccessConditions{ + ModifiedAccessConditions: &filesystem.ModifiedAccessConditions{IfModifiedSince: &currentTime}, + }, + } + _, err = fsClient.SetAccessPolicy(context.Background(), &setAccessPolicyOptions) + _require.NotNil(err) + + testcommon.ValidateBlobErrorCode(_require, err, datalakeerror.ConditionNotMet) +} + +func (s *RecordedTestSuite) TestFilesystemSetPermissionsIfUnModifiedSinceTrue() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFilesystemName(testName) + fsClient, err := testcommon.GetFilesystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFilesystem(context.Background(), _require, fsClient) + + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, 10) + + setAccessPolicyOptions := filesystem.SetAccessPolicyOptions{ + AccessConditions: &filesystem.AccessConditions{ + ModifiedAccessConditions: &filesystem.ModifiedAccessConditions{IfUnmodifiedSince: &currentTime}, + }, + } + _, err = fsClient.SetAccessPolicy(context.Background(), &setAccessPolicyOptions) + _require.Nil(err) + + resp1, err := fsClient.GetAccessPolicy(context.Background(), nil) + _require.Nil(err) + _require.Nil(resp1.PublicAccess) +} + +func (s *RecordedTestSuite) TestFilesystemSetPermissionsIfUnModifiedSinceFalse() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFilesystemName(testName) + fsClient, err := testcommon.GetFilesystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + resp, err := fsClient.Create(context.Background(), nil) + _require.Nil(err) + 
defer testcommon.DeleteFilesystem(context.Background(), _require, fsClient) + + currentTime := testcommon.GetRelativeTimeFromAnchor(resp.Date, -10) + + setAccessPolicyOptions := filesystem.SetAccessPolicyOptions{ + AccessConditions: &filesystem.AccessConditions{ + ModifiedAccessConditions: &filesystem.ModifiedAccessConditions{IfUnmodifiedSince: &currentTime}, + }, + } + _, err = fsClient.SetAccessPolicy(context.Background(), &setAccessPolicyOptions) + _require.NotNil(err) + + testcommon.ValidateBlobErrorCode(_require, err, datalakeerror.ConditionNotMet) +} + +func (s *RecordedTestSuite) TestSetAccessPoliciesInDifferentTimeFormats() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFilesystemName(testName) + fsClient, err := testcommon.GetFilesystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFilesystem(context.Background(), _require, fsClient) + + id := "timeInEST" + permission := "rw" + loc, err := time.LoadLocation("EST") + _require.Nil(err) + start := time.Now().In(loc) + expiry := start.Add(10 * time.Hour) + + signedIdentifiers := make([]*filesystem.SignedIdentifier, 0) + signedIdentifiers = append(signedIdentifiers, &filesystem.SignedIdentifier{ + ID: &id, + AccessPolicy: &filesystem.AccessPolicy{ + Start: &start, + Expiry: &expiry, + Permission: &permission, + }, + }) + + id2 := "timeInIST" + permission2 := "r" + loc2, err := time.LoadLocation("Asia/Kolkata") + _require.Nil(err) + start2 := time.Now().In(loc2) + expiry2 := start2.Add(5 * time.Hour) + + signedIdentifiers = append(signedIdentifiers, &filesystem.SignedIdentifier{ + ID: &id2, + AccessPolicy: &filesystem.AccessPolicy{ + Start: &start2, + Expiry: &expiry2, + Permission: &permission2, + }, + }) + + id3 := "nilTime" + permission3 := "r" + + signedIdentifiers = append(signedIdentifiers, 
&filesystem.SignedIdentifier{ + ID: &id3, + AccessPolicy: &filesystem.AccessPolicy{ + Permission: &permission3, + }, + }) + options := filesystem.SetAccessPolicyOptions{FilesystemACL: signedIdentifiers} + _, err = fsClient.SetAccessPolicy(context.Background(), &options) + _require.Nil(err) + + // make a Get to assert three access policies + resp1, err := fsClient.GetAccessPolicy(context.Background(), nil) + _require.Nil(err) + _require.Len(resp1.SignedIdentifiers, 3) + _require.EqualValues(resp1.SignedIdentifiers, signedIdentifiers) +} + +func (s *RecordedTestSuite) TestSetAccessPolicyWithNullId() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFilesystemName(testName) + fsClient, err := testcommon.GetFilesystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFilesystem(context.Background(), _require, fsClient) + + signedIdentifiers := make([]*filesystem.SignedIdentifier, 0) + signedIdentifiers = append(signedIdentifiers, &filesystem.SignedIdentifier{ + AccessPolicy: &filesystem.AccessPolicy{ + Permission: to.Ptr("rw"), + }, + }) + + options := filesystem.SetAccessPolicyOptions{FilesystemACL: signedIdentifiers} + _, err = fsClient.SetAccessPolicy(context.Background(), &options) + _require.NotNil(err) + testcommon.ValidateBlobErrorCode(_require, err, datalakeerror.InvalidXMLDocument) + + resp1, err := fsClient.GetAccessPolicy(context.Background(), nil) + _require.Nil(err) + _require.Len(resp1.SignedIdentifiers, 0) +} + +func (s *UnrecordedTestSuite) TestSASFilesystemClient() { + _require := require.New(s.T()) + testName := s.T().Name() + + filesystemName := testcommon.GenerateFilesystemName(testName) + fsClient, err := testcommon.GetFilesystemClient(filesystemName, s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + _, err = 
fsClient.Create(context.Background(), nil) + _require.Nil(err) + defer testcommon.DeleteFilesystem(context.Background(), _require, fsClient) + + // Adding SAS and options + permissions := sas.FilesystemPermissions{ + Read: true, + Add: true, + Write: true, + Create: true, + Delete: true, + } + expiry := time.Now().Add(time.Hour) + + // filesystemSASURL is created with GetSASURL + sasUrl, err := fsClient.GetSASURL(permissions, expiry, nil) + _require.Nil(err) + + // Create filesystem client with sasUrl + _, err = filesystem.NewClientWithNoCredential(sasUrl, nil) + _require.Nil(err) +} + +// TODO: test sas on files + +//func (s *RecordedTestSuite) TestFilesystemListPaths() { +// _require := require.New(s.T()) +// //testName := s.T().Name() +// +// //filesystemName := testcommon.GenerateFilesystemName(testName) +// fsClient, err := testcommon.GetFilesystemClient("cont1", s.T(), testcommon.TestAccountDatalake, nil) +// _require.NoError(err) +// //defer testcommon.DeleteFilesystem(context.Background(), _require, fsClient) +// +// //_, err = fsClient.Create(context.Background(), nil) +// //_require.Nil(err) +// +// resp, err := fsClient.GetProperties(context.Background(), nil) +// _require.Nil(err) +// _require.NotNil(resp.ETag) +// _require.Nil(resp.Metadata) +// +// pager := fsClient.NewListPathsPager(true, nil) +// +// for pager.More() { +// _, err := pager.NextPage(context.Background()) +// _require.NotNil(err) +// if err != nil { +// break +// } +// } +//} + +// TODO: Lease tests diff --git a/sdk/storage/azdatalake/filesystem/constants.go b/sdk/storage/azdatalake/filesystem/constants.go index 3e0c373b87a1..3f862296578a 100644 --- a/sdk/storage/azdatalake/filesystem/constants.go +++ b/sdk/storage/azdatalake/filesystem/constants.go @@ -7,6 +7,7 @@ package filesystem import "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" +import "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/lease" // PublicAccessType defines values for AccessType - private (default) or 
file or filesystem. type PublicAccessType = azblob.PublicAccessType @@ -15,3 +16,45 @@ const ( File PublicAccessType = azblob.PublicAccessTypeBlob Filesystem PublicAccessType = azblob.PublicAccessTypeContainer ) + +// StatusType defines values for StatusType +type StatusType = lease.StatusType + +const ( + StatusTypeLocked StatusType = lease.StatusTypeLocked + StatusTypeUnlocked StatusType = lease.StatusTypeUnlocked +) + +// PossibleStatusTypeValues returns the possible values for the StatusType const type. +func PossibleStatusTypeValues() []StatusType { + return lease.PossibleStatusTypeValues() +} + +// DurationType defines values for DurationType +type DurationType = lease.DurationType + +const ( + DurationTypeInfinite DurationType = lease.DurationTypeInfinite + DurationTypeFixed DurationType = lease.DurationTypeFixed +) + +// PossibleDurationTypeValues returns the possible values for the DurationType const type. +func PossibleDurationTypeValues() []DurationType { + return lease.PossibleDurationTypeValues() +} + +// StateType defines values for StateType +type StateType = lease.StateType + +const ( + StateTypeAvailable StateType = lease.StateTypeAvailable + StateTypeLeased StateType = lease.StateTypeLeased + StateTypeExpired StateType = lease.StateTypeExpired + StateTypeBreaking StateType = lease.StateTypeBreaking + StateTypeBroken StateType = lease.StateTypeBroken +) + +// PossibleStateTypeValues returns the possible values for the StateType const type. 
+func PossibleStateTypeValues() []StateType { + return lease.PossibleStateTypeValues() +} diff --git a/sdk/storage/azdatalake/filesystem/models.go b/sdk/storage/azdatalake/filesystem/models.go index ebb1946ec388..4da5f91d387e 100644 --- a/sdk/storage/azdatalake/filesystem/models.go +++ b/sdk/storage/azdatalake/filesystem/models.go @@ -8,9 +8,9 @@ package filesystem import ( "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake" "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/generated" + "time" ) // SetAccessPolicyOptions provides set of configurations for Filesystem.SetAccessPolicy operation. @@ -18,14 +18,17 @@ type SetAccessPolicyOptions struct { // Specifies whether data in the filesystem may be accessed publicly and the level of access. // If this header is not included in the request, filesystem data is private to the account owner. Access *PublicAccessType - AccessConditions *azdatalake.AccessConditions + AccessConditions *AccessConditions FilesystemACL []*SignedIdentifier } func (o *SetAccessPolicyOptions) format() *container.SetAccessPolicyOptions { + if o == nil { + return nil + } return &container.SetAccessPolicyOptions{ Access: o.Access, - AccessConditions: shared.FormatContainerAccessConditions(o.AccessConditions), + AccessConditions: exported.FormatContainerAccessConditions(o.AccessConditions), ContainerACL: o.FilesystemACL, } } @@ -43,6 +46,9 @@ type CreateOptions struct { } func (o *CreateOptions) format() *container.CreateOptions { + if o == nil { + return nil + } return &container.CreateOptions{ Access: o.Access, Metadata: o.Metadata, @@ -52,21 +58,30 @@ func (o *CreateOptions) format() *container.CreateOptions { // DeleteOptions contains the optional parameters for the Client.Delete method. 
type DeleteOptions struct { - AccessConditions *azdatalake.AccessConditions + AccessConditions *AccessConditions } func (o *DeleteOptions) format() *container.DeleteOptions { + if o == nil { + return nil + } return &container.DeleteOptions{ - AccessConditions: shared.FormatContainerAccessConditions(o.AccessConditions), + AccessConditions: exported.FormatContainerAccessConditions(o.AccessConditions), } } // GetPropertiesOptions contains the optional parameters for the FilesystemClient.GetProperties method. type GetPropertiesOptions struct { - LeaseAccessConditions *azdatalake.LeaseAccessConditions + LeaseAccessConditions *LeaseAccessConditions } func (o *GetPropertiesOptions) format() *container.GetPropertiesOptions { + if o == nil { + return nil + } + if o.LeaseAccessConditions == nil { + o.LeaseAccessConditions = &LeaseAccessConditions{} + } return &container.GetPropertiesOptions{ LeaseAccessConditions: &container.LeaseAccessConditions{ LeaseID: o.LeaseAccessConditions.LeaseID, @@ -77,30 +92,33 @@ func (o *GetPropertiesOptions) format() *container.GetPropertiesOptions { // SetMetadataOptions contains the optional parameters for the Client.SetMetadata method. 
type SetMetadataOptions struct { Metadata map[string]*string - AccessConditions *azdatalake.AccessConditions + AccessConditions *AccessConditions } func (o *SetMetadataOptions) format() *container.SetMetadataOptions { + if o == nil { + return nil + } + accConditions := exported.FormatContainerAccessConditions(o.AccessConditions) return &container.SetMetadataOptions{ - Metadata: o.Metadata, - LeaseAccessConditions: &container.LeaseAccessConditions{ - LeaseID: o.AccessConditions.LeaseAccessConditions.LeaseID, - }, - ModifiedAccessConditions: &container.ModifiedAccessConditions{ - IfMatch: o.AccessConditions.ModifiedAccessConditions.IfMatch, - IfNoneMatch: o.AccessConditions.ModifiedAccessConditions.IfNoneMatch, - IfModifiedSince: o.AccessConditions.ModifiedAccessConditions.IfModifiedSince, - IfUnmodifiedSince: o.AccessConditions.ModifiedAccessConditions.IfUnmodifiedSince, - }, + Metadata: o.Metadata, + LeaseAccessConditions: accConditions.LeaseAccessConditions, + ModifiedAccessConditions: accConditions.ModifiedAccessConditions, } } // GetAccessPolicyOptions contains the optional parameters for the Client.GetAccessPolicy method. type GetAccessPolicyOptions struct { - LeaseAccessConditions *azdatalake.LeaseAccessConditions + LeaseAccessConditions *LeaseAccessConditions } func (o *GetAccessPolicyOptions) format() *container.GetAccessPolicyOptions { + if o == nil { + return nil + } + if o.LeaseAccessConditions == nil { + o.LeaseAccessConditions = &LeaseAccessConditions{} + } return &container.GetAccessPolicyOptions{ LeaseAccessConditions: &container.LeaseAccessConditions{ LeaseID: o.LeaseAccessConditions.LeaseID, @@ -114,6 +132,10 @@ type CPKScopeInfo = container.CPKScopeInfo // AccessPolicy - An Access policy. type AccessPolicy = container.AccessPolicy +// AccessPolicyPermission type simplifies creating the permissions string for a container's access policy. +// Initialize an instance of this type and then call its String method to set AccessPolicy's Permission field. 
+type AccessPolicyPermission = exported.AccessPolicyPermission + // SignedIdentifier - signed identifier. type SignedIdentifier = container.SignedIdentifier @@ -125,18 +147,66 @@ type ListPathsOptions struct { Upn *bool } +func (o *ListPathsOptions) format() generated.FileSystemClientListPathsOptions { + if o == nil { + return generated.FileSystemClientListPathsOptions{} + } + + return generated.FileSystemClientListPathsOptions{ + Continuation: o.Marker, + MaxResults: o.MaxResults, + Path: o.Prefix, + Upn: o.Upn, + } +} + // ListDeletedPathsOptions contains the optional parameters for the Filesystem.ListDeletedPaths operation. type ListDeletedPathsOptions struct { Marker *string MaxResults *int32 Prefix *string - Upn *bool } -// UndeletePathOptions contains the optional parameters for the Filesystem.UndeletePath operation. -type UndeletePathOptions struct { - // placeholder +func (o *ListDeletedPathsOptions) format() generated.FileSystemClientListBlobHierarchySegmentOptions { + showOnly := "deleted" + if o == nil { + return generated.FileSystemClientListBlobHierarchySegmentOptions{Showonly: &showOnly} + } + return generated.FileSystemClientListBlobHierarchySegmentOptions{ + Marker: o.Marker, + MaxResults: o.MaxResults, + Prefix: o.Prefix, + Showonly: &showOnly, + } +} + +// GetSASURLOptions contains the optional parameters for the Client.GetSASURL method. +type GetSASURLOptions struct { + StartTime *time.Time +} + +func (o *GetSASURLOptions) format() time.Time { + if o == nil { + return time.Time{} + } + + var st time.Time + if o.StartTime != nil { + st = o.StartTime.UTC() + } else { + st = time.Time{} + } + return st } // SharedKeyCredential contains an account's name and its primary or secondary key. type SharedKeyCredential = exported.SharedKeyCredential + +// AccessConditions identifies filesystem-specific access conditions which you optionally set. 
+type AccessConditions = exported.AccessConditions + +// LeaseAccessConditions contains optional parameters to access leased entity. +type LeaseAccessConditions = exported.LeaseAccessConditions + +// ModifiedAccessConditions contains a group of parameters for specifying access conditions. +type ModifiedAccessConditions = exported.ModifiedAccessConditions diff --git a/sdk/storage/azdatalake/filesystem/responses.go b/sdk/storage/azdatalake/filesystem/responses.go index 06b9d8b78a86..e0d4c79b9533 100644 --- a/sdk/storage/azdatalake/filesystem/responses.go +++ b/sdk/storage/azdatalake/filesystem/responses.go @@ -7,8 +7,10 @@ package filesystem import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/generated" + "time" ) // CreateResponse contains the response from method FilesystemClient.Create. @@ -24,16 +26,117 @@ type SetMetadataResponse = container.SetMetadataResponse type SetAccessPolicyResponse = container.SetAccessPolicyResponse // GetAccessPolicyResponse contains the response from method FilesystemClient.GetAccessPolicy. -type GetAccessPolicyResponse = container.GetAccessPolicyResponse +type GetAccessPolicyResponse struct { + // PublicAccess contains the information returned from the x-ms-blob-public-access header response. + PublicAccess *PublicAccessType `xml:"BlobPublicAccess"` + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string `xml:"ClientRequestID"` + + // Date contains the information returned from the Date header response. + Date *time.Time `xml:"Date"` + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag `xml:"ETag"` + + // LastModified contains the information returned from the Last-Modified header response. 
+ LastModified *time.Time `xml:"LastModified"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // a collection of signed identifiers + SignedIdentifiers []*SignedIdentifier `xml:"SignedIdentifier"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// formatGetAccessPolicyResponse copies fields from the container response, renaming BlobPublicAccess to PublicAccess to remove the blob prefix. +func formatGetAccessPolicyResponse(r *GetAccessPolicyResponse, contResp *container.GetAccessPolicyResponse) { + r.PublicAccess = contResp.BlobPublicAccess + r.ClientRequestID = contResp.ClientRequestID + r.Date = contResp.Date + r.ETag = contResp.ETag + r.LastModified = contResp.LastModified + r.RequestID = contResp.RequestID + r.SignedIdentifiers = contResp.SignedIdentifiers + r.Version = contResp.Version +} // GetPropertiesResponse contains the response from method FilesystemClient.GetProperties. -type GetPropertiesResponse = generated.FileSystemClientGetPropertiesResponse +type GetPropertiesResponse struct { + // PublicAccess contains the information returned from the x-ms-blob-public-access header response. + PublicAccess *PublicAccessType + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // DefaultEncryptionScope contains the information returned from the x-ms-default-encryption-scope header response. + DefaultEncryptionScope *string + + // DenyEncryptionScopeOverride contains the information returned from the x-ms-deny-encryption-scope-override header response. + DenyEncryptionScopeOverride *bool + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // HasImmutabilityPolicy contains the information returned from the x-ms-has-immutability-policy header response. 
+ HasImmutabilityPolicy *bool + + // HasLegalHold contains the information returned from the x-ms-has-legal-hold header response. + HasLegalHold *bool + + // IsImmutableStorageWithVersioningEnabled contains the information returned from the x-ms-immutable-storage-with-versioning-enabled + // header response. + IsImmutableStorageWithVersioningEnabled *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseDuration contains the information returned from the x-ms-lease-duration header response. + LeaseDuration *DurationType + + // LeaseState contains the information returned from the x-ms-lease-state header response. + LeaseState *StateType + + // LeaseStatus contains the information returned from the x-ms-lease-status header response. + LeaseStatus *StatusType + + // Metadata contains the information returned from the x-ms-meta header response. + Metadata map[string]*string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. 
+ Version *string +} + +func formatFilesystemProperties(r *GetPropertiesResponse, contResp *container.GetPropertiesResponse) { + r.PublicAccess = contResp.BlobPublicAccess + r.ClientRequestID = contResp.ClientRequestID + r.Date = contResp.Date + r.DefaultEncryptionScope = contResp.DefaultEncryptionScope + r.DenyEncryptionScopeOverride = contResp.DenyEncryptionScopeOverride + r.ETag = contResp.ETag + r.HasImmutabilityPolicy = contResp.HasImmutabilityPolicy + r.HasLegalHold = contResp.HasLegalHold + r.IsImmutableStorageWithVersioningEnabled = contResp.IsImmutableStorageWithVersioningEnabled + r.LastModified = contResp.LastModified + r.LeaseDuration = contResp.LeaseDuration + r.LeaseState = contResp.LeaseState + r.LeaseStatus = contResp.LeaseStatus + r.Metadata = contResp.Metadata + r.RequestID = contResp.RequestID + r.Version = contResp.Version +} // ListPathsSegmentResponse contains the response from method FilesystemClient.ListPathsSegment. type ListPathsSegmentResponse = generated.FileSystemClientListPathsResponse // ListDeletedPathsSegmentResponse contains the response from method FilesystemClient.ListDeletedPathsSegment. type ListDeletedPathsSegmentResponse = generated.FileSystemClientListBlobHierarchySegmentResponse - -// UndeletePathResponse contains the response from method FilesystemClient.UndeletePath. 
-type UndeletePathResponse = generated.PathClientUndeleteResponse diff --git a/sdk/storage/azdatalake/go.mod b/sdk/storage/azdatalake/go.mod index 6fc071bf6038..815ae6a72795 100644 --- a/sdk/storage/azdatalake/go.mod +++ b/sdk/storage/azdatalake/go.mod @@ -4,23 +4,17 @@ go 1.18 require ( github.com/Azure/azure-sdk-for-go/sdk/azcore v1.5.0 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0 github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0 github.com/stretchr/testify v1.7.1 ) require ( - github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/golang-jwt/jwt v3.2.1+incompatible // indirect - github.com/google/uuid v1.1.1 // indirect - github.com/kylelemons/godebug v1.1.0 // indirect - github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 // indirect + github.com/dnaeon/go-vcr v1.1.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - golang.org/x/crypto v0.0.0-20220511200225-c6db032c6c88 // indirect golang.org/x/net v0.8.0 // indirect - golang.org/x/sys v0.6.0 // indirect golang.org/x/text v0.8.0 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) diff --git a/sdk/storage/azdatalake/go.sum b/sdk/storage/azdatalake/go.sum index 44c525ae55ed..7999cbb85ae5 100644 --- a/sdk/storage/azdatalake/go.sum +++ b/sdk/storage/azdatalake/go.sum @@ -1,44 +1,37 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore v1.5.0 h1:xGLAFFd9D3iLGxYiUGPdITSzsFmU1K8VtfuUHWAoN7M= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.5.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0 h1:QkAcEIAKbNL4KoFr4SathZPhDhF4mVwpBMFlYjyAqy8= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0/go.mod h1:bhXu1AjYL+wutSL/kpSq6s7733q2Rb0yuot9Zgfqa/0= github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 
h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0 h1:u/LLAOFgsMv7HmNL4Qufg58y+qElGOt5qv0z1mURkRY= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0/go.mod h1:2e8rMJtl2+2j+HXbTBwnyGpm5Nou7KhvSfxOq8JpTag= github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1 h1:BWe8a+f/t+7KY7zH2mqygeUD0t8hNFXe08p1Pb3/jKE= -github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1/go.mod h1:Vt9sXTKwMyGcOxSmLDMnGPgqsUg7m8pe215qMLrDXw4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dnaeon/go-vcr v1.1.0 h1:ReYa/UBrRyQdant9B4fNHGoCNKw6qh6P0fsdGmZpR7c= +github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko= github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c= -github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= -github.com/golang-jwt/jwt/v4 v4.2.0 h1:besgBTC8w8HjP6NzQdxwKH9Z5oQMZ24ThTrHp3cZ8eU= github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= -github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/montanaflynn/stats v0.6.6/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= +github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 h1:Qj1ukM4GlMWXNdMBuXcXfz/Kw9s1qm0CLY32QxuSImI= 
-github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4/go.mod h1:N6UoU20jOqggOuDwUaBQpluzLNDqif3kq9z2wpdYEfQ= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= golang.org/x/crypto v0.0.0-20220511200225-c6db032c6c88 h1:Tgea0cVUD0ivh5ADBX4WwuI12DUd2to3nCYe2eayMIw= -golang.org/x/crypto v0.0.0-20220511200225-c6db032c6c88/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 
v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/sdk/storage/azdatalake/internal/shared/access_conditions.go b/sdk/storage/azdatalake/internal/exported/access_conditions.go similarity index 51% rename from sdk/storage/azdatalake/internal/shared/access_conditions.go rename to sdk/storage/azdatalake/internal/exported/access_conditions.go index a51de4518f6b..974cb1ed628e 100644 --- a/sdk/storage/azdatalake/internal/shared/access_conditions.go +++ b/sdk/storage/azdatalake/internal/exported/access_conditions.go @@ -1,16 +1,36 @@ -package shared +package exported import ( "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake" "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/generated" ) +// AccessConditions identifies container-specific access conditions which you optionally set. +type AccessConditions struct { + ModifiedAccessConditions *ModifiedAccessConditions + LeaseAccessConditions *LeaseAccessConditions +} + +// LeaseAccessConditions contains optional parameters to access leased entity. +type LeaseAccessConditions = generated.LeaseAccessConditions + +// ModifiedAccessConditions contains a group of parameters for specifying access conditions. +type ModifiedAccessConditions = generated.ModifiedAccessConditions + // FormatContainerAccessConditions formats FilesystemAccessConditions into container's LeaseAccessConditions and ModifiedAccessConditions. 
-func FormatContainerAccessConditions(b *azdatalake.AccessConditions) *container.AccessConditions { +func FormatContainerAccessConditions(b *AccessConditions) *container.AccessConditions { if b == nil { - return nil + return &container.AccessConditions{ + LeaseAccessConditions: &container.LeaseAccessConditions{}, + ModifiedAccessConditions: &container.ModifiedAccessConditions{}, + } + } + if b.LeaseAccessConditions == nil { + b.LeaseAccessConditions = &LeaseAccessConditions{} + } + if b.ModifiedAccessConditions == nil { + b.ModifiedAccessConditions = &ModifiedAccessConditions{} } return &container.AccessConditions{ LeaseAccessConditions: &container.LeaseAccessConditions{ @@ -26,9 +46,15 @@ func FormatContainerAccessConditions(b *azdatalake.AccessConditions) *container. } // FormatPathAccessConditions formats PathAccessConditions into path's LeaseAccessConditions and ModifiedAccessConditions. -func FormatPathAccessConditions(p *azdatalake.AccessConditions) (*generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { +func FormatPathAccessConditions(p *AccessConditions) (*generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { if p == nil { - return nil, nil + return &generated.LeaseAccessConditions{}, &generated.ModifiedAccessConditions{} + } + if p.LeaseAccessConditions == nil { + p.LeaseAccessConditions = &LeaseAccessConditions{} + } + if p.ModifiedAccessConditions == nil { + p.ModifiedAccessConditions = &ModifiedAccessConditions{} } return &generated.LeaseAccessConditions{ LeaseID: p.LeaseAccessConditions.LeaseID, @@ -41,9 +67,18 @@ func FormatPathAccessConditions(p *azdatalake.AccessConditions) (*generated.Leas } // FormatBlobAccessConditions formats PathAccessConditions into blob's LeaseAccessConditions and ModifiedAccessConditions. 
-func FormatBlobAccessConditions(p *azdatalake.AccessConditions) *blob.AccessConditions { +func FormatBlobAccessConditions(p *AccessConditions) *blob.AccessConditions { if p == nil { - return nil + return &blob.AccessConditions{ + LeaseAccessConditions: &blob.LeaseAccessConditions{}, + ModifiedAccessConditions: &blob.ModifiedAccessConditions{}, + } + } + if p.LeaseAccessConditions == nil { + p.LeaseAccessConditions = &LeaseAccessConditions{} + } + if p.ModifiedAccessConditions == nil { + p.ModifiedAccessConditions = &ModifiedAccessConditions{} } return &blob.AccessConditions{LeaseAccessConditions: &blob.LeaseAccessConditions{ LeaseID: p.LeaseAccessConditions.LeaseID, diff --git a/sdk/storage/azdatalake/internal/exported/path.go b/sdk/storage/azdatalake/internal/exported/path.go new file mode 100644 index 000000000000..eabd8aa3ddaa --- /dev/null +++ b/sdk/storage/azdatalake/internal/exported/path.go @@ -0,0 +1 @@ +package exported diff --git a/sdk/storage/azdatalake/internal/testcommon/clients_auth.go b/sdk/storage/azdatalake/internal/testcommon/clients_auth.go new file mode 100644 index 000000000000..e65e4914b2c6 --- /dev/null +++ b/sdk/storage/azdatalake/internal/testcommon/clients_auth.go @@ -0,0 +1,110 @@ +package testcommon + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/recording" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/filesystem" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/service" + "github.com/stretchr/testify/require" + "testing" +) + +const ( + DefaultEndpointSuffix = "core.windows.net/" + DefaultBlobEndpointSuffix = "blob.core.windows.net/" + AccountNameEnvVar = "AZURE_STORAGE_ACCOUNT_NAME" + AccountKeyEnvVar = "AZURE_STORAGE_ACCOUNT_KEY" + DefaultEndpointSuffixEnvVar = "AZURE_STORAGE_ENDPOINT_SUFFIX" + SubscriptionID = 
"SUBSCRIPTION_ID" + ResourceGroupName = "RESOURCE_GROUP_NAME" +) + +const ( + FakeStorageAccount = "fakestorage" + FakeBlobStorageURL = "https://fakestorage.blob.core.windows.net" + FakeDFSStorageURL = "https://fakestorage.dfs.core.windows.net" + FakeToken = "faketoken" +) + +var BasicMetadata = map[string]*string{"Foo": to.Ptr("bar")} + +type TestAccountType string + +const ( + TestAccountDefault TestAccountType = "" + TestAccountSecondary TestAccountType = "SECONDARY_" + TestAccountPremium TestAccountType = "PREMIUM_" + TestAccountSoftDelete TestAccountType = "SOFT_DELETE_" + TestAccountDatalake TestAccountType = "DATALAKE_" + TestAccountImmutable TestAccountType = "IMMUTABLE_" +) + +func SetClientOptions(t *testing.T, opts *azcore.ClientOptions) { + opts.Logging.AllowedHeaders = append(opts.Logging.AllowedHeaders, "X-Request-Mismatch", "X-Request-Mismatch-Error") + + transport, err := recording.NewRecordingHTTPClient(t, nil) + require.NoError(t, err) + opts.Transport = transport +} + +func GetGenericAccountInfo(accountType TestAccountType) (string, string) { + if recording.GetRecordMode() == recording.PlaybackMode { + return FakeStorageAccount, "ZmFrZQ==" + } + accountNameEnvVar := string(accountType) + AccountNameEnvVar + accountKeyEnvVar := string(accountType) + AccountKeyEnvVar + accountName, _ := GetRequiredEnv(accountNameEnvVar) + accountKey, _ := GetRequiredEnv(accountKeyEnvVar) + return accountName, accountKey +} + +func GetGenericSharedKeyCredential(accountType TestAccountType) (*azdatalake.SharedKeyCredential, error) { + accountName, accountKey := GetGenericAccountInfo(accountType) + if accountName == "" || accountKey == "" { + return nil, errors.New(string(accountType) + AccountNameEnvVar + " and/or " + string(accountType) + AccountKeyEnvVar + " environment variables not specified.") + } + return azdatalake.NewSharedKeyCredential(accountName, accountKey) +} + +func GetServiceClient(t *testing.T, accountType TestAccountType, options 
*service.ClientOptions) (*service.Client, error) { + if options == nil { + options = &service.ClientOptions{} + } + + SetClientOptions(t, &options.ClientOptions) + + cred, err := GetGenericSharedKeyCredential(accountType) + if err != nil { + return nil, err + } + + serviceClient, err := service.NewClientWithSharedKeyCredential("https://"+cred.AccountName()+".dfs.core.windows.net/", cred, options) + + return serviceClient, err +} + +func GetFilesystemClient(fsName string, t *testing.T, accountType TestAccountType, options *filesystem.ClientOptions) (*filesystem.Client, error) { + if options == nil { + options = &filesystem.ClientOptions{} + } + + SetClientOptions(t, &options.ClientOptions) + + cred, err := GetGenericSharedKeyCredential(accountType) + if err != nil { + return nil, err + } + + filesystemClient, err := filesystem.NewClientWithSharedKeyCredential("https://"+cred.AccountName()+".dfs.core.windows.net/"+fsName, cred, options) + + return filesystemClient, err +} + +func DeleteFilesystem(ctx context.Context, _require *require.Assertions, filesystemClient *filesystem.Client) { + _, err := filesystemClient.Delete(ctx, nil) + _require.Nil(err) +} diff --git a/sdk/storage/azdatalake/internal/testcommon/common.go b/sdk/storage/azdatalake/internal/testcommon/common.go new file mode 100644 index 000000000000..71534c98af95 --- /dev/null +++ b/sdk/storage/azdatalake/internal/testcommon/common.go @@ -0,0 +1,78 @@ +package testcommon + +import ( + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/internal/recording" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" + "github.com/stretchr/testify/require" + "os" + "strings" + "testing" + "time" +) + +const ( + FilesystemPrefix = "gofs" + FilePrefix = "gotestfile" + DefaultData = "Godatalakedata" + InvalidHeaderErrorSubstring = "invalid header field" // error thrown by the http client +) + +func GenerateFilesystemName(testName string) string { + return 
FilesystemPrefix + GenerateEntityName(testName) +} + +func GenerateEntityName(testName string) string { + return strings.ReplaceAll(strings.ReplaceAll(strings.ToLower(testName), "/", ""), "test", "") +} + +func BeforeTest(t *testing.T, suite string, test string) { + const blobURLRegex = `https://\S+\.blob\.core\.windows\.net` + const dfsURLRegex = `https://\S+\.dfs\.core\.windows\.net` + const tokenRegex = `(?:Bearer\s).*` + + require.NoError(t, recording.AddURISanitizer(FakeBlobStorageURL, blobURLRegex, nil)) + require.NoError(t, recording.AddURISanitizer(FakeDFSStorageURL, dfsURLRegex, nil)) + require.NoError(t, recording.AddHeaderRegexSanitizer("x-ms-copy-source", FakeBlobStorageURL, blobURLRegex, nil)) + require.NoError(t, recording.AddHeaderRegexSanitizer("x-ms-copy-source", FakeDFSStorageURL, dfsURLRegex, nil)) + require.NoError(t, recording.AddHeaderRegexSanitizer("x-ms-copy-source-authorization", FakeToken, tokenRegex, nil)) + // we freeze request IDs and timestamps to avoid creating noisy diffs + // NOTE: we can't freeze time stamps as that breaks some tests that use if-modified-since etc (maybe it can be fixed?) 
+ //testframework.AddHeaderRegexSanitizer("X-Ms-Date", "Wed, 10 Aug 2022 23:34:14 GMT", "", nil) + require.NoError(t, recording.AddHeaderRegexSanitizer("x-ms-request-id", "00000000-0000-0000-0000-000000000000", "", nil)) + //testframework.AddHeaderRegexSanitizer("Date", "Wed, 10 Aug 2022 23:34:14 GMT", "", nil) + // TODO: more freezing + //testframework.AddBodyRegexSanitizer("RequestId:00000000-0000-0000-0000-000000000000", `RequestId:\w{8}-\w{4}-\w{4}-\w{4}-\w{12}`, nil) + //testframework.AddBodyRegexSanitizer("Time:2022-08-11T00:21:56.4562741Z", `Time:\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(?:\.\d*)?Z`, nil) + require.NoError(t, recording.Start(t, "sdk/storage/azdatalake/testdata", nil)) +} + +func AfterTest(t *testing.T, suite string, test string) { + require.NoError(t, recording.Stop(t, nil)) +} + +// GetRequiredEnv gets an environment variable by name and returns an error if it is not found +func GetRequiredEnv(name string) (string, error) { + env, ok := os.LookupEnv(name) + if ok { + return env, nil + } else { + return "", errors.New("Required environment variable not set: " + name) + } +} + +func ValidateBlobErrorCode(_require *require.Assertions, err error, code bloberror.Code) { + _require.NotNil(err) + var responseErr *azcore.ResponseError + errors.As(err, &responseErr) + if responseErr != nil { + _require.Equal(string(code), responseErr.ErrorCode) + } else { + _require.Contains(err.Error(), code) + } +} + +func GetRelativeTimeFromAnchor(anchorTime *time.Time, amount time.Duration) time.Time { + return anchorTime.Add(amount * time.Second) +} diff --git a/sdk/storage/azdatalake/lease/filesystem_client.go b/sdk/storage/azdatalake/lease/filesystem_client.go index e2c5100a859e..110171d15990 100644 --- a/sdk/storage/azdatalake/lease/filesystem_client.go +++ b/sdk/storage/azdatalake/lease/filesystem_client.go @@ -10,15 +10,12 @@ import ( "context" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/lease" 
"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/filesystem" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/base" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/generated" ) // FilesystemClient provides lease functionality for the underlying filesystem client. type FilesystemClient struct { - filesystemClient *filesystem.Client - leaseID *string - containerClient *lease.ContainerClient + leaseID *string + containerClient *lease.ContainerClient } // FilesystemClientOptions contains the optional values when creating a FilesystemClient. @@ -27,10 +24,6 @@ type FilesystemClientOptions struct { LeaseID *string } -func (c *FilesystemClient) generated() *generated.FileSystemClient { - return base.InnerClient((*base.Client[generated.FileSystemClient])(c.filesystemClient)) -} - // NewFilesystemClient creates a filesystem lease client for the provided filesystem client. // - client - an instance of a filesystem client // - options - client options; pass nil to accept the default values diff --git a/sdk/storage/azdatalake/lease/models.go b/sdk/storage/azdatalake/lease/models.go index 4d94dafc5ec0..c09d30a21b40 100644 --- a/sdk/storage/azdatalake/lease/models.go +++ b/sdk/storage/azdatalake/lease/models.go @@ -9,12 +9,12 @@ package lease import ( "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/lease" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/exported" ) // FilesystemAcquireOptions contains the optional parameters for the LeaseClient.AcquireLease method. 
type FilesystemAcquireOptions struct { - ModifiedAccessConditions *azdatalake.ModifiedAccessConditions + ModifiedAccessConditions *ModifiedAccessConditions } func (o *FilesystemAcquireOptions) format() *lease.ContainerAcquireOptions { @@ -31,7 +31,7 @@ func (o *FilesystemAcquireOptions) format() *lease.ContainerAcquireOptions { // FilesystemBreakOptions contains the optional parameters for the LeaseClient.BreakLease method. type FilesystemBreakOptions struct { BreakPeriod *int32 - ModifiedAccessConditions *azdatalake.ModifiedAccessConditions + ModifiedAccessConditions *ModifiedAccessConditions } func (o *FilesystemBreakOptions) format() *lease.ContainerBreakOptions { @@ -48,7 +48,7 @@ func (o *FilesystemBreakOptions) format() *lease.ContainerBreakOptions { // FilesystemChangeOptions contains the optional parameters for the LeaseClient.ChangeLease method. type FilesystemChangeOptions struct { - ModifiedAccessConditions *azdatalake.ModifiedAccessConditions + ModifiedAccessConditions *ModifiedAccessConditions } func (o *FilesystemChangeOptions) format() *lease.ContainerChangeOptions { @@ -63,7 +63,7 @@ func (o *FilesystemChangeOptions) format() *lease.ContainerChangeOptions { } type FilesystemReleaseOptions struct { - ModifiedAccessConditions *azdatalake.ModifiedAccessConditions + ModifiedAccessConditions *ModifiedAccessConditions } func (o *FilesystemReleaseOptions) format() *lease.ContainerReleaseOptions { @@ -78,7 +78,7 @@ func (o *FilesystemReleaseOptions) format() *lease.ContainerReleaseOptions { } type FilesystemRenewOptions struct { - ModifiedAccessConditions *azdatalake.ModifiedAccessConditions + ModifiedAccessConditions *ModifiedAccessConditions } func (o *FilesystemRenewOptions) format() *lease.ContainerRenewOptions { @@ -94,7 +94,7 @@ func (o *FilesystemRenewOptions) format() *lease.ContainerRenewOptions { // PathAcquireOptions contains the optional parameters for the LeaseClient.AcquireLease method. 
type PathAcquireOptions struct { - ModifiedAccessConditions *azdatalake.ModifiedAccessConditions + ModifiedAccessConditions *ModifiedAccessConditions } func (o *PathAcquireOptions) format() *lease.BlobAcquireOptions { @@ -111,7 +111,7 @@ func (o *PathAcquireOptions) format() *lease.BlobAcquireOptions { // PathBreakOptions contains the optional parameters for the LeaseClient.BreakLease method. type PathBreakOptions struct { BreakPeriod *int32 - ModifiedAccessConditions *azdatalake.ModifiedAccessConditions + ModifiedAccessConditions *ModifiedAccessConditions } func (o *PathBreakOptions) format() *lease.BlobBreakOptions { @@ -128,7 +128,7 @@ func (o *PathBreakOptions) format() *lease.BlobBreakOptions { // PathChangeOptions contains the optional parameters for the LeaseClient.ChangeLease method. type PathChangeOptions struct { - ModifiedAccessConditions *azdatalake.ModifiedAccessConditions + ModifiedAccessConditions *ModifiedAccessConditions } func (o *PathChangeOptions) format() *lease.BlobChangeOptions { @@ -143,7 +143,7 @@ func (o *PathChangeOptions) format() *lease.BlobChangeOptions { } type PathReleaseOptions struct { - ModifiedAccessConditions *azdatalake.ModifiedAccessConditions + ModifiedAccessConditions *ModifiedAccessConditions } func (o *PathReleaseOptions) format() *lease.BlobReleaseOptions { @@ -158,7 +158,7 @@ func (o *PathReleaseOptions) format() *lease.BlobReleaseOptions { } type PathRenewOptions struct { - ModifiedAccessConditions *azdatalake.ModifiedAccessConditions + ModifiedAccessConditions *ModifiedAccessConditions } func (o *PathRenewOptions) format() *lease.BlobRenewOptions { @@ -171,3 +171,12 @@ func (o *PathRenewOptions) format() *lease.BlobRenewOptions { }, } } + +// AccessConditions identifies blob-specific access conditions which you optionally set. +type AccessConditions = exported.AccessConditions + +// LeaseAccessConditions contains optional parameters to access leased entity. 
+type LeaseAccessConditions = exported.LeaseAccessConditions + +// ModifiedAccessConditions contains a group of parameters for specifying access conditions. +type ModifiedAccessConditions = exported.ModifiedAccessConditions diff --git a/sdk/storage/azdatalake/lease/path_client.go b/sdk/storage/azdatalake/lease/path_client.go index 57b06f5c20a2..ec7aa846c47e 100644 --- a/sdk/storage/azdatalake/lease/path_client.go +++ b/sdk/storage/azdatalake/lease/path_client.go @@ -11,15 +11,11 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/lease" "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/directory" "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/file" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/base" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/generated" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/path" ) // PathClient provides lease functionality for the underlying path client. type PathClient struct { blobClient *lease.BlobClient - pathClient *path.Client leaseID *string } @@ -37,10 +33,6 @@ func NewPathClient[T directory.Client | file.Client](client *T, options *PathCli return nil, nil } -func (c *PathClient) generated() *generated.PathClient { - return base.InnerClient((*base.Client[generated.PathClient])(c.pathClient)) -} - // LeaseID returns leaseID of the client. func (c *PathClient) LeaseID() *string { return c.leaseID diff --git a/sdk/storage/azdatalake/service/client.go b/sdk/storage/azdatalake/service/client.go index eeac42200d11..129bb5533f9a 100644 --- a/sdk/storage/azdatalake/service/client.go +++ b/sdk/storage/azdatalake/service/client.go @@ -27,6 +27,38 @@ type ClientOptions base.ClientOptions // Client represents a URL to the Azure Datalake Storage service. type Client base.CompositeClient[generated.ServiceClient, generated.ServiceClient, service.Client] +// NewClient creates an instance of Client with the specified values. 
+// - serviceURL - the URL of the service e.g. https://<storage-account>.dfs.core.windows.net/
+// - cred - an Azure AD credential, typically obtained via the azidentity module
+// - options - client options; pass nil to accept the default values
+func NewClient(serviceURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
+	blobServiceURL := strings.Replace(serviceURL, ".dfs.", ".blob.", 1)
+	datalakeServiceURL := strings.Replace(serviceURL, ".blob.", ".dfs.", 1)
+
+	authPolicy := shared.NewStorageChallengePolicy(cred)
+	conOptions := shared.GetClientOptions(options)
+	plOpts := runtime.PipelineOptions{
+		PerRetry: []policy.Policy{authPolicy},
+	}
+	base.SetPipelineOptions((*base.ClientOptions)(conOptions), &plOpts)
+
+	azClient, err := azcore.NewClient(shared.ServiceClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
+	if err != nil {
+		return nil, err
+	}
+
+	if options == nil {
+		options = &ClientOptions{}
+	}
+	blobServiceClientOpts := service.ClientOptions{
+		ClientOptions: options.ClientOptions,
+	}
+	blobSvcClient, _ := service.NewClient(blobServiceURL, cred, &blobServiceClientOpts)
+	svcClient := base.NewServiceClient(datalakeServiceURL, blobServiceURL, blobSvcClient, azClient, nil, (*base.ClientOptions)(conOptions))
+
+	return (*Client)(svcClient), nil
+}
+
+// NewClientWithNoCredential creates an instance of Client with the specified values.
+// - serviceURL - the URL of the storage account e.g. https://<storage-account>.dfs.core.windows.net/
+// - options - client options; pass nil to accept the default values.
@@ -43,6 +75,9 @@ func NewClientWithNoCredential(serviceURL string, options *ClientOptions) (*Clie
 		return nil, err
 	}
 
+	if options == nil {
+		options = &ClientOptions{}
+	}
 	blobServiceClientOpts := service.ClientOptions{
 		ClientOptions: options.ClientOptions,
 	}
@@ -72,6 +107,9 @@ func NewClientWithSharedKeyCredential(serviceURL string, cred *SharedKeyCredenti
 		return nil, err
 	}
 
+	if options == nil {
+		options = &ClientOptions{}
+	}
 	blobServiceClientOpts := service.ClientOptions{
 		ClientOptions: options.ClientOptions,
 	}
@@ -105,6 +143,8 @@ func NewClientFromConnectionString(connectionString string, options *ClientOptio
 // NewFilesystemClient creates a new filesystem.Client object by concatenating filesystemName to the end of this Client's URL.
 // The new filesystem.Client uses the same request policy pipeline as the Client.
 func (s *Client) NewFilesystemClient(filesystemName string) *filesystem.Client {
+	//fsURL := runtime.JoinPaths(s.generatedServiceClientWithDFS().Endpoint(), filesystemName)
+	//return (*filesystem.Client)(base.NewFilesystemClient(fsURL, s.generated().Pipeline(), s.credential()))
 	return nil
 }
 
@@ -120,12 +160,12 @@ func (s *Client) NewFileClient(fileName string) *filesystem.Client {
 	return nil
 }
 
-func (s *Client) generatedFSClientWithDFS() *generated.ServiceClient {
+func (s *Client) generatedServiceClientWithDFS() *generated.ServiceClient {
 	svcClientWithDFS, _, _ := base.InnerClients((*base.CompositeClient[generated.ServiceClient, generated.ServiceClient, service.Client])(s))
 	return svcClientWithDFS
 }
 
-func (s *Client) generatedFSClientWithBlob() *generated.ServiceClient {
+func (s *Client) generatedServiceClientWithBlob() *generated.ServiceClient {
 	_, svcClientWithBlob, _ := base.InnerClients((*base.CompositeClient[generated.ServiceClient, generated.ServiceClient, service.Client])(s))
 	return svcClientWithBlob
 }
@@ -139,9 +179,14 @@ func (s *Client) sharedKey() *exported.SharedKeyCredential { return
base.SharedKeyComposite((*base.CompositeClient[generated.ServiceClient, generated.ServiceClient, service.Client])(s)) } -// URL returns the URL endpoint used by the Client object. -func (s *Client) URL() string { - return "s.generated().Endpoint()" +// DFSURL returns the URL endpoint used by the Client object. +func (s *Client) DFSURL() string { + return s.generatedServiceClientWithDFS().Endpoint() +} + +// BlobURL returns the URL endpoint used by the Client object. +func (s *Client) BlobURL() string { + return s.generatedServiceClientWithBlob().Endpoint() } // CreateFilesystem creates a new filesystem under the specified account. (blob3)