From 4aca52c7c2cf0d0fb3a5d05eb9ec4989a04c7f6f Mon Sep 17 00:00:00 2001 From: Gauri Prasad <51212198+gapra-msft@users.noreply.github.com> Date: Tue, 15 Aug 2023 09:19:57 -0700 Subject: [PATCH] Migrated blob and file code to Track 2 (#2242) --- ChangeLog.md | 14 + cmd/benchmark.go | 36 +- cmd/copy.go | 90 ++- cmd/copyEnumeratorInit.go | 71 +- cmd/copyEnumeratorInit_test.go | 75 +- cmd/copyUtil.go | 56 +- cmd/copyUtil_test.go | 45 +- cmd/credentialUtil.go | 166 ++-- cmd/gcpNameResolver_test.go | 4 +- cmd/jobsResume.go | 2 +- cmd/login.go | 16 +- cmd/make.go | 51 +- cmd/pathUtils.go | 85 +- cmd/root.go | 18 +- cmd/s3NameResolver_test.go | 4 +- cmd/sync.go | 2 +- cmd/syncProcessor.go | 96 ++- cmd/zc_enumerator.go | 148 ++-- cmd/zc_filter.go | 9 +- cmd/zc_newobjectadapters.go | 210 ++++- cmd/zc_pipeline_init.go | 6 +- cmd/zc_processor.go | 2 +- cmd/zc_traverser_benchmark.go | 2 +- cmd/zc_traverser_blob.go | 271 ++++--- cmd/zc_traverser_blob_account.go | 46 +- cmd/zc_traverser_blob_versions.go | 46 +- cmd/zc_traverser_file.go | 265 ++++--- cmd/zc_traverser_file_account.go | 43 +- cmd/zc_traverser_local.go | 8 +- cmd/zt_copy_blob_download_test.go | 189 +++-- cmd/zt_copy_blob_file_test.go | 117 ++- cmd/zt_copy_blob_upload_test.go | 77 +- cmd/zt_copy_file_file_test.go | 82 +- cmd/zt_copy_s2smigration_test.go | 201 ++--- cmd/zt_generic_processor_test.go | 20 +- cmd/zt_generic_service_traverser_test.go | 67 +- cmd/zt_generic_traverser_test.go | 176 +++-- cmd/zt_make_test.go | 194 +++++ cmd/zt_overwrite_posix_properties_test.go | 17 +- cmd/zt_remove_blob_test.go | 254 +++--- cmd/zt_remove_copy_test.go | 14 +- cmd/zt_remove_file_test.go | 126 +-- cmd/zt_scenario_helpers_for_test.go | 343 ++++---- cmd/zt_set_properties_test.go | 416 +++++----- cmd/zt_sync_blob_blob_test.go | 310 ++++---- cmd/zt_sync_blob_local_test.go | 169 ++-- cmd/zt_sync_comparator_test.go | 215 +++--- cmd/zt_sync_local_blob_test.go | 83 +- cmd/zt_sync_local_blob_windows_test.go | 32 +- cmd/zt_sync_processor_test.go | 29 +- cmd/zt_test.go | 377 +++++---- cmd/zt_traverser_blob_test.go | 90 +-- cmd/zt_unit_test.go | 70 ++ cmd/zt_user_input_test.go | 4 +- common/access.go | 30 +- common/clientFactory.go | 293 +++++++ common/credCacheGnomeKeyringShim_linux.go | 3 + common/credCacheInternal_linux.go | 3 +- common/credentialFactory.go | 70 +- common/environment.go | 12 +- common/extensions.go | 18 - common/extensions_test.go | 5 +- common/fe-ste-models.go | 228 ++---- common/fe-ste-models_test.go | 40 +- common/gcpModels.go | 3 +- common/genericResourceURLParts.go | 40 +- common/iff.go | 81 +- common/logger.go | 56 +- common/oauthTokenManager.go | 731 ++++++------------ common/prologueState.go | 4 +- common/rpc-models.go | 24 +- common/s3Models.go | 3 +- common/unixStatAdapter.go | 48 +- common/version.go | 2 +- e2etest/arm.go | 6 +- e2etest/config.go | 40 +- e2etest/declarativeHelpers.go | 7 +- e2etest/declarativeResourceAdapters.go | 109 ++- e2etest/declarativeResourceManagers.go | 100 +-- e2etest/declarativeScenario.go | 49 +- e2etest/declarativeTestFiles.go | 28 +- e2etest/declarativeWithPropertyProviders.go | 9 +- e2etest/factory.go | 163 ++-- e2etest/helpers.go | 312 ++++---- e2etest/managedDisks.go | 4 +- e2etest/pointers.go | 6 - e2etest/runner.go | 2 +- e2etest/scenario_helpers.go | 541 +++++++------ e2etest/zt_basic_copy_sync_remove_test.go | 12 +- e2etest/zt_client_provided_key_test.go | 54 ++ e2etest/zt_copy_file_smb_test.go | 5 +- e2etest/zt_preserve_access_tier_test.go | 11 +- e2etest/zt_preserve_properties_test.go | 7 +- 
e2etest/zt_preserve_smb_properties_test.go | 21 +- e2etest/zt_remove_test.go | 46 +- e2etest/zt_resume_test.go | 7 +- e2etest/zt_resume_windows_test.go | 4 +- e2etest/zt_trailingdot_test.go | 7 +- go.mod | 25 +- go.sum | 59 +- jobsAdmin/JobsAdmin.go | 11 +- jobsAdmin/init.go | 4 +- perf-test.yaml | 28 +- ste/ErrorExt.go | 16 +- ste/JobPartPlan.go | 12 +- ste/downloader-azureFiles.go | 24 +- ste/downloader-azureFiles_linux.go | 21 +- ste/downloader-azureFiles_windows.go | 20 +- ste/downloader-blob.go | 50 +- ste/downloader-blobFS.go | 2 +- ste/fileAttributesHelper.go | 102 +++ ste/mgr-JobMgr.go | 37 +- ste/mgr-JobPartMgr.go | 377 ++++----- ste/mgr-JobPartTransferMgr.go | 100 ++- ste/pacedReadSeeker.go | 2 +- ste/remoteObjectExists.go | 27 +- ste/s2sCopier-URLToBlob.go | 38 +- ste/securityInfoPersistenceManager.go | 52 +- ste/sender-appendBlob.go | 86 ++- ste/sender-appendBlobFromLocal.go | 34 +- ste/sender-appendBlobFromURL.go | 42 +- ste/sender-azureFile.go | 302 +++++--- ste/sender-azureFileFromLocal.go | 13 +- ste/sender-azureFileFromURL.go | 14 +- ste/sender-blobFS.go | 75 +- ste/sender-blobFolders.go | 147 ++-- ste/sender-blobFolders_linux.go | 2 +- ste/sender-blobFolders_other.go | 2 +- ste/sender-blobSymlinks.go | 56 +- ste/sender-blobSymlinks_linux.go | 2 +- ste/sender-blockBlob.go | 157 ++-- ste/sender-blockBlobFromLocal.go | 68 +- ste/sender-blockBlobFromURL.go | 108 +-- ste/sender-pageBlob.go | 141 ++-- ste/sender-pageBlobFromLocal.go | 36 +- ste/sender-pageBlobFromURL.go | 78 +- ste/sender.go | 29 +- ste/sender_pageBlobFromURL_test.go | 27 +- ste/sourceInfoProvider-Blob.go | 76 +- ste/sourceInfoProvider-File.go | 167 +++- ste/sourceInfoProvider-GCP.go | 14 +- ste/sourceInfoProvider-Local_linux.go | 6 +- ste/sourceInfoProvider-Local_windows.go | 6 +- ste/sourceInfoProvider-S3.go | 10 +- ste/sourceInfoProvider.go | 24 +- ste/xfer-anyToRemote-file.go | 39 +- ste/xfer-deleteBlob.go | 32 +- ste/xfer-deleteFile.go | 72 +- ste/xfer-setProperties.go | 77 +- ste/xfer.go | 11 +- ste/xferLogPolicy.go | 235 ++++++ ste/xferRetryNotificationPolicy.go | 34 +- ste/xferRetrypolicy.go | 173 +---- ste/xferStatsPolicy.go | 40 + ste/xferVersionPolicy.go | 120 +++ testSuite/cmd/clean.go | 265 ++++--- testSuite/cmd/common.go | 15 +- testSuite/cmd/create.go | 175 ++--- testSuite/cmd/list.go | 56 +- testSuite/cmd/testblob.go | 353 ++++----- testSuite/cmd/testfile.go | 154 ++-- .../scripts/test_autodetect_blob_type.py | 2 +- testSuite/scripts/test_azcopy_operations.py | 2 +- testSuite/scripts/test_blob_download.py | 2 +- testSuite/scripts/test_blobfs_download_SAS.py | 2 +- .../scripts/test_blobfs_download_sharedkey.py | 2 +- testSuite/scripts/test_blobfs_upload_SAS.py | 2 +- .../scripts/test_blobfs_upload_sharedkey.py | 2 +- .../scripts/test_service_to_service_copy.py | 2 +- testSuite/scripts/test_upload_block_blob.py | 4 +- testSuite/scripts/test_upload_page_blob.py | 2 +- testSuite/scripts/utility.py | 6 +- 172 files changed, 7268 insertions(+), 6143 deletions(-) mode change 100755 => 100644 cmd/zc_enumerator.go create mode 100644 cmd/zt_make_test.go create mode 100644 cmd/zt_unit_test.go create mode 100644 common/clientFactory.go delete mode 100644 e2etest/pointers.go create mode 100644 ste/fileAttributesHelper.go create mode 100644 ste/xferVersionPolicy.go diff --git a/ChangeLog.md b/ChangeLog.md index 0c3b08911..c298e815e 100644 --- a/ChangeLog.md +++ b/ChangeLog.md @@ -1,6 +1,19 @@ # Change Log +## Version 10.21.0-Preview + +### New Features + +1. 
Migrated to the latest [azblob SDK](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob). +2. Migrated to the latest [azfile SDK](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/storage/azfile). +3. Migrated from deprecated ADAL to MSAL through the latest [azidentity SDK](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity). +4. Deprecated support for object IDs in MSI. Client ID or Resource ID can be used as an alternative. + +### Special notes + +1. Due to the migration from ADAL to MSAL, tenant ID must now be set when authorizing with single tenant applications created after 10/15/2018. + ## Version 10.20.1 ### Bug Fixes @@ -28,6 +41,7 @@ 7. Fixed an issue where `--skip-version-check` would not be honored for `login`,` logout`, `help` commands. [#2299](https://github.com/Azure/azure-storage-azcopy/issues/2299) ### Documentation + 1. Add a log for LMTs when a mismatch is encountered. 2. Added documentation indicating the `login` and `logout` commands will be deprecated in the future. diff --git a/cmd/benchmark.go b/cmd/benchmark.go index 978c35789..1a000b80f 100644 --- a/cmd/benchmark.go +++ b/cmd/benchmark.go @@ -23,6 +23,8 @@ package cmd import ( "errors" "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + sharefile "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" "net/url" "os" "strconv" @@ -30,8 +32,6 @@ import ( "github.com/Azure/azure-storage-azcopy/v10/azbfs" "github.com/Azure/azure-storage-azcopy/v10/common" - "github.com/Azure/azure-storage-blob-go/azblob" - "github.com/Azure/azure-storage-file-go/azfile" "github.com/spf13/cobra" ) @@ -176,43 +176,47 @@ func (raw rawBenchmarkCmdArgs) cook() (CookedCopyCmdArgs, error) { } func (raw rawBenchmarkCmdArgs) appendVirtualDir(target, virtualDir string) (string, error) { - - u, err := url.Parse(target) - if err != nil { - return "", fmt.Errorf("error parsing the url %s. Failed with error %s", target, err.Error()) - } - - var result url.URL - switch InferArgumentLocation(target) { case common.ELocation.Blob(): - p := azblob.NewBlobURLParts(*u) + p, err := blob.ParseURL(target) + if err != nil { + return "", fmt.Errorf("error parsing the url %s. Failed with error %s", target, err.Error()) + } if p.ContainerName == "" || p.BlobName != "" { return "", errors.New("the blob target must be a container") } p.BlobName = virtualDir - result = p.URL() + return p.String(), err case common.ELocation.File(): - p := azfile.NewFileURLParts(*u) + p, err := sharefile.ParseURL(target) + if err != nil { + return "", fmt.Errorf("error parsing the url %s. Failed with error %s", target, err.Error()) + } if p.ShareName == "" || p.DirectoryOrFilePath != "" { - return "", errors.New("the Azure Files target must be a file share root") + return "", errors.New("the file share target must be a file share root") } p.DirectoryOrFilePath = virtualDir - result = p.URL() + return p.String(), err case common.ELocation.BlobFS(): + u, err := url.Parse(target) + if err != nil { + return "", fmt.Errorf("error parsing the url %s. 
Failed with error %s", target, err.Error()) + } + + var result url.URL p := azbfs.NewBfsURLParts(*u) if p.FileSystemName == "" || p.DirectoryOrFilePath != "" { return "", errors.New("the blobFS target must be a file system") } p.DirectoryOrFilePath = virtualDir result = p.URL() + return result.String(), nil default: return "", errors.New("benchmarking only supports https connections to Blob, Azure Files, and ADLS Gen2") } - return result.String(), nil } // define a cleanup job diff --git a/cmd/copy.go b/cmd/copy.go index 135294fce..27aa7ff07 100644 --- a/cmd/copy.go +++ b/cmd/copy.go @@ -26,6 +26,8 @@ import ( "encoding/json" "errors" "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob" "io" "math" "net/url" @@ -39,7 +41,6 @@ import ( "github.com/Azure/azure-pipeline-go/pipeline" - "github.com/Azure/azure-storage-blob-go/azblob" "github.com/spf13/cobra" "github.com/Azure/azure-storage-azcopy/v10/common" @@ -837,7 +838,7 @@ func (raw rawCopyCmdArgs) cook() (CookedCopyCmdArgs, error) { if err != nil { return cooked, fmt.Errorf("error parsing the exclude-blob-type %s provided with exclude-blob-type flag ", blobType) } - cooked.excludeBlobType = append(cooked.excludeBlobType, eBlobType.ToAzBlobType()) + cooked.excludeBlobType = append(cooked.excludeBlobType, eBlobType.ToBlobType()) } } @@ -947,7 +948,7 @@ func validatePreserveSMBPropertyOption(toPreserve bool, fromTo common.FromTo, ov } else if toPreserve && !(fromTo == common.EFromTo.LocalFile() || fromTo == common.EFromTo.FileLocal() || fromTo == common.EFromTo.FileFile()) { - return fmt.Errorf("%s is set but the job is not between %s-aware resources", flagName, common.IffString(flagName == PreservePermissionsFlag, "permission", "SMB")) + return fmt.Errorf("%s is set but the job is not between %s-aware resources", flagName, common.Iff(flagName == PreservePermissionsFlag, "permission", "SMB")) } if toPreserve && (fromTo.IsUpload() || fromTo.IsDownload()) && @@ -1116,7 +1117,7 @@ type CookedCopyCmdArgs struct { // options from flags blockSize int64 // list of blobTypes to exclude while enumerating the transfer - excludeBlobType []azblob.BlobType + excludeBlobType []blob.BlobType blobType common.BlobType // Blob index tags categorize data in your storage account utilizing key-value tag attributes. // These tags are automatically indexed and exposed as a queryable multi-dimensional index to easily find data. 
@@ -1281,11 +1282,8 @@ func (cca *CookedCopyCmdArgs) processRedirectionDownload(blobResource common.Res return fmt.Errorf("fatal: cannot find auth on source blob URL: %s", err.Error()) } - // step 1: initialize pipeline - p, err := createBlobPipeline(ctx, credInfo, pipeline.LogNone) - if err != nil { - return err - } + // step 1: create client options + options := createClientOptions(pipeline.LogNone, nil, nil) // step 2: parse source url u, err := blobResource.FullURL() @@ -1294,17 +1292,17 @@ func (cca *CookedCopyCmdArgs) processRedirectionDownload(blobResource common.Res } // step 3: start download - blobURL := azblob.NewBlobURL(*u, p) - clientProvidedKey := azblob.ClientProvidedKeyOptions{} - if cca.CpkOptions.IsSourceEncrypted { - clientProvidedKey = common.GetClientProvidedKey(cca.CpkOptions) - } - blobStream, err := blobURL.Download(ctx, 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false, clientProvidedKey) + blobClient := common.CreateBlobClient(u.String(), credInfo, nil, options) + + blobStream, err := blobClient.DownloadStream(ctx, &blob.DownloadStreamOptions{ + CPKInfo: cca.CpkOptions.GetCPKInfo(), + CPKScopeInfo: cca.CpkOptions.GetCPKScopeInfo(), + }) if err != nil { return fmt.Errorf("fatal: cannot download blob due to error: %s", err.Error()) } - blobBody := blobStream.Body(azblob.RetryReaderOptions{MaxRetryRequests: ste.MaxRetryPerDownloadBody, ClientProvidedKeyOptions: clientProvidedKey}) + blobBody := blobStream.NewRetryReader(ctx, &blob.RetryReaderOptions{MaxRetries: ste.MaxRetryPerDownloadBody}) defer blobBody.Close() // step 4: pipe everything into Stdout @@ -1328,14 +1326,11 @@ func (cca *CookedCopyCmdArgs) processRedirectionUpload(blobResource common.Resou credInfo, _, err := GetCredentialInfoForLocation(ctx, common.ELocation.Blob(), blobResource.Value, blobResource.SAS, false, cca.CpkOptions) if err != nil { - return fmt.Errorf("fatal: cannot find auth on source blob URL: %s", err.Error()) + return fmt.Errorf("fatal: cannot find auth on destination blob URL: %s", err.Error()) } // step 0: initialize pipeline - p, err := createBlobPipeline(ctx, credInfo, pipeline.LogNone) - if err != nil { - return err - } + options := createClientOptions(pipeline.LogNone, nil, nil) // step 1: parse destination url u, err := blobResource.FullURL() @@ -1344,34 +1339,36 @@ func (cca *CookedCopyCmdArgs) processRedirectionUpload(blobResource common.Resou } // step 2: leverage high-level call in Blob SDK to upload stdin in parallel - blockBlobUrl := azblob.NewBlockBlobURL(*u, p) + blockBlobClient := common.CreateBlockBlobClient(u.String(), credInfo, nil, options) + metadataString := cca.metadata metadataMap := common.Metadata{} if len(metadataString) > 0 { for _, keyAndValue := range strings.Split(metadataString, ";") { // key/value pairs are separated by ';' kv := strings.Split(keyAndValue, "=") // key/value are separated by '=' - metadataMap[kv[0]] = kv[1] + metadataMap[kv[0]] = &kv[1] } } blobTags := cca.blobTags - bbAccessTier := azblob.DefaultAccessTier + bbAccessTier := blob.AccessTier("") if cca.blockBlobTier != common.EBlockBlobTier.None() { - bbAccessTier = azblob.AccessTierType(cca.blockBlobTier.String()) - } - _, err = azblob.UploadStreamToBlockBlob(ctx, os.Stdin, blockBlobUrl, azblob.UploadStreamToBlockBlobOptions{ - BufferSize: int(blockSize), - MaxBuffers: pipingUploadParallelism, - Metadata: metadataMap.ToAzBlobMetadata(), - BlobTagsMap: blobTags.ToAzBlobTagsMap(), - BlobHTTPHeaders: azblob.BlobHTTPHeaders{ - ContentType: cca.contentType, - ContentLanguage: 
cca.contentLanguage, - ContentEncoding: cca.contentEncoding, - ContentDisposition: cca.contentDisposition, - CacheControl: cca.cacheControl, + bbAccessTier = blob.AccessTier(cca.blockBlobTier.String()) + } + _, err = blockBlobClient.UploadStream(ctx, os.Stdin, &blockblob.UploadStreamOptions{ + BlockSize: blockSize, + Concurrency: pipingUploadParallelism, + Metadata: metadataMap, + Tags: blobTags, + HTTPHeaders: &blob.HTTPHeaders{ + BlobContentType: &cca.contentType, + BlobContentLanguage: &cca.contentLanguage, + BlobContentEncoding: &cca.contentEncoding, + BlobContentDisposition: &cca.contentDisposition, + BlobCacheControl: &cca.cacheControl, }, - BlobAccessTier: bbAccessTier, - ClientProvidedKeyOptions: common.GetClientProvidedKey(cca.CpkOptions), + AccessTier: &bbAccessTier, + CPKInfo: cca.CpkOptions.GetCPKInfo(), + CPKScopeInfo: cca.CpkOptions.GetCPKScopeInfo(), }) return err @@ -1407,13 +1404,12 @@ func (cca *CookedCopyCmdArgs) getSrcCredential(ctx context.Context, jpo *common. cca.credentialInfo.OAuthTokenInfo = *tokenInfo jpo.CredentialInfo.OAuthTokenInfo = *tokenInfo } + jpo.CredentialInfo.S2SSourceTokenCredential, err = common.GetSourceBlobCredential(srcCredInfo, common.CredentialOpOptions{LogError: glcm.Info}) + if err != nil { + return srcCredInfo, err + } // if the source is not local then store the credential token if it was OAuth to avoid constant refreshing - jpo.CredentialInfo.SourceBlobToken = common.CreateBlobCredential(ctx, srcCredInfo, common.CredentialOpOptions{ - // LogInfo: glcm.Info, //Comment out for debugging - LogError: glcm.Info, - }) - cca.credentialInfo.SourceBlobToken = jpo.CredentialInfo.SourceBlobToken - srcCredInfo.SourceBlobToken = jpo.CredentialInfo.SourceBlobToken + cca.credentialInfo.S2SSourceTokenCredential = jpo.CredentialInfo.S2SSourceTokenCredential } } return srcCredInfo, nil @@ -1685,7 +1681,7 @@ func (cca *CookedCopyCmdArgs) ReportProgressOrExit(lcm common.LifecycleMgr) (tot cca.intervalStartTime = time.Now() cca.intervalBytesTransferred = summary.BytesOverWire - return common.Iffloat64(timeElapsed != 0, bytesInMb/timeElapsed, 0) * 8 + return common.Iff(timeElapsed != 0, bytesInMb/timeElapsed, 0) * 8 } glcm.Progress(func(format common.OutputFormat) string { if format == common.EOutputFormat.Json() { @@ -2054,4 +2050,4 @@ func init() { // Deprecate the old persist-smb-permissions flag _ = cpCmd.PersistentFlags().MarkHidden("preserve-smb-permissions") cpCmd.PersistentFlags().BoolVar(&raw.preservePermissions, PreservePermissionsFlag, false, "False by default. Preserves ACLs between aware resources (Windows and Azure Files, or ADLS Gen 2 to ADLS Gen 2). For Hierarchical Namespace accounts, you will need a container SAS or OAuth token with Modify Ownership and Modify Permissions permissions. For downloads, you will also need the --backup flag to restore permissions where the new Owner will not be the user running AzCopy. This flag applies to both files and folders, unless a file-only filter is specified (e.g. 
include-pattern).") -} +} \ No newline at end of file diff --git a/cmd/copyEnumeratorInit.go b/cmd/copyEnumeratorInit.go index 7914b340a..1239d080a 100755 --- a/cmd/copyEnumeratorInit.go +++ b/cmd/copyEnumeratorInit.go @@ -5,6 +5,10 @@ import ( "encoding/json" "errors" "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/fileerror" "github.com/Azure/azure-storage-azcopy/v10/azbfs" "log" "net/url" @@ -19,9 +23,6 @@ import ( "github.com/Azure/azure-pipeline-go/pipeline" - "github.com/Azure/azure-storage-blob-go/azblob" - "github.com/Azure/azure-storage-file-go/azfile" - "github.com/Azure/azure-storage-azcopy/v10/common" ) @@ -394,7 +395,7 @@ func (cca *CookedCopyCmdArgs) InitModularFilters() []ObjectFilter { } if len(cca.excludeBlobType) != 0 { - excludeSet := map[azblob.BlobType]bool{} + excludeSet := map[blob.BlobType]bool{} for _, v := range cca.excludeBlobType { excludeSet[v] = true @@ -444,6 +445,14 @@ func (cca *CookedCopyCmdArgs) createDstContainer(containerName string, dstWithSA if dstCredInfo, _, err = GetCredentialInfoForLocation(ctx, cca.FromTo.To(), cca.Destination.Value, cca.Destination.SAS, false, cca.CpkOptions); err != nil { return err } + + var trailingDot *common.TrailingDotOption + var from *common.Location + if cca.FromTo.To() == common.ELocation.File() { + trailingDot = &cca.trailingDot + from = to.Ptr(cca.FromTo.From()) + } + options := createClientOptions(logLevel.ToPipelineLogLevel(), trailingDot, from) // TODO: we can pass cred here as well dstPipeline, err := InitPipeline(ctx, cca.FromTo.To(), dstCredInfo, logLevel.ToPipelineLogLevel(), cca.trailingDot, cca.FromTo.From()) if err != nil { @@ -463,29 +472,20 @@ func (cca *CookedCopyCmdArgs) createDstContainer(containerName string, dstWithSA return err } - dstURL, err := url.Parse(accountRoot) - - if err != nil { - return err - } + bsc := common.CreateBlobServiceClient(accountRoot, dstCredInfo, nil, options) + bcc := bsc.NewContainerClient(containerName) - bsu := azblob.NewServiceURL(*dstURL, dstPipeline) - bcu := bsu.NewContainerURL(containerName) - _, err = bcu.GetProperties(ctx, azblob.LeaseAccessConditions{}) + _, err = bcc.GetProperties(ctx, nil) if err == nil { return err // Container already exists, return gracefully } - _, err = bcu.Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone) - - if stgErr, ok := err.(azblob.StorageError); ok { - if stgErr.ServiceCode() != azblob.ServiceCodeContainerAlreadyExists { - return err - } - } else { - return err + _, err = bcc.Create(ctx, nil) + if bloberror.HasCode(err, bloberror.ContainerAlreadyExists) { + return nil } + return err case common.ELocation.File(): // Grab the account root and parse it as a URL accountRoot, err := GetAccountRoot(dstWithSAS, cca.FromTo.To()) @@ -494,30 +494,21 @@ func (cca *CookedCopyCmdArgs) createDstContainer(containerName string, dstWithSA return err } - dstURL, err := url.Parse(accountRoot) + fsc := common.CreateFileServiceClient(accountRoot, dstCredInfo, nil, options) + sc := fsc.NewShareClient(containerName) - if err != nil { - return err - } - - fsu := azfile.NewServiceURL(*dstURL, dstPipeline) - shareURL := fsu.NewShareURL(containerName) - _, err = shareURL.GetProperties(ctx) + _, err = sc.GetProperties(ctx, nil) if err == nil { return err } // Create a destination share with the default service quota // TODO: Create a flag for the quota - _, err = 
shareURL.Create(ctx, azfile.Metadata{}, 0) - - if stgErr, ok := err.(azfile.StorageError); ok { - if stgErr.ServiceCode() != azfile.ServiceCodeShareAlreadyExists { - return err - } - } else { - return err + _, err = sc.Create(ctx, nil) + if fileerror.HasCode(err, fileerror.ShareAlreadyExists) { + return nil } + return err case common.ELocation.BlobFS(): // TODO: Implement blobfs container creation accountRoot, err := GetAccountRoot(dstWithSAS, cca.FromTo.To()) @@ -653,8 +644,8 @@ func (cca *CookedCopyCmdArgs) MakeEscapedRelativePath(source bool, dstIsDir bool relativePath = "/" + strings.Replace(object.relativePath, common.OS_PATH_SEPARATOR, common.AZCOPY_PATH_SEPARATOR_STRING, -1) } - if common.IffString(source, object.ContainerName, object.DstContainerName) != "" { - relativePath = `/` + common.IffString(source, object.ContainerName, object.DstContainerName) + relativePath + if common.Iff(source, object.ContainerName, object.DstContainerName) != "" { + relativePath = `/` + common.Iff(source, object.ContainerName, object.DstContainerName) + relativePath } else if !source && !cca.StripTopDir && cca.asSubdir { // Avoid doing this where the root is shared or renamed. // We ONLY need to do this adjustment to the destination. // The source SAS has already been removed. No need to convert it to a URL or whatever. @@ -691,7 +682,7 @@ func (cca *CookedCopyCmdArgs) MakeEscapedRelativePath(source bool, dstIsDir bool func NewFolderPropertyOption(fromTo common.FromTo, recursive, stripTopDir bool, filters []ObjectFilter, preserveSmbInfo, preservePermissions, preservePosixProperties, isDstNull, includeDirectoryStubs bool) (common.FolderPropertyOption, string) { getSuffix := func(willProcess bool) string { - willProcessString := common.IffString(willProcess, "will be processed", "will not be processed") + willProcessString := common.Iff(willProcess, "will be processed", "will not be processed") template := ". 
For the same reason, %s defined on folders %s" switch { @@ -745,4 +736,4 @@ func NewFolderPropertyOption(fromTo common.FromTo, recursive, stripTopDir bool, "Any empty folders will not be processed, because source and/or destination doesn't have full folder support" + getSuffix(false) -} +} \ No newline at end of file diff --git a/cmd/copyEnumeratorInit_test.go b/cmd/copyEnumeratorInit_test.go index 60d0e0f49..87c6e306a 100644 --- a/cmd/copyEnumeratorInit_test.go +++ b/cmd/copyEnumeratorInit_test.go @@ -24,7 +24,6 @@ import ( "context" "github.com/Azure/azure-storage-azcopy/v10/common" "github.com/Azure/azure-storage-azcopy/v10/ste" - "github.com/Azure/azure-storage-blob-go/azblob" "github.com/stretchr/testify/assert" "testing" ) @@ -32,22 +31,22 @@ import ( // ============================================= BLOB TRAVERSER TESTS ======================================= func TestValidateSourceDirThatExists(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // Generate source container and blobs - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) - a.NotNil(containerURL) + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) + a.NotNil(cc) dirName := "source_dir" - createNewDirectoryStub(a, containerURL, dirName) + createNewDirectoryStub(a, cc, dirName) // set up to create blob traverser ctx := context.WithValue(context.TODO(), ste.ServiceAPIVersionOverride, ste.DefaultServiceApiVersion) - p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{}) // List - rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, dirName) - blobTraverser := newBlobTraverser(&rawBlobURLWithSAS, p, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None(), false) + rawBlobURLWithSAS := scenarioHelper{}.getBlobClientWithSAS(a, containerName, dirName).URL() + serviceClientWithSAS := scenarioHelper{}.getBlobServiceClientWithSASFromURL(a, rawBlobURLWithSAS) + blobTraverser := newBlobTraverser(rawBlobURLWithSAS, serviceClientWithSAS, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None(), false) // dir but recursive flag not set - fail cca := CookedCopyCmdArgs{StripTopDir: false, Recursive: false} @@ -63,21 +62,21 @@ func TestValidateSourceDirThatExists(t *testing.T) { func TestValidateSourceDirDoesNotExist(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // Generate source container and blobs - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) - a.NotNil(containerURL) + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) + a.NotNil(cc) dirName := "source_dir/" // set up to create blob traverser ctx := context.WithValue(context.TODO(), ste.ServiceAPIVersionOverride, ste.DefaultServiceApiVersion) - p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{}) // List - rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, dirName) - blobTraverser := newBlobTraverser(&rawBlobURLWithSAS, p, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None(), false) + rawBlobURLWithSAS := scenarioHelper{}.getBlobClientWithSAS(a, containerName, dirName).URL() + serviceClientWithSAS := 
scenarioHelper{}.getBlobServiceClientWithSASFromURL(a, rawBlobURLWithSAS) + blobTraverser := newBlobTraverser(rawBlobURLWithSAS, serviceClientWithSAS, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None(), false) // dir but recursive flag not set - fail cca := CookedCopyCmdArgs{StripTopDir: false, Recursive: false} @@ -93,22 +92,22 @@ func TestValidateSourceDirDoesNotExist(t *testing.T) { func TestValidateSourceFileExists(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // Generate source container and blobs - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) - a.NotNil(containerURL) + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) + a.NotNil(cc) fileName := "source_file" - _, fileName = createNewBlockBlob(a, containerURL, fileName) + _, fileName = createNewBlockBlob(a, cc, fileName) ctx := context.WithValue(context.TODO(), ste.ServiceAPIVersionOverride, ste.DefaultServiceApiVersion) - p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{}) // List - rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, fileName) - blobTraverser := newBlobTraverser(&rawBlobURLWithSAS, p, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None(), false) + rawBlobURLWithSAS := scenarioHelper{}.getBlobClientWithSAS(a, containerName, fileName).URL() + serviceClientWithSAS := scenarioHelper{}.getBlobServiceClientWithSASFromURL(a, rawBlobURLWithSAS) + blobTraverser := newBlobTraverser(rawBlobURLWithSAS, serviceClientWithSAS, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None(), false) cca := CookedCopyCmdArgs{StripTopDir: false, Recursive: false} err := cca.validateSourceDir(blobTraverser) @@ -118,21 +117,21 @@ func TestValidateSourceFileExists(t *testing.T) { func TestValidateSourceFileDoesNotExist(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // Generate source container and blobs - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) - a.NotNil(containerURL) + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) + a.NotNil(cc) fileName := "source_file" ctx := context.WithValue(context.TODO(), ste.ServiceAPIVersionOverride, ste.DefaultServiceApiVersion) - p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{}) // List - rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, fileName) - blobTraverser := newBlobTraverser(&rawBlobURLWithSAS, p, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None(), false) + rawBlobURLWithSAS := scenarioHelper{}.getBlobClientWithSAS(a, containerName, fileName).URL() + serviceClientWithSAS := scenarioHelper{}.getBlobServiceClientWithSASFromURL(a, rawBlobURLWithSAS) + blobTraverser := newBlobTraverser(rawBlobURLWithSAS, serviceClientWithSAS, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None(), false) cca := CookedCopyCmdArgs{StripTopDir: false, Recursive: false} err := cca.validateSourceDir(blobTraverser) @@ -142,21 +141,21 @@ func 
TestValidateSourceFileDoesNotExist(t *testing.T) { func TestValidateSourceWithWildCard(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // Generate source container and blobs - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) - a.NotNil(containerURL) + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) + a.NotNil(cc) dirName := "source_dir_does_not_exist" // set up to create blob traverser ctx := context.WithValue(context.TODO(), ste.ServiceAPIVersionOverride, ste.DefaultServiceApiVersion) - p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{}) // List - rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, dirName) - blobTraverser := newBlobTraverser(&rawBlobURLWithSAS, p, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None(), false) + rawBlobURLWithSAS := scenarioHelper{}.getBlobClientWithSAS(a, containerName, dirName).URL() + serviceClientWithSAS := scenarioHelper{}.getBlobServiceClientWithSASFromURL(a, rawBlobURLWithSAS) + blobTraverser := newBlobTraverser(rawBlobURLWithSAS, serviceClientWithSAS, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None(), false) // dir but recursive flag not set - fail cca := CookedCopyCmdArgs{StripTopDir: true, Recursive: false} diff --git a/cmd/copyUtil.go b/cmd/copyUtil.go index 5eab51b72..e77b94317 100644 --- a/cmd/copyUtil.go +++ b/cmd/copyUtil.go @@ -21,17 +21,13 @@ package cmd import ( - "context" "fmt" - "github.com/Azure/azure-storage-azcopy/v10/jobsAdmin" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" "net/url" "os" "strings" - "github.com/Azure/azure-pipeline-go/pipeline" "github.com/Azure/azure-storage-azcopy/v10/common" - "github.com/Azure/azure-storage-blob-go/azblob" - "github.com/Azure/azure-storage-file-go/azfile" ) const ( @@ -44,23 +40,31 @@ type copyHandlerUtil struct{} var gCopyUtil = copyHandlerUtil{} // checks if a given url points to a container or virtual directory, as opposed to a blob or prefix match -func (util copyHandlerUtil) urlIsContainerOrVirtualDirectory(url *url.URL) bool { - if azblob.NewBlobURLParts(*url).IPEndpointStyleInfo.AccountName == "" { +func (util copyHandlerUtil) urlIsContainerOrVirtualDirectory(rawURL string) bool { + parsedURL, err := url.Parse(rawURL) + if err != nil { + return false + } + blobURLParts, err := blob.ParseURL(rawURL) + if err != nil { + return false + } + if blobURLParts.IPEndpointStyleInfo.AccountName == "" { // Typical endpoint style // If there's no slashes after the first, it's a container. // If there's a slash on the end, it's a virtual directory/container. // Otherwise, it's just a blob. - if len(url.Path) == 0 { + if len(parsedURL.Path) == 0 { return true // We know for SURE that it's a account level URL } - return strings.HasSuffix(url.Path, "/") || strings.Count(url.Path[1:], "/") == 0 + return strings.HasSuffix(parsedURL.Path, "/") || strings.Count(parsedURL.Path[1:], "/") == 0 } else { // IP endpoint style: https://IP:port/accountname/container // If there's 2 or less slashes after the first, it's a container. // OR If there's a slash on the end, it's a virtual directory/container. // Otherwise, it's just a blob. 
- return strings.HasSuffix(url.Path, "/") || strings.Count(url.Path[1:], "/") <= 1 + return strings.HasSuffix(parsedURL.Path, "/") || strings.Count(parsedURL.Path[1:], "/") <= 1 } } @@ -118,39 +122,15 @@ func (util copyHandlerUtil) ConstructCommandStringFromArgs() string { return s.String() } -func (util copyHandlerUtil) urlIsAzureFileDirectory(ctx context.Context, url *url.URL, p pipeline.Pipeline) bool { - // Azure file share case - if util.urlIsContainerOrVirtualDirectory(url) { - return true - } - - // Need make request to ensure if it's directory - directoryURL := azfile.NewDirectoryURL(*url, p) - _, err := directoryURL.GetProperties(ctx) - if err != nil { - if jobsAdmin.JobsAdmin != nil { - jobsAdmin.JobsAdmin.LogToJobLog(fmt.Sprintf("Failed to check if the destination is a folder or a file (Azure Files). Assuming the destination is a file: %s", err), pipeline.LogWarning) - } - - return false - } - - return true -} - -func (util copyHandlerUtil) getContainerUrl(blobParts azblob.BlobURLParts) url.URL { - blobParts.BlobName = "" - return blobParts.URL() -} - // doesBlobRepresentAFolder verifies whether blob is valid or not. // Used to handle special scenarios or conditions. -func (util copyHandlerUtil) doesBlobRepresentAFolder(metadata azblob.Metadata) bool { +func (util copyHandlerUtil) doesBlobRepresentAFolder(metadata map[string]*string) bool { // this condition is to handle the WASB V1 directory structure. // HDFS driver creates a blob for the empty directories (let’s call it ‘myfolder’) // and names all the blobs under ‘myfolder’ as such: ‘myfolder/myblob’ // The empty directory has meta-data 'hdi_isfolder = true' - return metadata["hdi_isfolder"] == "true" + // Note: GoLang sometimes sets metadata keys with the first letter capitalized + return (metadata["hdi_isfolder"] != nil && strings.ToLower(*metadata["hdi_isfolder"]) == "true") || (metadata["Hdi_isfolder"] != nil && strings.ToLower(*metadata["Hdi_isfolder"]) == "true") } func startsWith(s string, t string) bool { @@ -160,4 +140,4 @@ func startsWith(s string, t string) bool { ///////////////////////////////////////////////////////////////////////////////////////////////// type s3URLPartsExtension struct { common.S3URLParts -} +} \ No newline at end of file diff --git a/cmd/copyUtil_test.go b/cmd/copyUtil_test.go index f4194bc85..15ae915ff 100644 --- a/cmd/copyUtil_test.go +++ b/cmd/copyUtil_test.go @@ -22,7 +22,6 @@ package cmd import ( "github.com/stretchr/testify/assert" - "net/url" "testing" ) @@ -30,25 +29,25 @@ func TestUrlIsContainerOrBlob(t *testing.T) { a := assert.New(t) util := copyHandlerUtil{} - testUrl := url.URL{Path: "/container/dir1"} - isContainer := util.urlIsContainerOrVirtualDirectory(&testUrl) + testUrl := "https://fakeaccount.core.windows.net/container/dir1" + isContainer := util.urlIsContainerOrVirtualDirectory(testUrl) a.False(isContainer) - testUrl.Path = "/container/dir1/dir2" - isContainer = util.urlIsContainerOrVirtualDirectory(&testUrl) + testUrl = "https://fakeaccount.core.windows.net/container/dir1/dir2" + isContainer = util.urlIsContainerOrVirtualDirectory(testUrl) a.False(isContainer) - testUrl.Path = "/container/" - isContainer = util.urlIsContainerOrVirtualDirectory(&testUrl) + testUrl = "https://fakeaccount.core.windows.net/container/" + isContainer = util.urlIsContainerOrVirtualDirectory(testUrl) a.True(isContainer) - testUrl.Path = "/container" - isContainer = util.urlIsContainerOrVirtualDirectory(&testUrl) + testUrl = "https://fakeaccount.core.windows.net/container" + isContainer = 
util.urlIsContainerOrVirtualDirectory(testUrl) a.True(isContainer) // root container - testUrl.Path = "/" - isContainer = util.urlIsContainerOrVirtualDirectory(&testUrl) + testUrl = "https://fakeaccount.core.windows.net/" + isContainer = util.urlIsContainerOrVirtualDirectory(testUrl) a.True(isContainer) } @@ -56,24 +55,24 @@ func TestIPIsContainerOrBlob(t *testing.T) { a := assert.New(t) util := copyHandlerUtil{} - testIP := url.URL{Host: "127.0.0.1:8256", Path: "/account/container"} - testURL := url.URL{Path: "/account/container"} - isContainerIP := util.urlIsContainerOrVirtualDirectory(&testIP) - isContainerURL := util.urlIsContainerOrVirtualDirectory(&testURL) + testIP := "https://127.0.0.1:8256/account/container" + testURL := "https://fakeaccount.core.windows.net/account/container" + isContainerIP := util.urlIsContainerOrVirtualDirectory(testIP) + isContainerURL := util.urlIsContainerOrVirtualDirectory(testURL) a.True(isContainerIP) // IP endpoints contain the account in the path, making the container the second entry a.False(isContainerURL) // URL endpoints do not contain the account in the path, making the container the first entry. - testURL.Path = "/account/container/folder" - testIP.Path = "/account/container/folder" - isContainerIP = util.urlIsContainerOrVirtualDirectory(&testIP) - isContainerURL = util.urlIsContainerOrVirtualDirectory(&testURL) + testIP = "https://127.0.0.1:8256/account/container/folder" + testURL = "https://fakeaccount.core.windows.net/account/container/folder" + isContainerIP = util.urlIsContainerOrVirtualDirectory(testIP) + isContainerURL = util.urlIsContainerOrVirtualDirectory(testURL) a.False(isContainerIP) // IP endpoints contain the account in the path, making the container the second entry a.False(isContainerURL) // URL endpoints do not contain the account in the path, making the container the first entry. - testURL.Path = "/account/container/folder/" - testIP.Path = "/account/container/folder/" - isContainerIP = util.urlIsContainerOrVirtualDirectory(&testIP) - isContainerURL = util.urlIsContainerOrVirtualDirectory(&testURL) + testIP = "https://127.0.0.1:8256/account/container/folder/" + testURL = "https://fakeaccount.core.windows.net/account/container/folder/" + isContainerIP = util.urlIsContainerOrVirtualDirectory(testIP) + isContainerURL = util.urlIsContainerOrVirtualDirectory(testURL) a.True(isContainerIP) // IP endpoints contain the account in the path, making the container the second entry a.True(isContainerURL) // URL endpoints do not contain the account in the path, making the container the first entry. // The behaviour isn't too different from here. 
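
A minimal illustrative sketch (not from this commit) of the Track 2 URL-parsing pattern that the rewritten `urlIsContainerOrVirtualDirectory` and the test updates above rely on: `blob.ParseURL` replaces `azblob.NewBlobURLParts` and works on raw strings. The account and blob names below are hypothetical.

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

func main() {
	// blob.ParseURL takes the raw URL string directly instead of a *url.URL.
	parts, err := blob.ParseURL("https://fakeaccount.blob.core.windows.net/container/dir1/blob.txt")
	if err != nil {
		panic(err)
	}
	fmt.Println(parts.ContainerName) // "container"
	fmt.Println(parts.BlobName)      // "dir1/blob.txt"

	// Clearing BlobName and re-serializing yields a container-level URL,
	// mirroring how GetResourceRoot and GetAccountRoot rebuild URLs in this patch.
	parts.BlobName = ""
	fmt.Println(parts.String())
}
```
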
diff --git a/cmd/credentialUtil.go b/cmd/credentialUtil.go index ddc3cafed..f7b8018e8 100644 --- a/cmd/credentialUtil.go +++ b/cmd/credentialUtil.go @@ -26,6 +26,10 @@ import ( "context" "errors" "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-storage-azcopy/v10/jobsAdmin" "net/http" "net/url" @@ -35,9 +39,6 @@ import ( "github.com/minio/minio-go/pkg/s3utils" "github.com/Azure/azure-pipeline-go/pipeline" - "github.com/Azure/azure-storage-blob-go/azblob" - "github.com/Azure/azure-storage-file-go/azfile" - "github.com/Azure/azure-storage-azcopy/v10/azbfs" "github.com/Azure/azure-storage-azcopy/v10/common" "github.com/Azure/azure-storage-azcopy/v10/ste" @@ -154,41 +155,40 @@ func getBlobCredentialType(ctx context.Context, blobResourceURL string, canBePub resourceURL.RawQuery = standaloneSAS } - sas := azblob.NewBlobURLParts(*resourceURL).SAS + blobResourceURL = resourceURL.String() + blobURLParts, err := blob.ParseURL(blobResourceURL) + if err != nil { + return common.ECredentialType.Unknown(), false, errors.New("provided blob resource string was not able to be parsed") + } + sas := blobURLParts.SAS isMDAccount := strings.HasPrefix(resourceURL.Host, "md-") canBePublic = canBePublic && !isMDAccount // MD accounts cannot be public. // If SAS existed, return anonymous credential type. + clientOptions := ste.NewClientOptions(policy.RetryOptions{ + MaxRetries: ste.UploadMaxTries, + TryTimeout: ste.UploadTryTimeout, + RetryDelay: ste.UploadRetryDelay, + MaxRetryDelay: ste.UploadMaxRetryDelay, + }, policy.TelemetryOptions{ + ApplicationID: glcm.AddUserAgentPrefix(common.UserAgent), + }, nil, nil, ste.LogOptions{ + RequestLogOptions: ste.RequestLogOptions{ + SyslogDisabled: common.IsForceLoggingDisabled(), + }, + }, nil, nil) + credInfo := common.CredentialInfo{CredentialType: common.ECredentialType.Anonymous()} if isSASExisted := sas.Signature() != ""; isSASExisted { if isMDAccount { // Ping the account anyway, and discern if we need OAuth. - p := azblob.NewPipeline( - azblob.NewAnonymousCredential(), - azblob.PipelineOptions{ - Retry: azblob.RetryOptions{ - Policy: azblob.RetryPolicyExponential, - MaxTries: ste.UploadMaxTries, - TryTimeout: ste.UploadTryTimeout, - RetryDelay: ste.UploadRetryDelay, - MaxRetryDelay: ste.UploadMaxRetryDelay, - }, - RequestLog: azblob.RequestLogOptions{ - SyslogDisabled: common.IsForceLoggingDisabled(), - }, - }) - - clientProvidedKey := azblob.ClientProvidedKeyOptions{} - if cpkOptions.IsSourceEncrypted { - clientProvidedKey = common.GetClientProvidedKey(cpkOptions) - } - - bURL := azblob.NewBlobURL(*resourceURL, p) - _, err := bURL.GetProperties(ctx, azblob.BlobAccessConditions{}, clientProvidedKey) + blobClient := common.CreateBlobClient(blobResourceURL, credInfo, nil, clientOptions) + _, err = blobClient.GetProperties(ctx, &blob.GetPropertiesOptions{CPKInfo: cpkOptions.GetCPKInfo()}) if err != nil { - if stgErr, ok := err.(azblob.StorageError); ok { - if httpResp := stgErr.Response(); httpResp.StatusCode == 401 || httpResp.StatusCode == 403 { // *sometimes* the service can return 403s. - challenge := httpResp.Header.Get("WWW-Authenticate") + var respErr *azcore.ResponseError + if errors.As(err, &respErr) { + if respErr.StatusCode == 401 || respErr.StatusCode == 403 { // *sometimes* the service can return 403s. 
+ challenge := respErr.RawResponse.Header.Get("WWW-Authenticate") if strings.Contains(challenge, common.MDResource) { if !oAuthTokenExists() { return common.ECredentialType.Unknown(), false, @@ -211,49 +211,36 @@ func getBlobCredentialType(ctx context.Context, blobResourceURL string, canBePub if !canBePublic { // Cannot possibly be public - like say a destination EP return false } - p := azblob.NewPipeline( - azblob.NewAnonymousCredential(), - azblob.PipelineOptions{ - Retry: azblob.RetryOptions{ - Policy: azblob.RetryPolicyExponential, - MaxTries: ste.UploadMaxTries, - TryTimeout: ste.UploadTryTimeout, - RetryDelay: ste.UploadRetryDelay, - MaxRetryDelay: ste.UploadMaxRetryDelay, - }, - RequestLog: azblob.RequestLogOptions{ - SyslogDisabled: common.IsForceLoggingDisabled(), - }, - }) - - isContainer := copyHandlerUtil{}.urlIsContainerOrVirtualDirectory(resourceURL) + isContainer := copyHandlerUtil{}.urlIsContainerOrVirtualDirectory(blobResourceURL) isPublicResource = false // Scenario 1: When resourceURL points to a container // Scenario 2: When resourceURL points to a virtual directory. // Check if the virtual directory is accessible by doing GetProperties on container. // Virtual directory can be accessed/scanned only when its parent container is public. - bURLParts := azblob.NewBlobURLParts(*resourceURL) + bURLParts, err := blob.ParseURL(blobResourceURL) + if err != nil { + return false + } bURLParts.BlobName = "" - containerURL := azblob.NewContainerURL(bURLParts.URL(), p) + bURLParts.Snapshot = "" + bURLParts.VersionID = "" + containerClient := common.CreateContainerClient(bURLParts.String(), credInfo, nil, clientOptions) if bURLParts.ContainerName == "" || strings.Contains(bURLParts.ContainerName, "*") { // Service level searches can't possibly be public. 
return false } - if _, err := containerURL.GetProperties(ctx, azblob.LeaseAccessConditions{}); err == nil { + if _, err := containerClient.GetProperties(ctx, nil); err == nil { return true } if !isContainer { - clientProvidedKey := azblob.ClientProvidedKeyOptions{} - if cpkOptions.IsSourceEncrypted { - clientProvidedKey = common.GetClientProvidedKey(cpkOptions) - } // Scenario 3: When resourceURL points to a blob - blobURL := azblob.NewBlobURL(*resourceURL, p) - if _, err := blobURL.GetProperties(ctx, azblob.BlobAccessConditions{}, clientProvidedKey); err == nil { + blobClient := common.CreateBlobClient(blobResourceURL, credInfo, nil, clientOptions) + + if _, err := blobClient.GetProperties(ctx, &blob.GetPropertiesOptions{CPKInfo: cpkOptions.GetCPKInfo()}); err == nil { return true } } @@ -646,47 +633,22 @@ func getCredentialType(ctx context.Context, raw rawFromToInfo, cpkOptions common // ============================================================================================== // pipeline factory methods // ============================================================================================== -func createBlobPipelineFromCred(credential azblob.Credential, logLevel pipeline.LogLevel) pipeline.Pipeline { - logOption := pipeline.LogOptions{} +func createClientOptions(logLevel pipeline.LogLevel, trailingDot *common.TrailingDotOption, from *common.Location) azcore.ClientOptions { + logOptions := ste.LogOptions{} if azcopyScanningLogger != nil { - logOption = pipeline.LogOptions{ + logOptions.LogOptions = pipeline.LogOptions{ Log: azcopyScanningLogger.Log, ShouldLog: func(level pipeline.LogLevel) bool { return level <= logLevel }, } } - - return ste.NewBlobPipeline( - credential, - azblob.PipelineOptions{ - Telemetry: azblob.TelemetryOptions{ - Value: glcm.AddUserAgentPrefix(common.UserAgent), - }, - Log: logOption, - }, - ste.XferRetryOptions{ - Policy: 0, - MaxTries: ste.UploadMaxTries, - TryTimeout: ste.UploadTryTimeout, - RetryDelay: ste.UploadRetryDelay, - MaxRetryDelay: ste.UploadMaxRetryDelay, - }, - nil, - ste.NewAzcopyHTTPClient(frontEndMaxIdleConnectionsPerHost), - nil, // we don't gather network stats on the credential pipeline - ) -} - -func createBlobPipeline(ctx context.Context, credInfo common.CredentialInfo, logLevel pipeline.LogLevel) (pipeline.Pipeline, error) { - // are we getting dest token? 
- credential := credInfo.SourceBlobToken - if credential == nil { - credential = common.CreateBlobCredential(ctx, credInfo, common.CredentialOpOptions{ - // LogInfo: glcm.Info, //Comment out for debugging - LogError: glcm.Info, - }) - } - - return createBlobPipelineFromCred(credential, logLevel), nil + return ste.NewClientOptions(policy.RetryOptions{ + MaxRetries: ste.UploadMaxTries, + TryTimeout: ste.UploadTryTimeout, + RetryDelay: ste.UploadRetryDelay, + MaxRetryDelay: ste.UploadMaxRetryDelay, + }, policy.TelemetryOptions{ + ApplicationID: glcm.AddUserAgentPrefix(common.UserAgent), + }, ste.NewAzcopyHTTPClient(frontEndMaxIdleConnectionsPerHost), nil, logOptions, trailingDot, from) } const frontEndMaxIdleConnectionsPerHost = http.DefaultMaxIdleConnsPerHost @@ -725,27 +687,3 @@ func createBlobFSPipeline(ctx context.Context, credInfo common.CredentialInfo, l nil, // we don't gather network stats on the credential pipeline ), nil } - -// TODO note: ctx and credInfo are ignored at the moment because we only support SAS for Azure File -func createFilePipeline(ctx context.Context, credInfo common.CredentialInfo, logLevel pipeline.LogLevel, trailingDot common.TrailingDotOption, from common.Location) (pipeline.Pipeline, error) { - logOption := pipeline.LogOptions{} - if azcopyScanningLogger != nil { - logOption = pipeline.LogOptions{ - Log: azcopyScanningLogger.Log, - ShouldLog: func(level pipeline.LogLevel) bool { return level <= logLevel }, - } - } - - return ste.NewFilePipeline(azfile.NewAnonymousCredential(), azfile.PipelineOptions{ - Telemetry: azfile.TelemetryOptions{ - Value: glcm.AddUserAgentPrefix(common.UserAgent), - }, - Log: logOption, - }, azfile.RetryOptions{ - Policy: azfile.RetryPolicyExponential, - MaxTries: ste.UploadMaxTries, - TryTimeout: ste.UploadTryTimeout, - RetryDelay: ste.UploadRetryDelay, - MaxRetryDelay: ste.UploadMaxRetryDelay, - }, nil, ste.NewAzcopyHTTPClient(frontEndMaxIdleConnectionsPerHost), nil, trailingDot, from), nil -} diff --git a/cmd/gcpNameResolver_test.go b/cmd/gcpNameResolver_test.go index 5a9693a31..e3d0d8b10 100644 --- a/cmd/gcpNameResolver_test.go +++ b/cmd/gcpNameResolver_test.go @@ -122,8 +122,8 @@ func TestGCPBucketNameToAzureResourceResolverMultipleBucketNames(t *testing.T) { resolvedNameCollision2, err := r.ResolveName("a-b---c") a.Nil(err) - a.EqualValues(1, common.Iffint8(resolvedNameCollision1 == "a-b-3-c", 1, 0)^common.Iffint8(resolvedNameCollision2 == "a-b-3-c", 1, 0)) - a.EqualValues(1, common.Iffint8(resolvedNameCollision1 == "a-b-3-c-2", 1, 0)^common.Iffint8(resolvedNameCollision2 == "a-b-3-c-2", 1, 0)) + a.EqualValues(1, common.Iff(resolvedNameCollision1 == "a-b-3-c", 1, 0)^common.Iff(resolvedNameCollision2 == "a-b-3-c", 1, 0)) + a.EqualValues(1, common.Iff(resolvedNameCollision1 == "a-b-3-c-2", 1, 0)^common.Iff(resolvedNameCollision2 == "a-b-3-c-2", 1, 0)) } func TestGCPBucketNameToAzureResourceResolverNegative(t *testing.T) { diff --git a/cmd/jobsResume.go b/cmd/jobsResume.go index be9d91280..18357f3ae 100644 --- a/cmd/jobsResume.go +++ b/cmd/jobsResume.go @@ -104,7 +104,7 @@ func (cca *resumeJobController) ReportProgressOrExit(lcm common.LifecycleMgr) (t cca.intervalStartTime = time.Now() cca.intervalBytesTransferred = summary.BytesOverWire - return common.Iffloat64(timeElapsed != 0, bytesInMb/timeElapsed, 0) * 8 + return common.Iff(timeElapsed != 0, bytesInMb/timeElapsed, 0) * 8 } glcm.Progress(func(format common.OutputFormat) string { diff --git a/cmd/login.go b/cmd/login.go index b3e030ae1..7cfe03501 100644 --- a/cmd/login.go +++ 
b/cmd/login.go @@ -21,7 +21,6 @@ package cmd import ( - "context" "errors" "fmt" "strings" @@ -78,14 +77,17 @@ func init() { lgCmd.PersistentFlags().BoolVar(&loginCmdArg.servicePrincipal, "service-principal", false, "Log in via Service Principal Name (SPN) by using a certificate or a secret. The client secret or certificate password must be placed in the appropriate environment variable. Type AzCopy env to see names and descriptions of environment variables.") // Client ID of user-assigned identity. lgCmd.PersistentFlags().StringVar(&loginCmdArg.identityClientID, "identity-client-id", "", "Client ID of user-assigned identity.") - // Object ID of user-assigned identity. - lgCmd.PersistentFlags().StringVar(&loginCmdArg.identityObjectID, "identity-object-id", "", "Object ID of user-assigned identity.") // Resource ID of user-assigned identity. lgCmd.PersistentFlags().StringVar(&loginCmdArg.identityResourceID, "identity-resource-id", "", "Resource ID of user-assigned identity.") //login with SPN lgCmd.PersistentFlags().StringVar(&loginCmdArg.applicationID, "application-id", "", "Application ID of user-assigned identity. Required for service principal auth.") lgCmd.PersistentFlags().StringVar(&loginCmdArg.certPath, "certificate-path", "", "Path to certificate for SPN authentication. Required for certificate-based service principal auth.") + + // Deprecate the identity-object-id flag + _ = lgCmd.PersistentFlags().MarkHidden("identity-object-id") // Object ID of user-assigned identity. + lgCmd.PersistentFlags().StringVar(&loginCmdArg.identityObjectID, "identity-object-id", "", "Object ID of user-assigned identity. This parameter is deprecated. Please use client id or resource id") + } type loginCmdArgs struct { @@ -167,20 +169,20 @@ func (lca loginCmdArgs) process() error { case lca.servicePrincipal: if lca.certPath != "" { - if _, err := uotm.CertLogin(lca.tenantID, lca.aadEndpoint, lca.certPath, lca.certPass, lca.applicationID, lca.persistToken); err != nil { + if err := uotm.CertLogin(lca.tenantID, lca.aadEndpoint, lca.certPath, lca.certPass, lca.applicationID, lca.persistToken); err != nil { return err } glcm.Info("SPN Auth via cert succeeded.") } else { - if _, err := uotm.SecretLogin(lca.tenantID, lca.aadEndpoint, lca.clientSecret, lca.applicationID, lca.persistToken); err != nil { + if err := uotm.SecretLogin(lca.tenantID, lca.aadEndpoint, lca.clientSecret, lca.applicationID, lca.persistToken); err != nil { return err } glcm.Info("SPN Auth via secret succeeded.") } case lca.identity: - if _, err := uotm.MSILogin(context.TODO(), common.IdentityInfo{ + if err := uotm.MSILogin(common.IdentityInfo{ ClientID: lca.identityClientID, ObjectID: lca.identityObjectID, MSIResID: lca.identityResourceID, @@ -190,7 +192,7 @@ func (lca loginCmdArgs) process() error { // For MSI login, info success message to user. glcm.Info("Login with identity succeeded.") default: - if _, err := uotm.UserLogin(lca.tenantID, lca.aadEndpoint, lca.persistToken); err != nil { + if err := uotm.UserLogin(lca.tenantID, lca.aadEndpoint, lca.persistToken); err != nil { return err } // User fulfills login in browser, and there would be message in browser indicating whether login fulfilled successfully. 
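
A rough sketch (outside this patch, with illustrative helper and parameter names) of why the object-ID path is dropped in the login changes above: the azidentity managed-identity credential that AzCopy now builds on can select a user-assigned identity only by client ID or resource ID.

```go
package msiexample

import "github.com/Azure/azure-sdk-for-go/sdk/azidentity"

// newUserAssignedMSICredential shows the only two ways azidentity lets a
// user-assigned identity be selected; there is no object-ID option, which is
// why --identity-object-id is deprecated in login.go above.
func newUserAssignedMSICredential(clientID, resourceID string) (*azidentity.ManagedIdentityCredential, error) {
	opts := &azidentity.ManagedIdentityCredentialOptions{}
	switch {
	case clientID != "":
		opts.ID = azidentity.ClientID(clientID)
	case resourceID != "":
		opts.ID = azidentity.ResourceID(resourceID)
	}
	// With neither set, the system-assigned identity is used.
	return azidentity.NewManagedIdentityCredential(opts)
}
```
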
diff --git a/cmd/make.go b/cmd/make.go index 5a0ba2783..2099dd7f5 100644 --- a/cmd/make.go +++ b/cmd/make.go @@ -24,6 +24,10 @@ import ( "context" "fmt" pipeline2 "github.com/Azure/azure-pipeline-go/pipeline" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/fileerror" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/share" "net/url" "strings" @@ -32,8 +36,6 @@ import ( "github.com/Azure/azure-storage-azcopy/v10/azbfs" "github.com/Azure/azure-storage-azcopy/v10/common" "github.com/Azure/azure-storage-azcopy/v10/ste" - "github.com/Azure/azure-storage-blob-go/azblob" - "github.com/Azure/azure-storage-file-go/azfile" "github.com/spf13/cobra" ) @@ -86,6 +88,9 @@ func (cookedArgs cookedMakeCmdArgs) process() (err error) { return err } + // Note : trailing dot is only applicable to file operations anyway, so setting this to false + options := createClientOptions(pipeline2.LogNone, to.Ptr(common.ETrailingDotOption.Disable()), &cookedArgs.resourceLocation) + switch cookedArgs.resourceLocation { case common.ELocation.BlobFS(): p, err := createBlobFSPipeline(ctx, credentialInfo, pipeline2.LogNone) @@ -108,41 +113,31 @@ func (cookedArgs cookedMakeCmdArgs) process() (err error) { return err } case common.ELocation.Blob(): - p, err := createBlobPipeline(ctx, credentialInfo, pipeline2.LogNone) - if err != nil { - return err - } - containerURL := azblob.NewContainerURL(cookedArgs.resourceURL, p) - if _, err = containerURL.Create(ctx, nil, azblob.PublicAccessNone); err != nil { + // TODO : Ensure it is a container URL here and fail early? + containerClient := common.CreateContainerClient(cookedArgs.resourceURL.String(), credentialInfo, nil, options) + if _, err = containerClient.Create(ctx, nil); err != nil { // print a nicer error message if container already exists - if storageErr, ok := err.(azblob.StorageError); ok { - if storageErr.ServiceCode() == azblob.ServiceCodeContainerAlreadyExists { - return fmt.Errorf("the container already exists") - } else if storageErr.ServiceCode() == azblob.ServiceCodeResourceNotFound { - return fmt.Errorf("please specify a valid container URL with account SAS") - } + if bloberror.HasCode(err, bloberror.ContainerAlreadyExists) { + return fmt.Errorf("the container already exists") + } else if bloberror.HasCode(err, bloberror.ResourceNotFound) { + return fmt.Errorf("please specify a valid container URL with account SAS") } - // print the ugly error if unexpected return err } case common.ELocation.File(): - // Note: trailing dot does not apply to share level operations, so we just set it to false always - p, err := createFilePipeline(ctx, credentialInfo, pipeline2.LogNone, common.ETrailingDotOption.Enable(), cookedArgs.resourceLocation) - if err != nil { - return err + shareClient := common.CreateShareClient(cookedArgs.resourceURL.String(), credentialInfo, nil, options) + quota := &cookedArgs.quota + if quota != nil && *quota == 0 { + quota = nil } - shareURL := azfile.NewShareURL(cookedArgs.resourceURL, p) - if _, err = shareURL.Create(ctx, nil, cookedArgs.quota); err != nil { + if _, err = shareClient.Create(ctx, &share.CreateOptions{Quota: quota}); err != nil { // print a nicer error message if share already exists - if storageErr, ok := err.(azfile.StorageError); ok { - if storageErr.ServiceCode() == azfile.ServiceCodeShareAlreadyExists { - return fmt.Errorf("the file share already exists") - } else if storageErr.ServiceCode() == 
azfile.ServiceCodeResourceNotFound { - return fmt.Errorf("please specify a valid share URL with account SAS") - } + if fileerror.HasCode(err, fileerror.ShareAlreadyExists) { + return fmt.Errorf("the file share already exists") + } else if fileerror.HasCode(err, fileerror.ResourceNotFound) { + return fmt.Errorf("please specify a valid share URL with account SAS") } - // print the ugly error if unexpected return err } diff --git a/cmd/pathUtils.go b/cmd/pathUtils.go index 5ed48c0b5..dc7359564 100644 --- a/cmd/pathUtils.go +++ b/cmd/pathUtils.go @@ -2,11 +2,13 @@ package cmd import ( "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + blobsas "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" + sharefile "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" + filesas "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/sas" "net/url" "strings" - "github.com/Azure/azure-storage-blob-go/azblob" - "github.com/Azure/azure-storage-file-go/azfile" "github.com/pkg/errors" "github.com/Azure/azure-storage-azcopy/v10/azbfs" @@ -107,7 +109,10 @@ func GetResourceRoot(resource string, location common.Location) (resourceBase st //noinspection GoNilness case common.ELocation.Blob(): - bURLParts := azblob.NewBlobURLParts(*resourceURL) + bURLParts, err := blob.ParseURL(resource) + if err != nil { + return resource, err + } if bURLParts.ContainerName == "" || strings.Contains(bURLParts.ContainerName, "*") { if bURLParts.BlobName != "" { @@ -117,38 +122,41 @@ func GetResourceRoot(resource string, location common.Location) (resourceBase st bURLParts.ContainerName = "" } - bURL := bURLParts.URL() - return bURL.String(), nil + return bURLParts.String(), nil //noinspection GoNilness case common.ELocation.File(): - bURLParts := azfile.NewFileURLParts(*resourceURL) + fURLParts, err := sharefile.ParseURL(resource) + if err != nil { + return resource, err + } - if bURLParts.ShareName == "" || strings.Contains(bURLParts.ShareName, "*") { - if bURLParts.DirectoryOrFilePath != "" { + if fURLParts.ShareName == "" || strings.Contains(fURLParts.ShareName, "*") { + if fURLParts.DirectoryOrFilePath != "" { return resource, errors.New("cannot combine account-level traversal and specific file/folder names.") } - bURLParts.ShareName = "" + fURLParts.ShareName = "" } - bURL := bURLParts.URL() - return bURL.String(), nil + return fURLParts.String(), nil //noinspection GoNilness - case common.ELocation.BlobFS(): - bURLParts := azfile.NewFileURLParts(*resourceURL) + case common.ELocation.BlobFS(): // TODO (gapra) change to datalake support + fURLParts, err := sharefile.ParseURL(resource) + if err != nil { + return resource, err + } - if bURLParts.ShareName == "" || strings.Contains(bURLParts.ShareName, "*") { - if bURLParts.DirectoryOrFilePath != "" { + if fURLParts.ShareName == "" || strings.Contains(fURLParts.ShareName, "*") { + if fURLParts.DirectoryOrFilePath != "" { return resource, errors.New("cannot combine account-level traversal and specific file/folder names.") } - bURLParts.ShareName = "" + fURLParts.ShareName = "" } - bURL := bURLParts.URL() - return bURL.String(), nil + return fURLParts.String(), nil // noinspection GoNilness case common.ELocation.S3(): @@ -233,28 +241,23 @@ func splitAuthTokenFromResource(resource string, location common.Location) (reso // It's not a breaking change to the way SAS tokens work, but a pretty major addition. // TODO: Find a clever way to reduce code duplication in here. Especially the URL parsing. 
case common.ELocation.Blob(): - var baseURL *url.URL // Do not shadow err for clean return statement - baseURL, err = url.Parse(resource) - + var bURLParts blobsas.URLParts + bURLParts, err = blob.ParseURL(resource) if err != nil { return resource, "", err } - bURLParts := azblob.NewBlobURLParts(*baseURL) resourceToken = bURLParts.SAS.Encode() - bURLParts.SAS = azblob.SASQueryParameters{} // clear the SAS token and drop the raw, base URL - blobURL := bURLParts.URL() // Can't call .String() on .URL() because Go can't take the pointer of a function's return - resourceBase = blobURL.String() + bURLParts.SAS = blobsas.QueryParameters{} // clear the SAS token and drop the raw, base URL + resourceBase = bURLParts.String() return case common.ELocation.File(): - var baseURL *url.URL // Do not shadow err for clean return statement - baseURL, err = url.Parse(resource) - + var fURLParts filesas.URLParts + fURLParts, err = sharefile.ParseURL(resource) if err != nil { return resource, "", err } - fURLParts := azfile.NewFileURLParts(*baseURL) resourceToken = fURLParts.SAS.Encode() if resourceToken == "" { // Azure Files only supports the use of SAS tokens currently @@ -262,9 +265,8 @@ func splitAuthTokenFromResource(resource string, location common.Location) (reso // Therefore, it is safe to error here if no SAS token is present, as neither a source nor a destination could safely not have a SAS token. return resource, "", errors.New("azure files only supports the use of SAS token authentication") } - fURLParts.SAS = azfile.SASQueryParameters{} // clear the SAS token and drop the raw, base URL - fileURL := fURLParts.URL() // Can't call .String() on .URL() because Go can't take the pointer of a function's return - resourceBase = fileURL.String() + fURLParts.SAS = filesas.QueryParameters{} // clear the SAS token and drop the raw, base URL + resourceBase = fURLParts.String() return case common.ELocation.BlobFS(): var baseURL *url.URL // Do not shadow err for clean return statement @@ -334,19 +336,23 @@ func GetAccountRoot(resource common.ResourceString, location common.Location) (s case common.ELocation.Blob(), common.ELocation.File(), common.ELocation.BlobFS(): - baseURL, err := resource.FullURL() - + baseURL, err := resource.String() if err != nil { return "", err } // Clear the path - bURLParts := azblob.NewBlobURLParts(*baseURL) + bURLParts, err := blob.ParseURL(baseURL) + if err != nil { + return "", err + } + bURLParts.ContainerName = "" bURLParts.BlobName = "" + bURLParts.Snapshot = "" + bURLParts.VersionID = "" - bURL := bURLParts.URL() - return bURL.String(), nil + return bURLParts.String(), nil default: return "", fmt.Errorf("cannot get account root on location type %s", location.String()) } @@ -359,13 +365,10 @@ func GetContainerName(path string, location common.Location) (string, error) { case common.ELocation.Blob(), common.ELocation.File(), common.ELocation.BlobFS(): - baseURL, err := url.Parse(path) - + bURLParts, err := blob.ParseURL(path) if err != nil { return "", err } - - bURLParts := azblob.NewBlobURLParts(*baseURL) return bURLParts.ContainerName, nil case common.ELocation.S3(): baseURL, err := url.Parse(path) diff --git a/cmd/root.go b/cmd/root.go index 596aebf1a..965c59559 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -24,8 +24,8 @@ import ( "bytes" "context" "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" "github.com/Azure/azure-storage-azcopy/v10/jobsAdmin" - "net/url" "os" "runtime" "strings" @@ -36,7 +36,6 @@ import ( 
"github.com/Azure/azure-storage-azcopy/v10/common" "github.com/Azure/azure-storage-azcopy/v10/ste" - "github.com/Azure/azure-storage-blob-go/azblob" "github.com/spf13/cobra" ) @@ -254,25 +253,20 @@ func beginDetectNewVersion() chan struct{} { } // step 1: initialize pipeline - p, err := createBlobPipeline(context.TODO(), common.CredentialInfo{CredentialType: common.ECredentialType.Anonymous()}, pipeline.LogNone) - if err != nil { - return - } + options := createClientOptions(pipeline.LogNone, nil, nil) - // step 2: parse source url - u, err := url.Parse(versionMetadataUrl) + // step 2: start download + blobClient, err := blob.NewClientWithNoCredential(versionMetadataUrl, &blob.ClientOptions{ClientOptions: options}) if err != nil { return } - // step 3: start download - blobURL := azblob.NewBlobURL(*u, p) - blobStream, err := blobURL.Download(context.TODO(), 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{}) + blobStream, err := blobClient.DownloadStream(context.TODO(), nil) if err != nil { return } - blobBody := blobStream.Body(azblob.RetryReaderOptions{MaxRetryRequests: ste.MaxRetryPerDownloadBody}) + blobBody := blobStream.NewRetryReader(context.TODO(), &blob.RetryReaderOptions{MaxRetries: ste.MaxRetryPerDownloadBody}) defer blobBody.Close() // step 4: read newest version str diff --git a/cmd/s3NameResolver_test.go b/cmd/s3NameResolver_test.go index 7a4023fe7..5a92e9f1b 100644 --- a/cmd/s3NameResolver_test.go +++ b/cmd/s3NameResolver_test.go @@ -120,8 +120,8 @@ func TestS3BucketNameToAzureResourceResolverMultipleBucketNames(t *testing.T) { resolvedNameCollision2, err := r.ResolveName("a-b---c") a.Nil(err) - a.EqualValues(1, common.Iffint8(resolvedNameCollision1 == "a-b-3-c", 1, 0)^common.Iffint8(resolvedNameCollision2 == "a-b-3-c", 1, 0)) - a.EqualValues(1, common.Iffint8(resolvedNameCollision1 == "a-b-3-c-2", 1, 0)^common.Iffint8(resolvedNameCollision2 == "a-b-3-c-2", 1, 0)) + a.EqualValues(1, common.Iff(resolvedNameCollision1 == "a-b-3-c", 1, 0)^common.Iff(resolvedNameCollision2 == "a-b-3-c", 1, 0)) + a.EqualValues(1, common.Iff(resolvedNameCollision1 == "a-b-3-c-2", 1, 0)^common.Iff(resolvedNameCollision2 == "a-b-3-c-2", 1, 0)) } func TestS3BucketNameToAzureResourceResolverNegative(t *testing.T) { diff --git a/cmd/sync.go b/cmd/sync.go index 84b340720..d0752ae6a 100644 --- a/cmd/sync.go +++ b/cmd/sync.go @@ -565,7 +565,7 @@ func (cca *cookedSyncCmdArgs) ReportProgressOrExit(lcm common.LifecycleMgr) (tot // compute the average throughput for the last time interval bytesInMb := float64(float64(summary.BytesOverWire-cca.intervalBytesTransferred) * 8 / float64(base10Mega)) timeElapsed := time.Since(cca.intervalStartTime).Seconds() - throughput = common.Iffloat64(timeElapsed != 0, bytesInMb/timeElapsed, 0) + throughput = common.Iff(timeElapsed != 0, bytesInMb/timeElapsed, 0) // reset the interval timer and byte count cca.intervalStartTime = time.Now() diff --git a/cmd/syncProcessor.go b/cmd/syncProcessor.go index 9e30f9cf2..7a1517139 100644 --- a/cmd/syncProcessor.go +++ b/cmd/syncProcessor.go @@ -25,11 +25,15 @@ import ( "encoding/json" "fmt" "github.com/Azure/azure-pipeline-go/pipeline" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + sharedirectory "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/directory" + sharefile "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" + 
"github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/fileerror" "github.com/Azure/azure-storage-azcopy/v10/azbfs" "github.com/Azure/azure-storage-azcopy/v10/common" "github.com/Azure/azure-storage-azcopy/v10/ste" - "github.com/Azure/azure-storage-blob-go/azblob" - "github.com/Azure/azure-storage-file-go/azfile" "net/url" "os" "path" @@ -142,7 +146,7 @@ func (d *interactiveDeleteProcessor) removeImmediately(object StoredObject) (err jsonOutput, err := json.Marshal(newDeleteTransfer(object)) common.PanicIfErr(err) return string(jsonOutput) - } else { // remove for sync + } else { // remove for sync if d.objectTypeToDisplay == "local file" { // removing from local src dryrunValue := fmt.Sprintf("DRYRUN: remove %v", common.ToShortPath(d.objectLocationToDisplay)) if runtime.GOOS == "windows" { @@ -281,14 +285,24 @@ func newSyncDeleteProcessor(cca *cookedSyncCmdArgs, fpo common.FolderPropertyOpt if err != nil { return nil, err } + var trailingDot *common.TrailingDotOption + var from *common.Location + if cca.fromTo.To() == common.ELocation.File() { + trailingDot = &cca.trailingDot + from = to.Ptr(cca.fromTo.From()) + } + + clientOptions := createClientOptions(azcopyLogVerbosity.ToPipelineLogLevel(), trailingDot, from) - return newInteractiveDeleteProcessor(newRemoteResourceDeleter(rawURL, p, ctx, cca.fromTo.To(), fpo, cca.forceIfReadOnly).delete, + return newInteractiveDeleteProcessor(newRemoteResourceDeleter(rawURL, p, cca.credentialInfo, clientOptions, ctx, cca.fromTo.To(), fpo, cca.forceIfReadOnly).delete, cca.deleteDestination, cca.fromTo.To().String(), cca.destination, cca.incrementDeletionCount, cca.dryrunMode), nil } type remoteResourceDeleter struct { rootURL *url.URL p pipeline.Pipeline + credInfo common.CredentialInfo + clientOptions azcore.ClientOptions ctx context.Context targetLocation common.Location folderManager common.FolderDeletionManager @@ -296,10 +310,12 @@ type remoteResourceDeleter struct { forceIfReadOnly bool } -func newRemoteResourceDeleter(rawRootURL *url.URL, p pipeline.Pipeline, ctx context.Context, targetLocation common.Location, fpo common.FolderPropertyOption, forceIfReadOnly bool) *remoteResourceDeleter { +func newRemoteResourceDeleter(rawRootURL *url.URL, p pipeline.Pipeline, credInfo common.CredentialInfo, clientOptions azcore.ClientOptions, ctx context.Context, targetLocation common.Location, fpo common.FolderPropertyOption, forceIfReadOnly bool) *remoteResourceDeleter { return &remoteResourceDeleter{ rootURL: rawRootURL, p: p, + credInfo: credInfo, + clientOptions: clientOptions, ctx: ctx, targetLocation: targetLocation, folderManager: common.NewFolderDeletionManager(ctx, fpo, azcopyScanningLogger), @@ -311,13 +327,27 @@ func newRemoteResourceDeleter(rawRootURL *url.URL, p pipeline.Pipeline, ctx cont func (b *remoteResourceDeleter) getObjectURL(object StoredObject) (url url.URL) { switch b.targetLocation { case common.ELocation.Blob(): - blobURLParts := azblob.NewBlobURLParts(*b.rootURL) + blobURLParts, err := blob.ParseURL(b.rootURL.String()) + if err != nil { + panic(err) + } blobURLParts.BlobName = path.Join(blobURLParts.BlobName, object.relativePath) - url = blobURLParts.URL() + u, err := url.Parse(blobURLParts.String()) + if err != nil { + panic(err) + } + url = *u case common.ELocation.File(): - fileURLParts := azfile.NewFileURLParts(*b.rootURL) + fileURLParts, err := sharefile.ParseURL(b.rootURL.String()) + if err != nil { + panic(err) + } fileURLParts.DirectoryOrFilePath = path.Join(fileURLParts.DirectoryOrFilePath, object.relativePath) - url = 
fileURLParts.URL() + u, err := url.Parse(fileURLParts.String()) + if err != nil { + panic(err) + } + url = *u case common.ELocation.BlobFS(): blobFSURLParts := azbfs.NewBfsURLParts(*b.rootURL) blobFSURLParts.DirectoryOrFilePath = path.Join(blobFSURLParts.DirectoryOrFilePath, object.relativePath) @@ -341,33 +371,39 @@ func (b *remoteResourceDeleter) delete(object StoredObject) error { b.folderManager.RecordChildExists(&objectURL) defer b.folderManager.RecordChildDeleted(&objectURL) - var err error switch b.targetLocation { case common.ELocation.Blob(): - blobURLParts := azblob.NewBlobURLParts(*b.rootURL) + blobURLParts, err := blob.ParseURL(b.rootURL.String()) + if err != nil { + return err + } blobURLParts.BlobName = path.Join(blobURLParts.BlobName, object.relativePath) - blobURL := azblob.NewBlobURL(blobURLParts.URL(), b.p) - _, err = blobURL.Delete(b.ctx, azblob.DeleteSnapshotsOptionInclude, azblob.BlobAccessConditions{}) + + blobClient := common.CreateBlobClient(blobURLParts.String(), b.credInfo, nil, b.clientOptions) + _, err = blobClient.Delete(b.ctx, nil) + return err case common.ELocation.File(): - fileURLParts := azfile.NewFileURLParts(*b.rootURL) + fileURLParts, err := sharefile.ParseURL(b.rootURL.String()) + if err != nil { + return err + } fileURLParts.DirectoryOrFilePath = path.Join(fileURLParts.DirectoryOrFilePath, object.relativePath) - fileURL := azfile.NewFileURL(fileURLParts.URL(), b.p) - - _, err = fileURL.Delete(b.ctx) + fileClient := common.CreateShareFileClient(fileURLParts.String(), b.credInfo, nil, b.clientOptions) - if stgErr, ok := err.(azfile.StorageError); b.forceIfReadOnly && ok && stgErr.ServiceCode() == azfile.ServiceCodeReadOnlyAttribute { + _, err = fileClient.Delete(b.ctx, nil) + if err != nil && b.forceIfReadOnly && fileerror.HasCode(err, fileerror.ReadOnlyAttribute) { msg := fmt.Sprintf("read-only attribute detected, removing it before deleting the file %s", object.relativePath) if azcopyScanningLogger != nil { azcopyScanningLogger.Log(pipeline.LogInfo, msg) } // if the file is read-only, we need to remove the read-only attribute before we can delete it - noAttrib := azfile.FileAttributeNone - _, err = fileURL.SetHTTPHeaders(b.ctx, azfile.FileHTTPHeaders{SMBProperties: azfile.SMBProperties{FileAttributes: &noAttrib}}) + noAttrib := sharefile.NTFSFileAttributes{None: true} + _, err = fileClient.SetHTTPHeaders(b.ctx, &sharefile.SetHTTPHeadersOptions{SMBProperties: &sharefile.SMBProperties{Attributes: &noAttrib}}) if err == nil { - _, err = fileURL.Delete(b.ctx) + _, err = fileClient.Delete(b.ctx, nil) //nolint:staticcheck } else { msg := fmt.Sprintf("error %s removing the read-only attribute from the file %s", err.Error(), object.relativePath) glcm.Info(msg + "; check the scanning log file for more details") @@ -406,24 +442,24 @@ func (b *remoteResourceDeleter) delete(object StoredObject) error { var err error switch b.targetLocation { case common.ELocation.Blob(): - blobURL := azblob.NewBlobURL(objectURL, b.p) + blobClient := common.CreateBlobClient(objectURL.String(), b.credInfo, nil, b.clientOptions) // HNS endpoint doesn't like delete snapshots on a directory - _, err = blobURL.Delete(ctx, azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{}) + _, err = blobClient.Delete(b.ctx, nil) case common.ELocation.File(): - dirURL := azfile.NewDirectoryURL(objectURL, b.p) - _, err = dirURL.Delete(ctx) + directoryClient := common.CreateShareDirectoryClient(objectURL.String(), b.credInfo, nil, b.clientOptions) + _, err = directoryClient.Delete(ctx, nil) 
- if stgErr, ok := err.(azfile.StorageError); b.forceIfReadOnly && ok && stgErr.ServiceCode() == azfile.ServiceCodeReadOnlyAttribute { + if err != nil && b.forceIfReadOnly && fileerror.HasCode(err, fileerror.ReadOnlyAttribute) { msg := fmt.Sprintf("read-only attribute detected, removing it before deleting the file %s", object.relativePath) if azcopyScanningLogger != nil { azcopyScanningLogger.Log(pipeline.LogInfo, msg) } // if the file is read-only, we need to remove the read-only attribute before we can delete it - noAttrib := azfile.FileAttributeNone - _, err = dirURL.SetProperties(b.ctx, azfile.SMBProperties{FileAttributes: &noAttrib}) + noAttrib := sharefile.NTFSFileAttributes{None: true} + _, err = directoryClient.SetProperties(b.ctx, &sharedirectory.SetPropertiesOptions{FileSMBProperties: &sharefile.SMBProperties{Attributes: &noAttrib}}) if err == nil { - _, err = dirURL.Delete(b.ctx) + _, err = directoryClient.Delete(b.ctx, nil) } else { msg := fmt.Sprintf("error %s removing the read-only attribute from the file %s", err.Error(), object.relativePath) glcm.Info(msg + "; check the scanning log file for more details") @@ -444,4 +480,4 @@ func (b *remoteResourceDeleter) delete(object StoredObject) error { return nil } -} +} \ No newline at end of file diff --git a/cmd/zc_enumerator.go b/cmd/zc_enumerator.go old mode 100755 new mode 100644 index 6ba699fb5..62529778d --- a/cmd/zc_enumerator.go +++ b/cmd/zc_enumerator.go @@ -24,6 +24,10 @@ import ( "context" "errors" "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/lease" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" "net/url" "path/filepath" "runtime" @@ -32,9 +36,6 @@ import ( "time" "github.com/Azure/azure-pipeline-go/pipeline" - "github.com/Azure/azure-storage-blob-go/azblob" - "github.com/Azure/azure-storage-file-go/azfile" - "github.com/Azure/azure-storage-azcopy/v10/common" ) @@ -52,7 +53,7 @@ type StoredObject struct { smbLastModifiedTime time.Time size int64 md5 []byte - blobType azblob.BlobType // will be "None" when unknown or not applicable + blobType blob.BlobType // will be "None" when unknown or not applicable // all of these will be empty when unknown or not applicable. contentDisposition string @@ -76,8 +77,8 @@ type StoredObject struct { // destination container name. Included in the processor after resolving container names. DstContainerName string // access tier, only included by blob traverser. 
- blobAccessTier azblob.AccessTierType - archiveStatus azblob.ArchiveStatusType + blobAccessTier blob.AccessTier + archiveStatus blob.ArchiveStatus // metadata, included in S2S transfers Metadata common.Metadata blobVersionID string @@ -86,9 +87,9 @@ type StoredObject struct { blobDeleted bool // Lease information - leaseState azblob.LeaseStateType - leaseStatus azblob.LeaseStatusType - leaseDuration azblob.LeaseDurationType + leaseState lease.StateType + leaseStatus lease.StatusType + leaseDuration lease.DurationType } func (s *StoredObject) isMoreRecentThan(storedObject2 StoredObject, preferSMBTime bool) bool { @@ -228,12 +229,19 @@ type contentPropsProvider interface { ContentMD5() []byte } type blobPropsProvider interface { - BlobType() azblob.BlobType - AccessTier() azblob.AccessTierType - LeaseStatus() azblob.LeaseStatusType - LeaseDuration() azblob.LeaseDurationType - LeaseState() azblob.LeaseStateType - ArchiveStatus() azblob.ArchiveStatusType + BlobType() blob.BlobType + AccessTier() blob.AccessTier + LeaseStatus() lease.StatusType + LeaseDuration() lease.DurationType + LeaseState() lease.StateType + ArchiveStatus() blob.ArchiveStatus +} +type filePropsProvider interface { + contentPropsProvider + Metadata() common.Metadata + LastModified() time.Time + FileLastWriteTime() time.Time + ContentLength() int64 } // a constructor is used so that in case the StoredObject has to change, the callers would get a compilation error @@ -366,7 +374,7 @@ func InitResourceTraverser(resource common.ResourceString, location common.Locat if location.IsLocal() { // First, ignore all escaped stars. Stars can be valid characters on many platforms (out of the 3 we support though, Windows is the only that cannot support it). // In the future, should we end up supporting another OS that does not treat * as a valid character, we should turn these checks into a map-check against runtime.GOOS. - tmpResource := common.IffString(runtime.GOOS == "windows", resource.ValueLocal(), strings.ReplaceAll(resource.ValueLocal(), `\*`, ``)) + tmpResource := common.Iff(runtime.GOOS == "windows", resource.ValueLocal(), strings.ReplaceAll(resource.ValueLocal(), `\*`, ``)) // check for remaining stars. We can't combine list traversers, and wildcarded list traversal occurs below. if strings.Contains(tmpResource, "*") { return nil, errors.New("cannot combine local wildcards with include-path or list-of-files") @@ -418,6 +426,7 @@ func InitResourceTraverser(resource common.ResourceString, location common.Locat output = ben case common.ELocation.Blob(): + // TODO (last service migration) : Remove dependency on URLs. 
resourceURL, err := resource.FullURL() if err != nil { return nil, err @@ -425,25 +434,35 @@ func InitResourceTraverser(resource common.ResourceString, location common.Locat recommendHttpsIfNecessary(*resourceURL) - if ctx == nil || p == nil { - return nil, errors.New("a valid credential and context must be supplied to create a blob traverser") + if ctx == nil { + return nil, errors.New("a valid context must be supplied to create a blob traverser") } + r := resourceURL.String() - burl := azblob.NewBlobURLParts(*resourceURL) - - if burl.ContainerName == "" || strings.Contains(burl.ContainerName, "*") { - + blobURLParts, err := blob.ParseURL(r) + if err != nil { + return nil, err + } + containerName := blobURLParts.ContainerName + // Strip any non-service related things away + blobURLParts.ContainerName = "" + blobURLParts.BlobName = "" + blobURLParts.Snapshot = "" + blobURLParts.VersionID = "" + bsc := common.CreateBlobServiceClient(blobURLParts.String(), *credential, &common.CredentialOpOptions{LogError: glcm.Info}, createClientOptions(logLevel, nil, nil)) + + if containerName == "" || strings.Contains(containerName, "*") { if !recursive { return nil, errors.New(accountTraversalInherentlyRecursiveError) } - - output = newBlobAccountTraverser(resourceURL, p, *ctx, includeDirectoryStubs, incrementEnumerationCounter, s2sPreserveBlobTags, cpkOptions, preservePermissions, false) + output = newBlobAccountTraverser(bsc, containerName, *ctx, includeDirectoryStubs, incrementEnumerationCounter, s2sPreserveBlobTags, cpkOptions, preservePermissions, false) } else if listOfVersionIds != nil { - output = newBlobVersionsTraverser(resourceURL, p, *ctx, recursive, includeDirectoryStubs, incrementEnumerationCounter, listOfVersionIds, cpkOptions) + output = newBlobVersionsTraverser(r, bsc, *ctx, includeDirectoryStubs, incrementEnumerationCounter, listOfVersionIds, cpkOptions) } else { - output = newBlobTraverser(resourceURL, p, *ctx, recursive, includeDirectoryStubs, incrementEnumerationCounter, s2sPreserveBlobTags, cpkOptions, includeDeleted, includeSnapshot, includeVersion, preservePermissions, false) + output = newBlobTraverser(r, bsc, *ctx, recursive, includeDirectoryStubs, incrementEnumerationCounter, s2sPreserveBlobTags, cpkOptions, includeDeleted, includeSnapshot, includeVersion, preservePermissions, false) } case common.ELocation.File(): + // TODO (last service migration) : Remove dependency on URLs. 
resourceURL, err := resource.FullURL() if err != nil { return nil, err @@ -451,20 +470,29 @@ func InitResourceTraverser(resource common.ResourceString, location common.Locat recommendHttpsIfNecessary(*resourceURL) - if ctx == nil || p == nil { - return nil, errors.New("a valid credential and context must be supplied to create a file traverser") + if ctx == nil { + return nil, errors.New("a valid context must be supplied to create a file traverser") } + r := resourceURL.String() - furl := azfile.NewFileURLParts(*resourceURL) - - if furl.ShareName == "" || strings.Contains(furl.ShareName, "*") { + fileURLParts, err := file.ParseURL(r) + if err != nil { + return nil, err + } + shareName := fileURLParts.ShareName + // Strip any non-service related things away + fileURLParts.ShareName = "" + fileURLParts.ShareSnapshot = "" + fileURLParts.DirectoryOrFilePath = "" + fsc := common.CreateFileServiceClient(fileURLParts.String(), *credential, &common.CredentialOpOptions{LogError: glcm.Info}, createClientOptions(logLevel, to.Ptr(trailingDot), destination)) + + if shareName == "" || strings.Contains(shareName, "*") { if !recursive { return nil, errors.New(accountTraversalInherentlyRecursiveError) } - - output = newFileAccountTraverser(resourceURL, p, *ctx, getProperties, incrementEnumerationCounter, trailingDot, destination) + output = newFileAccountTraverser(fsc, shareName, *ctx, getProperties, incrementEnumerationCounter, trailingDot, destination) } else { - output = newFileTraverser(resourceURL, p, *ctx, recursive, getProperties, incrementEnumerationCounter, trailingDot, destination) + output = newFileTraverser(r, fsc, *ctx, recursive, getProperties, incrementEnumerationCounter, trailingDot, destination) } case common.ELocation.BlobFS(): resourceURL, err := resource.FullURL() @@ -472,47 +500,35 @@ func InitResourceTraverser(resource common.ResourceString, location common.Locat return nil, err } - // check if credential is also nil here (would never trigger) to tame syntax highlighting. - // As a precondition to pipeline p, credential must not be nil anyway. 
- if ctx == nil || p == nil || credential == nil { - return nil, errors.New("a valid credential and context must be supplied to create a blobFS traverser") - } - recommendHttpsIfNecessary(*resourceURL) - // Convert BlobFS pipeline to blob-compatible pipeline - var credElement azblob.Credential - if credential.CredentialType == common.ECredentialType.SharedKey() { - // Convert the shared key credential to a blob credential & re-use it - credElement, err = azblob.NewSharedKeyCredential(glcm.GetEnvironmentVariable(common.EEnvironmentVariable.AccountName()), glcm.GetEnvironmentVariable(common.EEnvironmentVariable.AccountKey())) - - if err != nil { - return nil, err - } - } else { - // Get a standard blob credential, anything else is compatible - credElement = common.CreateBlobCredential(*ctx, *credential, common.CredentialOpOptions{ - LogError: glcm.Info, - }) + if ctx == nil { + return nil, errors.New("a valid context must be supplied to create a blob traverser") } - - blobPipeline := createBlobPipelineFromCred(credElement, logLevel) - - burl := azblob.NewBlobURLParts(*resourceURL) - burl.Host = strings.Replace(burl.Host, ".dfs", ".blob", 1) - blobResourceURL := burl.URL() + r := resourceURL.String() + r = strings.Replace(r, ".dfs", ".blob", 1) + blobURLParts, err := blob.ParseURL(r) + if err != nil { + return nil, err + } + containerName := blobURLParts.ContainerName + // Strip any non-service related things away + blobURLParts.ContainerName = "" + blobURLParts.BlobName = "" + blobURLParts.Snapshot = "" + blobURLParts.VersionID = "" + bsc := common.CreateBlobServiceClient(blobURLParts.String(), *credential, &common.CredentialOpOptions{LogError: glcm.Info}, createClientOptions(logLevel, nil, nil)) includeDirectoryStubs = true // DFS is supposed to feed folders in - if burl.ContainerName == "" || strings.Contains(burl.ContainerName, "*") { + if containerName == "" || strings.Contains(containerName, "*") { if !recursive { return nil, errors.New(accountTraversalInherentlyRecursiveError) } - - output = newBlobAccountTraverser(&blobResourceURL, blobPipeline, *ctx, includeDirectoryStubs, incrementEnumerationCounter, s2sPreserveBlobTags, cpkOptions, preservePermissions, true) + output = newBlobAccountTraverser(bsc, containerName, *ctx, includeDirectoryStubs, incrementEnumerationCounter, s2sPreserveBlobTags, cpkOptions, preservePermissions, true) } else if listOfVersionIds != nil { - output = newBlobVersionsTraverser(&blobResourceURL, blobPipeline, *ctx, recursive, includeDirectoryStubs, incrementEnumerationCounter, listOfVersionIds, cpkOptions) + output = newBlobVersionsTraverser(r, bsc, *ctx, includeDirectoryStubs, incrementEnumerationCounter, listOfVersionIds, cpkOptions) } else { - output = newBlobTraverser(&blobResourceURL, blobPipeline, *ctx, recursive, includeDirectoryStubs, incrementEnumerationCounter, s2sPreserveBlobTags, cpkOptions, includeDeleted, includeSnapshot, includeVersion, preservePermissions, true) + output = newBlobTraverser(r, bsc, *ctx, recursive, includeDirectoryStubs, incrementEnumerationCounter, s2sPreserveBlobTags, cpkOptions, includeDeleted, includeSnapshot, includeVersion, preservePermissions, true) } case common.ELocation.S3(): resourceURL, err := resource.FullURL() @@ -804,4 +820,4 @@ func getObjectNameOnly(fullPath string) (nameOnly string) { } return -} +} \ No newline at end of file diff --git a/cmd/zc_filter.go b/cmd/zc_filter.go index eaa45e6e2..f5d48f8cb 100644 --- a/cmd/zc_filter.go +++ b/cmd/zc_filter.go @@ -22,24 +22,23 @@ package cmd import ( "fmt" + 
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" "path" "regexp" "strings" "time" - "github.com/Azure/azure-storage-blob-go/azblob" - "github.com/Azure/azure-storage-file-go/azfile" - "github.com/Azure/azure-storage-azcopy/v10/common" ) +const ISO8601 = "2006-01-02T15:04:05.0000000Z" // must have 0's for fractional seconds, because Files Service requires fixed width // Design explanation: /* Blob type exclusion is required as a part of the copy enumerators refactor. This would be used in Download and S2S scenarios. This map is used effectively as a hash set. If an item exists in the set, it does not pass the filter. */ type excludeBlobTypeFilter struct { - blobTypes map[azblob.BlobType]bool + blobTypes map[blob.BlobType]bool } func (f *excludeBlobTypeFilter) DoesSupportThisOS() (msg string, supported bool) { @@ -401,7 +400,7 @@ func parseISO8601(s string, chooseEarliest bool) (time.Time, error) { // list of ISO-8601 Go-lang formats in descending order of completeness formats := []string{ - azfile.ISO8601, // Support AzFile's more accurate format + ISO8601, // Support AzFile's more accurate format "2006-01-02T15:04:05Z07:00", // equal to time.RFC3339, which in Go parsing is basically "ISO 8601 with nothing optional" "2006-01-02T15:04:05", // no timezone "2006-01-02T15:04", // no seconds diff --git a/cmd/zc_newobjectadapters.go b/cmd/zc_newobjectadapters.go index a4587a3d0..45b756915 100644 --- a/cmd/zc_newobjectadapters.go +++ b/cmd/zc_newobjectadapters.go @@ -21,13 +21,18 @@ package cmd import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/lease" + sharedirectory "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/directory" + sharefile "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" "github.com/Azure/azure-storage-azcopy/v10/common" - "github.com/Azure/azure-storage-blob-go/azblob" + "time" ) var noContentProps = emptyPropertiesAdapter{} var noBlobProps = emptyPropertiesAdapter{} -var noMetdata common.Metadata = nil +var noMetadata common.Metadata = nil // emptyPropertiesAdapter supplies empty (zero-like) values // for all methods in contentPropsProvider and blobPropsProvider @@ -57,96 +62,227 @@ func (e emptyPropertiesAdapter) ContentMD5() []byte { return make([]byte, 0) } -func (e emptyPropertiesAdapter) BlobType() azblob.BlobType { - return azblob.BlobNone +func (e emptyPropertiesAdapter) BlobType() blob.BlobType { + return "" } -func (e emptyPropertiesAdapter) AccessTier() azblob.AccessTierType { - return azblob.AccessTierNone +func (e emptyPropertiesAdapter) AccessTier() blob.AccessTier { + return "" } -func (e emptyPropertiesAdapter) ArchiveStatus() azblob.ArchiveStatusType { - return azblob.ArchiveStatusNone +func (e emptyPropertiesAdapter) ArchiveStatus() blob.ArchiveStatus { + return "" } -func (e emptyPropertiesAdapter) LeaseDuration() azblob.LeaseDurationType { - return azblob.LeaseDurationNone +func (e emptyPropertiesAdapter) LeaseDuration() lease.DurationType { + return "" } -func (e emptyPropertiesAdapter) LeaseState() azblob.LeaseStateType { - return azblob.LeaseStateNone +func (e emptyPropertiesAdapter) LeaseState() lease.StateType { + return "" } -func (e emptyPropertiesAdapter) LeaseStatus() azblob.LeaseStatusType { - return azblob.LeaseStatusNone +func (e emptyPropertiesAdapter) LeaseStatus() lease.StatusType { + return "" } // blobPropertiesResponseAdapter adapts a BlobGetPropertiesResponse to the 
blobPropsProvider interface type blobPropertiesResponseAdapter struct { - *azblob.BlobGetPropertiesResponse + *blob.GetPropertiesResponse +} + +func (a blobPropertiesResponseAdapter) CacheControl() string { + return common.IffNotNil(a.GetPropertiesResponse.CacheControl, "") +} + +func (a blobPropertiesResponseAdapter) ContentDisposition() string { + return common.IffNotNil(a.GetPropertiesResponse.ContentDisposition, "") +} + +func (a blobPropertiesResponseAdapter) ContentEncoding() string { + return common.IffNotNil(a.GetPropertiesResponse.ContentEncoding, "") +} + +func (a blobPropertiesResponseAdapter) ContentLanguage() string { + return common.IffNotNil(a.GetPropertiesResponse.ContentLanguage, "") +} + +func (a blobPropertiesResponseAdapter) ContentType() string { + return common.IffNotNil(a.GetPropertiesResponse.ContentType, "") +} + +func (a blobPropertiesResponseAdapter) ContentMD5() []byte { + return a.GetPropertiesResponse.ContentMD5 +} + +func (a blobPropertiesResponseAdapter) BlobType() blob.BlobType { + return common.IffNotNil(a.GetPropertiesResponse.BlobType, "") } -func (a blobPropertiesResponseAdapter) AccessTier() azblob.AccessTierType { - return azblob.AccessTierType(a.BlobGetPropertiesResponse.AccessTier()) +func (a blobPropertiesResponseAdapter) AccessTier() blob.AccessTier { + return blob.AccessTier(common.IffNotNil(a.GetPropertiesResponse.AccessTier, "")) } -func (a blobPropertiesResponseAdapter) ArchiveStatus() azblob.ArchiveStatusType { - return azblob.ArchiveStatusType(a.BlobGetPropertiesResponse.ArchiveStatus()) +func (a blobPropertiesResponseAdapter) ArchiveStatus() blob.ArchiveStatus { + return blob.ArchiveStatus(common.IffNotNil(a.GetPropertiesResponse.ArchiveStatus, "")) +} + +// LeaseDuration returns the value for header x-ms-lease-duration. +func (a blobPropertiesResponseAdapter) LeaseDuration() lease.DurationType { + return common.IffNotNil(a.GetPropertiesResponse.LeaseDuration, "") +} + +// LeaseState returns the value for header x-ms-lease-state. +func (a blobPropertiesResponseAdapter) LeaseState() lease.StateType { + return common.IffNotNil(a.GetPropertiesResponse.LeaseState, "") +} + +// LeaseStatus returns the value for header x-ms-lease-status. 
+func (a blobPropertiesResponseAdapter) LeaseStatus() lease.StatusType { + return common.IffNotNil(a.GetPropertiesResponse.LeaseStatus, "") } // blobPropertiesAdapter adapts a BlobProperties object to both the // contentPropsProvider and blobPropsProvider interfaces type blobPropertiesAdapter struct { - BlobProperties azblob.BlobPropertiesInternal + BlobProperties *container.BlobProperties } func (a blobPropertiesAdapter) CacheControl() string { - return common.IffStringNotNil(a.BlobProperties.CacheControl, "") + return common.IffNotNil(a.BlobProperties.CacheControl, "") } func (a blobPropertiesAdapter) ContentDisposition() string { - return common.IffStringNotNil(a.BlobProperties.ContentDisposition, "") + return common.IffNotNil(a.BlobProperties.ContentDisposition, "") } func (a blobPropertiesAdapter) ContentEncoding() string { - return common.IffStringNotNil(a.BlobProperties.ContentEncoding, "") + return common.IffNotNil(a.BlobProperties.ContentEncoding, "") } func (a blobPropertiesAdapter) ContentLanguage() string { - return common.IffStringNotNil(a.BlobProperties.ContentLanguage, "") + return common.IffNotNil(a.BlobProperties.ContentLanguage, "") } func (a blobPropertiesAdapter) ContentType() string { - return common.IffStringNotNil(a.BlobProperties.ContentType, "") + return common.IffNotNil(a.BlobProperties.ContentType, "") } func (a blobPropertiesAdapter) ContentMD5() []byte { return a.BlobProperties.ContentMD5 } -func (a blobPropertiesAdapter) BlobType() azblob.BlobType { - return a.BlobProperties.BlobType +func (a blobPropertiesAdapter) BlobType() blob.BlobType { + return common.IffNotNil(a.BlobProperties.BlobType, "") } -func (a blobPropertiesAdapter) AccessTier() azblob.AccessTierType { - return a.BlobProperties.AccessTier +func (a blobPropertiesAdapter) AccessTier() blob.AccessTier { + return common.IffNotNil(a.BlobProperties.AccessTier, "") } // LeaseDuration returns the value for header x-ms-lease-duration. -func (a blobPropertiesAdapter) LeaseDuration() azblob.LeaseDurationType { - return a.BlobProperties.LeaseDuration +func (a blobPropertiesAdapter) LeaseDuration() lease.DurationType { + return common.IffNotNil(a.BlobProperties.LeaseDuration, "") } // LeaseState returns the value for header x-ms-lease-state. -func (a blobPropertiesAdapter) LeaseState() azblob.LeaseStateType { - return a.BlobProperties.LeaseState +func (a blobPropertiesAdapter) LeaseState() lease.StateType { + return common.IffNotNil(a.BlobProperties.LeaseState, "") } // LeaseStatus returns the value for header x-ms-lease-status. 
-func (a blobPropertiesAdapter) LeaseStatus() azblob.LeaseStatusType { - return a.BlobProperties.LeaseStatus +func (a blobPropertiesAdapter) LeaseStatus() lease.StatusType { + return common.IffNotNil(a.BlobProperties.LeaseStatus, "") +} + +func (a blobPropertiesAdapter) ArchiveStatus() blob.ArchiveStatus { + return common.IffNotNil(a.BlobProperties.ArchiveStatus, "") +} + +type shareFilePropertiesAdapter struct { + *sharefile.GetPropertiesResponse +} + +func (a shareFilePropertiesAdapter) Metadata() common.Metadata { + return a.GetPropertiesResponse.Metadata +} + +func (a shareFilePropertiesAdapter) LastModified() time.Time { + return common.IffNotNil(a.GetPropertiesResponse.LastModified, time.Time{}) +} + +func (a shareFilePropertiesAdapter) FileLastWriteTime() time.Time { + return common.IffNotNil(a.GetPropertiesResponse.FileLastWriteTime, time.Time{}) } -func (a blobPropertiesAdapter) ArchiveStatus() azblob.ArchiveStatusType { - return a.BlobProperties.ArchiveStatus +func (a shareFilePropertiesAdapter) CacheControl() string { + return common.IffNotNil(a.GetPropertiesResponse.CacheControl, "") } + +func (a shareFilePropertiesAdapter) ContentDisposition() string { + return common.IffNotNil(a.GetPropertiesResponse.ContentDisposition, "") +} + +func (a shareFilePropertiesAdapter) ContentEncoding() string { + return common.IffNotNil(a.GetPropertiesResponse.ContentEncoding, "") +} + +func (a shareFilePropertiesAdapter) ContentLanguage() string { + return common.IffNotNil(a.GetPropertiesResponse.ContentLanguage, "") +} + +func (a shareFilePropertiesAdapter) ContentType() string { + return common.IffNotNil(a.GetPropertiesResponse.ContentType, "") +} + +func (a shareFilePropertiesAdapter) ContentMD5() []byte { + return a.GetPropertiesResponse.ContentMD5 +} + +func (a shareFilePropertiesAdapter) ContentLength() int64 { + return common.IffNotNil(a.GetPropertiesResponse.ContentLength, 0) +} + +type shareDirectoryPropertiesAdapter struct { + *sharedirectory.GetPropertiesResponse +} + +func (a shareDirectoryPropertiesAdapter) Metadata() common.Metadata { + return a.GetPropertiesResponse.Metadata +} + +func (a shareDirectoryPropertiesAdapter) LastModified() time.Time { + return common.IffNotNil(a.GetPropertiesResponse.LastModified, time.Time{}) +} + +func (a shareDirectoryPropertiesAdapter) FileLastWriteTime() time.Time { + return common.IffNotNil(a.GetPropertiesResponse.FileLastWriteTime, time.Time{}) +} + +func (a shareDirectoryPropertiesAdapter) CacheControl() string { + return "" +} + +func (a shareDirectoryPropertiesAdapter) ContentDisposition() string { + return "" +} + +func (a shareDirectoryPropertiesAdapter) ContentEncoding() string { + return "" +} + +func (a shareDirectoryPropertiesAdapter) ContentLanguage() string { + return "" +} + +func (a shareDirectoryPropertiesAdapter) ContentType() string { + return "" +} + +func (a shareDirectoryPropertiesAdapter) ContentMD5() []byte { + return make([]byte, 0) +} + +func (a shareDirectoryPropertiesAdapter) ContentLength() int64 { + return 0 +} \ No newline at end of file diff --git a/cmd/zc_pipeline_init.go b/cmd/zc_pipeline_init.go index 51363bea8..416a0a9eb 100644 --- a/cmd/zc_pipeline_init.go +++ b/cmd/zc_pipeline_init.go @@ -14,12 +14,10 @@ func InitPipeline(ctx context.Context, location common.Location, credential comm common.ELocation.Benchmark(): // Gracefully return return nil, nil - case common.ELocation.Blob(): - p, err = createBlobPipeline(ctx, credential, logLevel) - case common.ELocation.File(): - p, err = createFilePipeline(ctx, 
credential, logLevel, trailingDot, from) case common.ELocation.BlobFS(): p, err = createBlobFSPipeline(ctx, credential, logLevel) + case common.ELocation.File():// Pipelines no longer used for Blob or File since moving to Track 2. + case common.ELocation.Blob(): case common.ELocation.S3(): case common.ELocation.GCP(): // Gracefully return because pipelines aren't used for S3 or GCP diff --git a/cmd/zc_processor.go b/cmd/zc_processor.go index 9dc499064..5b6bf3088 100644 --- a/cmd/zc_processor.go +++ b/cmd/zc_processor.go @@ -85,7 +85,7 @@ func (s *copyTransferProcessor) scheduleCopyTransfer(storedObject StoredObject) if len(metadataString) > 0 { for _, keyAndValue := range strings.Split(metadataString, ";") { // key/value pairs are separated by ';' kv := strings.Split(keyAndValue, "=") // key/value are separated by '=' - metadataMap[kv[0]] = kv[1] + metadataMap[kv[0]] = &kv[1] } } copyTransfer.Metadata = metadataMap diff --git a/cmd/zc_traverser_benchmark.go b/cmd/zc_traverser_benchmark.go index ca9d2a6eb..be6e15389 100644 --- a/cmd/zc_traverser_benchmark.go +++ b/cmd/zc_traverser_benchmark.go @@ -89,7 +89,7 @@ func (t *benchmarkTraverser) Traverse(preprocessor objectMorpher, processor obje t.bytesPerFile, noContentProps, noBlobProps, - noMetdata, + noMetadata, ""), processor) _, err = getProcessingError(err) if err != nil { diff --git a/cmd/zc_traverser_blob.go b/cmd/zc_traverser_blob.go index ad0e4a49f..71db83680 100644 --- a/cmd/zc_traverser_blob.go +++ b/cmd/zc_traverser_blob.go @@ -23,6 +23,11 @@ package cmd import ( "context" "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service" "net/url" "strings" "time" @@ -30,7 +35,6 @@ import ( "github.com/Azure/azure-storage-azcopy/v10/common/parallel" "github.com/Azure/azure-pipeline-go/pipeline" - "github.com/Azure/azure-storage-blob-go/azblob" "github.com/pkg/errors" "github.com/Azure/azure-storage-azcopy/v10/common" @@ -38,10 +42,10 @@ import ( // allow us to iterate through a path pointing to the blob endpoint type blobTraverser struct { - rawURL *url.URL - p pipeline.Pipeline - ctx context.Context - recursive bool + rawURL string + serviceClient *service.Client + ctx context.Context + recursive bool // parallel listing employs the hierarchical listing API which is more expensive // cx should have the option to disable this optimization in the name of saving costs @@ -86,22 +90,24 @@ func (t *blobTraverser) IsDirectory(isSource bool) (bool, error) { _, _, isDirStub, blobErr := t.getPropertiesIfSingleBlob() - if stgErr, ok := blobErr.(azblob.StorageError); ok { - // We know for sure this is a single blob still, let it walk on through to the traverser. - if stgErr.ServiceCode() == common.CPK_ERROR_SERVICE_CODE { - return false, nil - } + // We know for sure this is a single blob still, let it walk on through to the traverser. 
+ if bloberror.HasCode(blobErr, bloberror.BlobUsesCustomerSpecifiedEncryption) { + return false, nil } if blobErr == nil { return isDirStub, nil } - blobURLParts := azblob.NewBlobURLParts(*t.rawURL) - containerRawURL := copyHandlerUtil{}.getContainerUrl(blobURLParts) - containerURL := azblob.NewContainerURL(containerRawURL, t.p) + blobURLParts, err := blob.ParseURL(t.rawURL) + if err != nil { + return false, err + } + containerClient := t.serviceClient.NewContainerClient(blobURLParts.ContainerName) searchPrefix := strings.TrimSuffix(blobURLParts.BlobName, common.AZCOPY_PATH_SEPARATOR_STRING) + common.AZCOPY_PATH_SEPARATOR_STRING - resp, err := containerURL.ListBlobsFlatSegment(t.ctx, azblob.Marker{}, azblob.ListBlobsSegmentOptions{Prefix: searchPrefix, MaxResults: 1}) + maxResults := int32(1) + pager := containerClient.NewListBlobsFlatPager(&container.ListBlobsFlatOptions{Prefix: &searchPrefix, MaxResults: &maxResults}) + resp, err := pager.NextPage(t.ctx) if err != nil { if azcopyScanningLogger != nil { msg := fmt.Sprintf("Failed to check if the destination is a folder or a file (Azure Files). Assuming the destination is a file: %s", err) @@ -111,12 +117,10 @@ func (t *blobTraverser) IsDirectory(isSource bool) (bool, error) { } if len(resp.Segment.BlobItems) == 0 { - //Not a directory - if stgErr, ok := blobErr.(azblob.StorageError); ok { - // if the blob is not found return the error to throw - if stgErr.ServiceCode() == common.BLOB_NOT_FOUND { - return false, errors.New(common.FILE_NOT_FOUND) - } + // Not a directory + // If the blob is not found return the error to throw + if bloberror.HasCode(blobErr, bloberror.BlobNotFound) { + return false, errors.New(common.FILE_NOT_FOUND) } return false, blobErr } @@ -124,75 +128,83 @@ func (t *blobTraverser) IsDirectory(isSource bool) (bool, error) { return true, nil } -func (t *blobTraverser) getPropertiesIfSingleBlob() (props *azblob.BlobGetPropertiesResponse, isBlob bool, isDirStub bool, err error) { +func (t *blobTraverser) getPropertiesIfSingleBlob() (response *blob.GetPropertiesResponse, isBlob bool, isDirStub bool, err error) { // trim away the trailing slash before we check whether it's a single blob // so that we can detect the directory stub in case there is one - blobUrlParts := azblob.NewBlobURLParts(*t.rawURL) - blobUrlParts.BlobName = strings.TrimSuffix(blobUrlParts.BlobName, common.AZCOPY_PATH_SEPARATOR_STRING) + blobURLParts, err := blob.ParseURL(t.rawURL) + if err != nil { + return nil, false, false, err + } + blobURLParts.BlobName = strings.TrimSuffix(blobURLParts.BlobName, common.AZCOPY_PATH_SEPARATOR_STRING) - if blobUrlParts.BlobName == "" { + if blobURLParts.BlobName == "" { // This is a container, which needs to be given a proper listing. 
return nil, false, false, nil } - // perform the check - blobURL := azblob.NewBlobURL(blobUrlParts.URL(), t.p) - clientProvidedKey := azblob.ClientProvidedKeyOptions{} - if t.cpkOptions.IsSourceEncrypted { - clientProvidedKey = common.GetClientProvidedKey(t.cpkOptions) + blobClient, err := createBlobClientFromServiceClient(blobURLParts, t.serviceClient) + if err != nil { + return nil, false, false, err } - props, err = blobURL.GetProperties(t.ctx, azblob.BlobAccessConditions{}, clientProvidedKey) + props, err := blobClient.GetProperties(t.ctx, &blob.GetPropertiesOptions{CPKInfo: t.cpkOptions.GetCPKInfo()}) // if there was no problem getting the properties, it means that we are looking at a single blob if err == nil { - if gCopyUtil.doesBlobRepresentAFolder(props.NewMetadata()) { - return props, false, true, nil + if gCopyUtil.doesBlobRepresentAFolder(props.Metadata) { + return &props, false, true, nil } - return props, true, false, err + return &props, true, false, err } return nil, false, false, err } func (t *blobTraverser) getBlobTags() (common.BlobTags, error) { - blobUrlParts := azblob.NewBlobURLParts(*t.rawURL) - blobUrlParts.BlobName = strings.TrimSuffix(blobUrlParts.BlobName, common.AZCOPY_PATH_SEPARATOR_STRING) + blobURLParts, err := blob.ParseURL(t.rawURL) + if err != nil { + return nil, err + } + blobURLParts.BlobName = strings.TrimSuffix(blobURLParts.BlobName, common.AZCOPY_PATH_SEPARATOR_STRING) // perform the check - blobURL := azblob.NewBlobURL(blobUrlParts.URL(), t.p) + blobClient, err := createBlobClientFromServiceClient(blobURLParts, t.serviceClient) + if err != nil { + return nil, err + } blobTagsMap := make(common.BlobTags) - blobGetTagsResp, err := blobURL.GetTags(t.ctx, nil) + blobGetTagsResp, err := blobClient.GetTags(t.ctx, nil) if err != nil { return blobTagsMap, err } for _, blobTag := range blobGetTagsResp.BlobTagSet { - blobTagsMap[url.QueryEscape(blobTag.Key)] = url.QueryEscape(blobTag.Value) + blobTagsMap[url.QueryEscape(*blobTag.Key)] = url.QueryEscape(*blobTag.Value) } return blobTagsMap, nil } func (t *blobTraverser) Traverse(preprocessor objectMorpher, processor objectProcessor, filters []ObjectFilter) (err error) { - blobUrlParts := azblob.NewBlobURLParts(*t.rawURL) + blobURLParts, err := blob.ParseURL(t.rawURL) + if err != nil { + return err + } // check if the url points to a single blob - blobProperties, isBlob, isDirStub, propErr := t.getPropertiesIfSingleBlob() + blobProperties, isBlob, isDirStub, err := t.getPropertiesIfSingleBlob() - if stgErr, ok := propErr.(azblob.StorageError); ok { + var respErr *azcore.ResponseError + if errors.As(err, &respErr) { // Don't error out unless it's a CPK error just yet // If it's a CPK error, we know it's a single blob and that we can't get the properties on it anyway. - if stgErr.ServiceCode() == common.CPK_ERROR_SERVICE_CODE { + if respErr.ErrorCode == string(bloberror.BlobUsesCustomerSpecifiedEncryption) { return errors.New("this blob uses customer provided encryption keys (CPK). At the moment, AzCopy does not support CPK-encrypted blobs. " + "If you wish to make use of this blob, we recommend using one of the Azure Storage SDKs") } - - if resp := stgErr.Response(); resp == nil { - return fmt.Errorf("cannot list files due to reason %s", stgErr) - } else { - if resp.StatusCode == 403 { // Some nature of auth error-- Whatever the user is pointing at, they don't have access to, regardless of whether it's a file or a dir stub. 
- return fmt.Errorf("cannot list files due to reason %s", stgErr) - } + if respErr.RawResponse == nil { + return fmt.Errorf("cannot list files due to reason %s", respErr) + } else if respErr.StatusCode == 403 { // Some nature of auth error-- Whatever the user is pointing at, they don't have access to, regardless of whether it's a file or a dir stub. + return fmt.Errorf("cannot list files due to reason %s", respErr) } } @@ -200,7 +212,7 @@ func (t *blobTraverser) Traverse(preprocessor objectMorpher, processor objectPro // 1. either we are targeting a single blob and the URL wasn't explicitly pointed to a virtual dir // 2. either we are scanning recursively with includeDirectoryStubs set to true, // then we add the stub blob that represents the directory - if (isBlob && !strings.HasSuffix(blobUrlParts.BlobName, common.AZCOPY_PATH_SEPARATOR_STRING)) || + if (isBlob && !strings.HasSuffix(blobURLParts.BlobName, common.AZCOPY_PATH_SEPARATOR_STRING)) || (t.includeDirectoryStubs && isDirStub && t.recursive) { // sanity checking so highlighting doesn't highlight things we're not worried about. if blobProperties == nil { @@ -209,20 +221,20 @@ func (t *blobTraverser) Traverse(preprocessor objectMorpher, processor objectPro if azcopyScanningLogger != nil { azcopyScanningLogger.Log(pipeline.LogDebug, "Detected the root as a blob.") - azcopyScanningLogger.Log(pipeline.LogDebug, fmt.Sprintf("Root entity type: %s", getEntityType(blobProperties.NewMetadata()))) + azcopyScanningLogger.Log(pipeline.LogDebug, fmt.Sprintf("Root entity type: %s", getEntityType(blobProperties.Metadata))) } storedObject := newStoredObject( preprocessor, - getObjectNameOnly(strings.TrimSuffix(blobUrlParts.BlobName, common.AZCOPY_PATH_SEPARATOR_STRING)), + getObjectNameOnly(strings.TrimSuffix(blobURLParts.BlobName, common.AZCOPY_PATH_SEPARATOR_STRING)), "", - getEntityType(blobProperties.NewMetadata()), - blobProperties.LastModified(), - blobProperties.ContentLength(), - blobProperties, + getEntityType(blobProperties.Metadata), + *blobProperties.LastModified, + *blobProperties.ContentLength, blobPropertiesResponseAdapter{blobProperties}, - common.FromAzBlobMetadataToCommonMetadata(blobProperties.NewMetadata()), // .NewMetadata() seems odd to call, but it does actually retrieve the metadata from the blob properties. - blobUrlParts.ContainerName, + blobPropertiesResponseAdapter{blobProperties}, + blobProperties.Metadata, + blobURLParts.ContainerName, ) if t.s2sPreserveSourceTags { @@ -245,7 +257,7 @@ func (t *blobTraverser) Traverse(preprocessor objectMorpher, processor objectPro if !t.includeDeleted && (isBlob || err != nil) { return err } - } else if blobUrlParts.BlobName == "" && t.preservePermissions.IsTruthy() { + } else if blobURLParts.BlobName == "" && t.preservePermissions.IsTruthy() { // if the root is a container and we're copying "folders", we should persist the ACLs there too. 
if azcopyScanningLogger != nil { azcopyScanningLogger.Log(pipeline.LogDebug, "Detected the root as a container.") @@ -261,7 +273,7 @@ func (t *blobTraverser) Traverse(preprocessor objectMorpher, processor objectPro noContentProps, noBlobProps, common.Metadata{}, - blobUrlParts.ContainerName, + blobURLParts.ContainerName, ) if t.incrementEnumerationCounter != nil { @@ -276,13 +288,12 @@ func (t *blobTraverser) Traverse(preprocessor objectMorpher, processor objectPro } // get the container URL so that we can list the blobs - containerRawURL := copyHandlerUtil{}.getContainerUrl(blobUrlParts) - containerURL := azblob.NewContainerURL(containerRawURL, t.p) + containerClient := t.serviceClient.NewContainerClient(blobURLParts.ContainerName) // get the search prefix to aid in the listing // example: for a url like https://test.blob.core.windows.net/test/foo/bar/bla // the search prefix would be foo/bar/bla - searchPrefix := blobUrlParts.BlobName + searchPrefix := blobURLParts.BlobName // append a slash if it is not already present // example: foo/bar/bla becomes foo/bar/bla/ so that we only list children of the virtual directory @@ -294,63 +305,64 @@ func (t *blobTraverser) Traverse(preprocessor objectMorpher, processor objectPro extraSearchPrefix := FilterSet(filters).GetEnumerationPreFilter(t.recursive) if t.parallelListing { - return t.parallelList(containerURL, blobUrlParts.ContainerName, searchPrefix, extraSearchPrefix, preprocessor, processor, filters) + return t.parallelList(containerClient, blobURLParts.ContainerName, searchPrefix, extraSearchPrefix, preprocessor, processor, filters) } - return t.serialList(containerURL, blobUrlParts.ContainerName, searchPrefix, extraSearchPrefix, preprocessor, processor, filters) + return t.serialList(containerClient, blobURLParts.ContainerName, searchPrefix, extraSearchPrefix, preprocessor, processor, filters) } -func (t *blobTraverser) parallelList(containerURL azblob.ContainerURL, containerName string, searchPrefix string, +func (t *blobTraverser) parallelList(containerClient *container.Client, containerName string, searchPrefix string, extraSearchPrefix string, preprocessor objectMorpher, processor objectProcessor, filters []ObjectFilter) error { // Define how to enumerate its contents // This func must be thread safe/goroutine safe enumerateOneDir := func(dir parallel.Directory, enqueueDir func(parallel.Directory), enqueueOutput func(parallel.DirectoryEntry, error)) error { currentDirPath := dir.(string) - for marker := (azblob.Marker{}); marker.NotDone(); { - lResp, err := containerURL.ListBlobsHierarchySegment(t.ctx, marker, "/", azblob.ListBlobsSegmentOptions{Prefix: currentDirPath, - Details: azblob.BlobListingDetails{Metadata: true, Tags: t.s2sPreserveSourceTags, Deleted: t.includeDeleted, Snapshots: t.includeSnapshot, Versions: t.includeVersion}}) + pager := containerClient.NewListBlobsHierarchyPager("/", &container.ListBlobsHierarchyOptions{ + Prefix: ¤tDirPath, + Include: container.ListBlobsInclude{Metadata: true, Tags: t.s2sPreserveSourceTags, Deleted: t.includeDeleted, Snapshots: t.includeSnapshot, Versions: t.includeVersion}, + }) + var marker *string + for pager.More() { + lResp, err := pager.NextPage(t.ctx) if err != nil { return fmt.Errorf("cannot list files due to reason %s", err) } - // queue up the sub virtual directories if recursive is true if t.recursive { for _, virtualDir := range lResp.Segment.BlobPrefixes { - enqueueDir(virtualDir.Name) + enqueueDir(*virtualDir.Name) if azcopyScanningLogger != nil { - 
azcopyScanningLogger.Log(pipeline.LogDebug, fmt.Sprintf("Enqueuing sub-directory %s for enumeration.", virtualDir.Name)) + azcopyScanningLogger.Log(pipeline.LogDebug, fmt.Sprintf("Enqueuing sub-directory %s for enumeration.", *virtualDir.Name)) } if t.includeDirectoryStubs { // try to get properties on the directory itself, since it's not listed in BlobItems - fblobURL := containerURL.NewBlobURL(strings.TrimSuffix(virtualDir.Name, common.AZCOPY_PATH_SEPARATOR_STRING)) - resp, err := fblobURL.GetProperties(t.ctx, azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{}) - folderRelativePath := strings.TrimSuffix(virtualDir.Name, common.AZCOPY_PATH_SEPARATOR_STRING) + blobClient := containerClient.NewBlobClient(strings.TrimSuffix(*virtualDir.Name, common.AZCOPY_PATH_SEPARATOR_STRING)) + pResp, err := blobClient.GetProperties(t.ctx, nil) + folderRelativePath := strings.TrimSuffix(*virtualDir.Name, common.AZCOPY_PATH_SEPARATOR_STRING) folderRelativePath = strings.TrimPrefix(folderRelativePath, searchPrefix) if err == nil { storedObject := newStoredObject( preprocessor, - getObjectNameOnly(strings.TrimSuffix(virtualDir.Name, common.AZCOPY_PATH_SEPARATOR_STRING)), + getObjectNameOnly(strings.TrimSuffix(*virtualDir.Name, common.AZCOPY_PATH_SEPARATOR_STRING)), folderRelativePath, common.EEntityType.Folder(), - resp.LastModified(), - resp.ContentLength(), - resp, - blobPropertiesResponseAdapter{resp}, - common.FromAzBlobMetadataToCommonMetadata(resp.NewMetadata()), + *pResp.LastModified, + *pResp.ContentLength, + blobPropertiesResponseAdapter{&pResp}, + blobPropertiesResponseAdapter{&pResp}, + pResp.Metadata, containerName, ) - storedObject.archiveStatus = azblob.ArchiveStatusType(resp.ArchiveStatus()) if t.s2sPreserveSourceTags { - var BlobTags *azblob.BlobTags - BlobTags, err = fblobURL.GetTags(t.ctx, nil) + tResp, err := blobClient.GetTags(t.ctx, nil) if err == nil { blobTagsMap := common.BlobTags{} - for _, blobTag := range BlobTags.BlobTagSet { - blobTagsMap[url.QueryEscape(blobTag.Key)] = url.QueryEscape(blobTag.Value) + for _, blobTag := range tResp.BlobTagSet { + blobTagsMap[url.QueryEscape(*blobTag.Key)] = url.QueryEscape(*blobTag.Value) } storedObject.blobTags = blobTagsMap } @@ -369,12 +381,12 @@ func (t *blobTraverser) parallelList(containerURL azblob.ContainerURL, container continue } - storedObject := t.createStoredObjectForBlob(preprocessor, blobInfo, strings.TrimPrefix(blobInfo.Name, searchPrefix), containerName) + storedObject := t.createStoredObjectForBlob(preprocessor, blobInfo, strings.TrimPrefix(*blobInfo.Name, searchPrefix), containerName) if t.s2sPreserveSourceTags && blobInfo.BlobTags != nil { blobTagsMap := common.BlobTags{} for _, blobTag := range blobInfo.BlobTags.BlobTagSet { - blobTagsMap[url.QueryEscape(blobTag.Key)] = url.QueryEscape(blobTag.Value) + blobTagsMap[url.QueryEscape(*blobTag.Key)] = url.QueryEscape(*blobTag.Value) } storedObject.blobTags = blobTagsMap } @@ -385,23 +397,22 @@ func (t *blobTraverser) parallelList(containerURL azblob.ContainerURL, container // if debug mode is on, note down the result, this is not going to be fast if azcopyScanningLogger != nil && azcopyScanningLogger.ShouldLog(pipeline.LogDebug) { tokenValue := "NONE" - if marker.Val != nil { - tokenValue = *marker.Val + if marker != nil { + tokenValue = *marker } var vdirListBuilder strings.Builder for _, virtualDir := range lResp.Segment.BlobPrefixes { - fmt.Fprintf(&vdirListBuilder, " %s,", virtualDir.Name) + fmt.Fprintf(&vdirListBuilder, " %s,", *virtualDir.Name) } var 
fileListBuilder strings.Builder for _, blobInfo := range lResp.Segment.BlobItems { - fmt.Fprintf(&fileListBuilder, " %s,", blobInfo.Name) + fmt.Fprintf(&fileListBuilder, " %s,", *blobInfo.Name) } msg := fmt.Sprintf("Enumerating %s with token %s. Sub-dirs:%s Files:%s", currentDirPath, tokenValue, vdirListBuilder.String(), fileListBuilder.String()) azcopyScanningLogger.Log(pipeline.LogDebug, msg) } - marker = lResp.NextMarker } return nil @@ -432,17 +443,23 @@ func (t *blobTraverser) parallelList(containerURL azblob.ContainerURL, container return nil } -func getEntityType(blobInfo azblob.Metadata) common.EntityType { - if _, isfolder := blobInfo["hdi_isfolder"]; isfolder { + +func getEntityType(metadata map[string]*string) common.EntityType { + // Note: We are just checking keys here, not their corresponding values. Is that safe? + if _, isfolder := metadata["hdi_isfolder"]; isfolder { return common.EEntityType.Folder() - } else if _, isSymlink := blobInfo["is_symlink"]; isSymlink { + } else if _, isfolder := metadata["Hdi_isfolder"]; isfolder { + return common.EEntityType.Folder() + } else if _, isSymlink := metadata["is_symlink"]; isSymlink { + return common.EEntityType.Symlink() + } else if _, isSymlink := metadata["Is_symlink"]; isSymlink { return common.EEntityType.Symlink() } return common.EEntityType.File() } -func (t *blobTraverser) createStoredObjectForBlob(preprocessor objectMorpher, blobInfo azblob.BlobItemInternal, relativePath string, containerName string) StoredObject { +func (t *blobTraverser) createStoredObjectForBlob(preprocessor objectMorpher, blobInfo *container.BlobItem, relativePath string, containerName string) StoredObject { adapter := blobPropertiesAdapter{blobInfo.Properties} if azcopyScanningLogger != nil { @@ -451,54 +468,54 @@ func (t *blobTraverser) createStoredObjectForBlob(preprocessor objectMorpher, bl object := newStoredObject( preprocessor, - getObjectNameOnly(blobInfo.Name), + getObjectNameOnly(*blobInfo.Name), relativePath, getEntityType(blobInfo.Metadata), - blobInfo.Properties.LastModified, + *blobInfo.Properties.LastModified, *blobInfo.Properties.ContentLength, adapter, adapter, // adapter satisfies both interfaces - common.FromAzBlobMetadataToCommonMetadata(blobInfo.Metadata), + blobInfo.Metadata, containerName, ) - object.blobDeleted = blobInfo.Deleted + object.blobDeleted = common.IffNotNil(blobInfo.Deleted, false) if t.includeDeleted && t.includeSnapshot { - object.blobSnapshotID = blobInfo.Snapshot + object.blobSnapshotID = common.IffNotNil(blobInfo.Snapshot, "") } else if t.includeDeleted && t.includeVersion && blobInfo.VersionID != nil { - object.blobVersionID = *blobInfo.VersionID + object.blobVersionID = common.IffNotNil(blobInfo.VersionID, "") } return object } -func (t *blobTraverser) doesBlobRepresentAFolder(metadata azblob.Metadata) bool { +func (t *blobTraverser) doesBlobRepresentAFolder(metadata map[string]*string) bool { util := copyHandlerUtil{} return util.doesBlobRepresentAFolder(metadata) && !(t.includeDirectoryStubs && t.recursive) } -func (t *blobTraverser) serialList(containerURL azblob.ContainerURL, containerName string, searchPrefix string, +func (t *blobTraverser) serialList(containerClient *container.Client, containerName string, searchPrefix string, extraSearchPrefix string, preprocessor objectMorpher, processor objectProcessor, filters []ObjectFilter) error { - for marker := (azblob.Marker{}); marker.NotDone(); { - // see the TO DO in GetEnumerationPreFilter if/when we make this more directory-aware - - // look for all blobs 
that start with the prefix - // Passing tags = true in the list call will save additional GetTags call - // TODO optimize for the case where recursive is off - listBlob, err := containerURL.ListBlobsFlatSegment(t.ctx, marker, - azblob.ListBlobsSegmentOptions{Prefix: searchPrefix + extraSearchPrefix, Details: azblob.BlobListingDetails{Metadata: true, Tags: t.s2sPreserveSourceTags, Deleted: t.includeDeleted, Snapshots: t.includeSnapshot, Versions: t.includeVersion}}) + // see the TO DO in GetEnumerationPreFilter if/when we make this more directory-aware + // TODO optimize for the case where recursive is off + prefix := searchPrefix + extraSearchPrefix + pager := containerClient.NewListBlobsFlatPager(&container.ListBlobsFlatOptions{ + Prefix: &prefix, + Include: container.ListBlobsInclude{Metadata: true, Tags: t.s2sPreserveSourceTags, Deleted: t.includeDeleted, Snapshots: t.includeSnapshot, Versions: t.includeVersion}, + }) + for pager.More() { + resp, err := pager.NextPage(t.ctx) if err != nil { return fmt.Errorf("cannot list blobs. Failed with error %s", err.Error()) } - // process the blobs returned in this result segment - for _, blobInfo := range listBlob.Segment.BlobItems { + for _, blobInfo := range resp.Segment.BlobItems { // if the blob represents a hdi folder, then skip it if t.doesBlobRepresentAFolder(blobInfo.Metadata) { continue } - relativePath := strings.TrimPrefix(blobInfo.Name, searchPrefix) + relativePath := strings.TrimPrefix(*blobInfo.Name, searchPrefix) // if recursive if !t.recursive && strings.Contains(relativePath, common.AZCOPY_PATH_SEPARATOR_STRING) { continue @@ -510,7 +527,7 @@ func (t *blobTraverser) serialList(containerURL azblob.ContainerURL, containerNa if t.s2sPreserveSourceTags && blobInfo.BlobTags != nil { blobTagsMap := common.BlobTags{} for _, blobTag := range blobInfo.BlobTags.BlobTagSet { - blobTagsMap[url.QueryEscape(blobTag.Key)] = url.QueryEscape(blobTag.Value) + blobTagsMap[url.QueryEscape(*blobTag.Key)] = url.QueryEscape(*blobTag.Value) } storedObject.blobTags = blobTagsMap } @@ -525,17 +542,15 @@ func (t *blobTraverser) serialList(containerURL azblob.ContainerURL, containerNa return processErr } } - - marker = listBlob.NextMarker } return nil } -func newBlobTraverser(rawURL *url.URL, p pipeline.Pipeline, ctx context.Context, recursive, includeDirectoryStubs bool, incrementEnumerationCounter enumerationCounterFunc, s2sPreserveSourceTags bool, cpkOptions common.CpkOptions, includeDeleted, includeSnapshot, includeVersion bool, preservePermissions common.PreservePermissionsOption, isDFS bool) (t *blobTraverser) { +func newBlobTraverser(rawURL string, serviceClient *service.Client, ctx context.Context, recursive, includeDirectoryStubs bool, incrementEnumerationCounter enumerationCounterFunc, s2sPreserveSourceTags bool, cpkOptions common.CpkOptions, includeDeleted, includeSnapshot, includeVersion bool, preservePermissions common.PreservePermissionsOption, isDFS bool) (t *blobTraverser) { t = &blobTraverser{ rawURL: rawURL, - p: p, + serviceClient: serviceClient, ctx: ctx, recursive: recursive, includeDirectoryStubs: includeDirectoryStubs, @@ -547,7 +562,7 @@ func newBlobTraverser(rawURL *url.URL, p pipeline.Pipeline, ctx context.Context, includeSnapshot: includeSnapshot, includeVersion: includeVersion, preservePermissions: preservePermissions, - isDFS: isDFS, + isDFS: isDFS, } disableHierarchicalScanning := strings.ToLower(glcm.GetEnvironmentVariable(common.EEnvironmentVariable.DisableHierarchicalScanning())) @@ -564,3 +579,15 @@ func 
newBlobTraverser(rawURL *url.URL, p pipeline.Pipeline, ctx context.Context, } return } + +func createBlobClientFromServiceClient(blobURLParts blob.URLParts, client *service.Client) (*blob.Client, error) { + containerClient := client.NewContainerClient(blobURLParts.ContainerName) + blobClient := containerClient.NewBlobClient(blobURLParts.BlobName) + if blobURLParts.Snapshot != "" { + return blobClient.WithSnapshot(blobURLParts.Snapshot) + } + if blobURLParts.VersionID != "" { + return blobClient.WithVersionID(blobURLParts.VersionID) + } + return blobClient, nil +} diff --git a/cmd/zc_traverser_blob_account.go b/cmd/zc_traverser_blob_account.go index 43cde213c..98f6eb091 100644 --- a/cmd/zc_traverser_blob_account.go +++ b/cmd/zc_traverser_blob_account.go @@ -23,17 +23,13 @@ package cmd import ( "context" "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service" "github.com/Azure/azure-storage-azcopy/v10/common" - "net/url" - - "github.com/Azure/azure-pipeline-go/pipeline" - "github.com/Azure/azure-storage-blob-go/azblob" ) // Enumerates an entire blob account, looking into each matching container as it goes type blobAccountTraverser struct { - accountURL azblob.ServiceURL - p pipeline.Pipeline + serviceClient *service.Client ctx context.Context containerPattern string cachedContainers []string @@ -44,7 +40,7 @@ type blobAccountTraverser struct { s2sPreserveSourceTags bool - cpkOptions common.CpkOptions + cpkOptions common.CpkOptions preservePermissions common.PreservePermissionsOption isDFS bool @@ -57,20 +53,17 @@ func (t *blobAccountTraverser) IsDirectory(_ bool) (bool, error) { func (t *blobAccountTraverser) listContainers() ([]string, error) { // a nil list also returns 0 if len(t.cachedContainers) == 0 { - marker := azblob.Marker{} cList := make([]string, 0) - - for marker.NotDone() { - resp, err := t.accountURL.ListContainersSegment(t.ctx, marker, azblob.ListContainersSegmentOptions{}) - + pager := t.serviceClient.NewListContainersPager(nil) + for pager.More() { + resp, err := pager.NextPage(t.ctx) if err != nil { return nil, err } - for _, v := range resp.ContainerItems { // Match a pattern for the container name and the container name only. 
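
listContainers above, like the flat and hierarchy listings earlier in zc_traverser_blob.go, follows the Track 2 pager idiom: request a pager from the client, loop on More()/NextPage(), and dereference the pointer fields of each returned item instead of carrying an explicit Marker. A minimal sketch of that loop, assuming an already constructed *service.Client; listEverything is an illustrative name, not a helper in this PR.

package sketch

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"
)

// listEverything enumerates all containers in the account and, for each one,
// all blobs under a prefix. Continuation tokens are handled by the pager, so
// there is no explicit Marker loop as in the Track 1 SDK.
func listEverything(ctx context.Context, svc *service.Client, prefix string) error {
	containerPager := svc.NewListContainersPager(nil)
	for containerPager.More() {
		page, err := containerPager.NextPage(ctx)
		if err != nil {
			return fmt.Errorf("cannot list containers: %w", err)
		}
		for _, item := range page.ContainerItems {
			cc := svc.NewContainerClient(*item.Name)

			blobPager := cc.NewListBlobsFlatPager(&container.ListBlobsFlatOptions{
				Prefix:  &prefix,
				Include: container.ListBlobsInclude{Metadata: true},
			})
			for blobPager.More() {
				resp, err := blobPager.NextPage(ctx)
				if err != nil {
					return fmt.Errorf("cannot list blobs in %s: %w", *item.Name, err)
				}
				for _, b := range resp.Segment.BlobItems {
					// Item fields are pointers in Track 2, so dereference them.
					fmt.Printf("%s/%s (%d bytes)\n", *item.Name, *b.Name, *b.Properties.ContentLength)
				}
			}
		}
	}
	return nil
}
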
if t.containerPattern != "" { - if ok, err := containerNameMatchesPattern(v.Name, t.containerPattern); err != nil { + if ok, err := containerNameMatchesPattern(*v.Name, t.containerPattern); err != nil { // Break if the pattern is invalid return nil, err } else if !ok { @@ -79,10 +72,8 @@ func (t *blobAccountTraverser) listContainers() ([]string, error) { } } - cList = append(cList, v.Name) + cList = append(cList, *v.Name) } - - marker = resp.NextMarker } t.cachedContainers = cList @@ -102,8 +93,8 @@ func (t *blobAccountTraverser) Traverse(preprocessor objectMorpher, processor ob } for _, v := range cList { - containerURL := t.accountURL.NewContainerURL(v).URL() - containerTraverser := newBlobTraverser(&containerURL, t.p, t.ctx, true, t.includeDirectoryStubs, t.incrementEnumerationCounter, t.s2sPreserveSourceTags, t.cpkOptions, false, false, false, t.preservePermissions, t.isDFS) + containerURL := t.serviceClient.NewContainerClient(v).URL() + containerTraverser := newBlobTraverser(containerURL, t.serviceClient, t.ctx, true, t.includeDirectoryStubs, t.incrementEnumerationCounter, t.s2sPreserveSourceTags, t.cpkOptions, false, false, false, t.preservePermissions, t.isDFS) preprocessorForThisChild := preprocessor.FollowedBy(newContainerDecorator(v)) @@ -118,26 +109,17 @@ func (t *blobAccountTraverser) Traverse(preprocessor objectMorpher, processor ob return nil } -func newBlobAccountTraverser(rawURL *url.URL, p pipeline.Pipeline, ctx context.Context, includeDirectoryStubs bool, incrementEnumerationCounter enumerationCounterFunc, s2sPreserveSourceTags bool, cpkOptions common.CpkOptions, preservePermissions common.PreservePermissionsOption, isDFS bool) (t *blobAccountTraverser) { - bURLParts := azblob.NewBlobURLParts(*rawURL) - cPattern := bURLParts.ContainerName - - // Strip the container name away and treat it as a pattern - if bURLParts.ContainerName != "" { - bURLParts.ContainerName = "" - } - +func newBlobAccountTraverser(serviceClient *service.Client, container string, ctx context.Context, includeDirectoryStubs bool, incrementEnumerationCounter enumerationCounterFunc, s2sPreserveSourceTags bool, cpkOptions common.CpkOptions, preservePermissions common.PreservePermissionsOption, isDFS bool) (t *blobAccountTraverser) { t = &blobAccountTraverser{ - p: p, ctx: ctx, incrementEnumerationCounter: incrementEnumerationCounter, - accountURL: azblob.NewServiceURL(bURLParts.URL(), p), - containerPattern: cPattern, + serviceClient: serviceClient, + containerPattern: container, includeDirectoryStubs: includeDirectoryStubs, s2sPreserveSourceTags: s2sPreserveSourceTags, cpkOptions: cpkOptions, preservePermissions: preservePermissions, - isDFS: isDFS, + isDFS: isDFS, } return diff --git a/cmd/zc_traverser_blob_versions.go b/cmd/zc_traverser_blob_versions.go index c280a7964..6c6d10789 100644 --- a/cmd/zc_traverser_blob_versions.go +++ b/cmd/zc_traverser_blob_versions.go @@ -22,17 +22,16 @@ package cmd import ( "context" - "net/url" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service" "strings" - "github.com/Azure/azure-pipeline-go/pipeline" "github.com/Azure/azure-storage-azcopy/v10/common" - "github.com/Azure/azure-storage-blob-go/azblob" ) type blobVersionsTraverser struct { - rawURL *url.URL - p pipeline.Pipeline + rawURL string + serviceClient *service.Client ctx context.Context includeDirectoryStubs bool incrementEnumerationCounter enumerationCounterFunc @@ -53,24 +52,29 @@ func (t *blobVersionsTraverser) IsDirectory(isSource 
bool) (bool, error) { return false, nil } -func (t *blobVersionsTraverser) getBlobProperties(versionID string) (props *azblob.BlobGetPropertiesResponse, err error) { - blobURLParts := azblob.NewBlobURLParts(*t.rawURL) +func (t *blobVersionsTraverser) getBlobProperties(versionID string) (*blob.GetPropertiesResponse, error) { + blobURLParts, err := blob.ParseURL(t.rawURL) + if err != nil { + return nil, err + } blobURLParts.BlobName = strings.TrimSuffix(blobURLParts.BlobName, common.AZCOPY_PATH_SEPARATOR_STRING) if versionID != "" { blobURLParts.VersionID = versionID } - blobURL := azblob.NewBlobURL(blobURLParts.URL(), t.p) - clientProvidedKey := azblob.ClientProvidedKeyOptions{} - if t.cpkOptions.IsSourceEncrypted { - clientProvidedKey = common.GetClientProvidedKey(t.cpkOptions) + blobClient, err := createBlobClientFromServiceClient(blobURLParts, t.serviceClient) + if err != nil { + return nil, err } - props, err = blobURL.GetProperties(t.ctx, azblob.BlobAccessConditions{}, clientProvidedKey) - return props, err + props, err := blobClient.GetProperties(t.ctx, &blob.GetPropertiesOptions{CPKInfo: t.cpkOptions.GetCPKInfo()}) + return &props, err } func (t *blobVersionsTraverser) Traverse(preprocessor objectMorpher, processor objectProcessor, filters []ObjectFilter) (err error) { - blobURLParts := azblob.NewBlobURLParts(*t.rawURL) + blobURLParts, err := blob.ParseURL(t.rawURL) + if err != nil { + return err + } versionID, ok := <-t.listOfVersionIds for ; ok; versionID, ok = <-t.listOfVersionIds { @@ -89,11 +93,11 @@ func (t *blobVersionsTraverser) Traverse(preprocessor objectMorpher, processor o getObjectNameOnly(strings.TrimSuffix(blobURLParts.BlobName, common.AZCOPY_PATH_SEPARATOR_STRING)), "", common.EEntityType.File(), - blobProperties.LastModified(), - blobProperties.ContentLength(), - blobProperties, + *blobProperties.LastModified, + *blobProperties.ContentLength, + blobPropertiesResponseAdapter{blobProperties}, blobPropertiesResponseAdapter{blobProperties}, - common.FromAzBlobMetadataToCommonMetadata(blobProperties.NewMetadata()), + blobProperties.Metadata, blobURLParts.ContainerName, ) storedObject.blobVersionID = versionID @@ -110,13 +114,13 @@ func (t *blobVersionsTraverser) Traverse(preprocessor objectMorpher, processor o return nil } -func newBlobVersionsTraverser(rawURL *url.URL, p pipeline.Pipeline, ctx context.Context, - recursive, includeDirectoryStubs bool, incrementEnumerationCounter enumerationCounterFunc, +func newBlobVersionsTraverser(rawURL string, serviceClient *service.Client, ctx context.Context, + includeDirectoryStubs bool, incrementEnumerationCounter enumerationCounterFunc, listOfVersionIds chan string, cpkOptions common.CpkOptions) (t *blobVersionsTraverser) { return &blobVersionsTraverser{ rawURL: rawURL, - p: p, + serviceClient: serviceClient, ctx: ctx, includeDirectoryStubs: includeDirectoryStubs, incrementEnumerationCounter: incrementEnumerationCounter, diff --git a/cmd/zc_traverser_file.go b/cmd/zc_traverser_file.go index 5304940a7..65fd28f4f 100644 --- a/cmd/zc_traverser_file.go +++ b/cmd/zc_traverser_file.go @@ -23,24 +23,26 @@ package cmd import ( "context" "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/directory" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/service" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/share" "github.com/Azure/azure-storage-azcopy/v10/common/parallel" - "net/url" "runtime" "strings" "time" "github.com/Azure/azure-pipeline-go/pipeline" 
- "github.com/Azure/azure-storage-file-go/azfile" - "github.com/Azure/azure-storage-azcopy/v10/common" ) const trailingDotErrMsg = "File share contains file/directory %s with a trailing dot but the trailing dot parameter was set to Disable, meaning these files could be potentially treated in an unsafe manner." +const invalidNameErrorMsg = "Skipping File share path %s, as it is not a valid Blob or Windows name. Rename the object and retry the transfer" // allow us to iterate through a path pointing to the file endpoint type fileTraverser struct { - rawURL *url.URL - p pipeline.Pipeline + rawURL string + serviceClient *service.Client ctx context.Context recursive bool getProperties bool @@ -51,20 +53,75 @@ type fileTraverser struct { destination *common.Location } +func createShareClientFromServiceClient(fileURLParts file.URLParts, client *service.Client) (*share.Client, error) { + shareClient := client.NewShareClient(fileURLParts.ShareName) + if fileURLParts.ShareSnapshot != "" { + return shareClient.WithSnapshot(fileURLParts.ShareSnapshot) + } + return shareClient, nil +} + +func createDirectoryClientFromServiceClient(fileURLParts file.URLParts, client *service.Client) (*directory.Client, error) { + shareClient, err := createShareClientFromServiceClient(fileURLParts, client) + if err != nil { + return nil, err + } + directoryClient := shareClient.NewDirectoryClient(fileURLParts.DirectoryOrFilePath) + return directoryClient, err +} + +func createFileClientFromServiceClient(fileURLParts file.URLParts, client *service.Client) (*file.Client, error) { + shareClient, err := createShareClientFromServiceClient(fileURLParts, client) + if err != nil { + return nil, err + } + fileClient := shareClient.NewRootDirectoryClient().NewFileClient(fileURLParts.DirectoryOrFilePath) + return fileClient, err +} + func (t *fileTraverser) IsDirectory(bool) (bool, error) { - return copyHandlerUtil{}.urlIsAzureFileDirectory(t.ctx, t.rawURL, t.p), nil // This handles all of the fanciness for us. + // Azure file share case + if gCopyUtil.urlIsContainerOrVirtualDirectory(t.rawURL) { + return true, nil + } + + // Need make request to ensure if it's directory + fileURLParts, err := file.ParseURL(t.rawURL) + if err != nil { + return false, err + } + directoryClient, err := createDirectoryClientFromServiceClient(fileURLParts, t.serviceClient) + if err != nil { + return false, err + } + _, err = directoryClient.GetProperties(t.ctx, nil) + if err != nil { + if azcopyScanningLogger != nil { + azcopyScanningLogger.Log(pipeline.LogWarning, fmt.Sprintf("Failed to check if the destination is a folder or a file (Azure Files). 
Assuming the destination is a file: %s", err)) + } + return false, nil + } + + return true, nil } -func (t *fileTraverser) getPropertiesIfSingleFile() (*azfile.FileGetPropertiesResponse, bool) { - fileURL := azfile.NewFileURL(*t.rawURL, t.p) - fileProps, filePropertiesErr := fileURL.GetProperties(t.ctx) +func (t *fileTraverser) getPropertiesIfSingleFile() (*file.GetPropertiesResponse, bool, error) { + fileURLParts, err := file.ParseURL(t.rawURL) + if err != nil { + return nil, false, err + } + fileClient, err := createFileClientFromServiceClient(fileURLParts, t.serviceClient) + if err != nil { + return nil, false, err + } + fileProps, filePropertiesErr := fileClient.GetProperties(t.ctx, nil) // if there was no problem getting the properties, it means that we are looking at a single file if filePropertiesErr == nil { - return fileProps, true + return &fileProps, true, nil } - return nil, false + return nil, false, nil } func (t *fileTraverser) Traverse(preprocessor objectMorpher, processor objectProcessor, filters []ObjectFilter) (err error) { @@ -81,8 +138,10 @@ func (t *fileTraverser) Traverse(preprocessor objectMorpher, processor objectPro } return false } - invalidNameErrorMsg := "Skipping File share path %s, as it is not a valid Blob or Windows name. Rename the object and retry the transfer" - targetURLParts := azfile.NewFileURLParts(*t.rawURL) + targetURLParts, err := file.ParseURL(t.rawURL) + if err != nil { + return err + } // if not pointing to a share, check if we are pointing to a single file if targetURLParts.DirectoryOrFilePath != "" { @@ -94,7 +153,10 @@ func (t *fileTraverser) Traverse(preprocessor objectMorpher, processor objectPro azcopyScanningLogger.Log(pipeline.LogWarning, fmt.Sprintf(trailingDotErrMsg, getObjectNameOnly(targetURLParts.DirectoryOrFilePath))) } // check if the url points to a single file - fileProperties, isFile := t.getPropertiesIfSingleFile() + fileProperties, isFile, err := t.getPropertiesIfSingleFile() + if err != nil { + return err + } if isFile { if azcopyScanningLogger != nil { azcopyScanningLogger.Log(pipeline.LogDebug, "Detected the root as a file.") @@ -105,16 +167,15 @@ func (t *fileTraverser) Traverse(preprocessor objectMorpher, processor objectPro getObjectNameOnly(targetURLParts.DirectoryOrFilePath), "", common.EEntityType.File(), - fileProperties.LastModified(), - fileProperties.ContentLength(), - fileProperties, + *fileProperties.LastModified, + *fileProperties.ContentLength, + shareFilePropertiesAdapter{fileProperties}, noBlobProps, - common.FromAzFileMetadataToCommonMetadata(fileProperties.NewMetadata()), // .NewMetadata() seems odd to call here, but it does actually obtain the metadata. 
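
The new createShareClientFromServiceClient, createDirectoryClientFromServiceClient, and createFileClientFromServiceClient helpers, together with getPropertiesIfSingleFile, all derive azfile clients from a single *service.Client after parsing the raw URL. A minimal sketch of that chain under the same assumptions; describeFileTarget is an illustrative name only.

package sketch

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/service"
)

// describeFileTarget parses a file URL into its share and path components,
// builds a file client off the service client, and reads properties.
// GetProperties fields are pointers in Track 2, so they are dereferenced here.
func describeFileTarget(ctx context.Context, svc *service.Client, rawURL string) error {
	parts, err := file.ParseURL(rawURL)
	if err != nil {
		return err
	}

	shareClient := svc.NewShareClient(parts.ShareName)
	if parts.ShareSnapshot != "" {
		// A share snapshot produces a new client rather than a mutated URL.
		if shareClient, err = shareClient.WithSnapshot(parts.ShareSnapshot); err != nil {
			return err
		}
	}

	fileClient := shareClient.NewRootDirectoryClient().NewFileClient(parts.DirectoryOrFilePath)
	props, err := fileClient.GetProperties(ctx, nil)
	if err != nil {
		return fmt.Errorf("target is not a readable file: %w", err)
	}

	fmt.Printf("share=%s path=%s size=%d lastWrite=%v\n",
		parts.ShareName, parts.DirectoryOrFilePath, *props.ContentLength, *props.FileLastWriteTime)
	return nil
}
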
+ fileProperties.Metadata, targetURLParts.ShareName, ) - smbLastWriteTime, _ := time.Parse(azfile.ISO8601, fileProperties.FileLastWriteTime()) // no need to worry about error since we'll only check against it if it's non-zero for sync - storedObject.smbLastModifiedTime = smbLastWriteTime + storedObject.smbLastModifiedTime = *fileProperties.FileLastWriteTime if t.incrementEnumerationCounter != nil { t.incrementEnumerationCounter(common.EEntityType.File()) @@ -132,21 +193,24 @@ func (t *fileTraverser) Traverse(preprocessor objectMorpher, processor objectPro convertToStoredObject := func(input parallel.InputObject) (parallel.OutputObject, error) { f := input.(azfileEntity) // compute the relative path of the file with respect to the target directory - fileURLParts := azfile.NewFileURLParts(f.url) - relativePath := strings.TrimPrefix(fileURLParts.DirectoryOrFilePath, targetURLParts.DirectoryOrFilePath) + fileURLParts, err := file.ParseURL(f.url) + if err != nil { + return nil, err + } + targetPath := strings.TrimSuffix(targetURLParts.DirectoryOrFilePath, common.AZCOPY_PATH_SEPARATOR_STRING) + relativePath := strings.TrimPrefix(fileURLParts.DirectoryOrFilePath, targetPath) relativePath = strings.TrimPrefix(relativePath, common.AZCOPY_PATH_SEPARATOR_STRING) size := f.contentLength - // We need to omit some properties if we don't get properties - lmt := time.Time{} - smbLMT := time.Time{} + var lmt time.Time + var smbLMT time.Time var contentProps contentPropsProvider = noContentProps - var meta common.Metadata = nil + var metadata common.Metadata // Only get the properties if we're told to if t.getProperties { - var fullProperties azfilePropertiesAdapter + var fullProperties filePropsProvider fullProperties, err = f.propertyGetter(t.ctx) if err != nil { return StoredObject{ @@ -154,17 +218,15 @@ func (t *fileTraverser) Traverse(preprocessor objectMorpher, processor objectPro }, err } lmt = fullProperties.LastModified() - smbLMT, _ = time.Parse(azfile.ISO8601, fullProperties.FileLastWriteTime()) - if f.entityType == common.EEntityType.File() { - contentProps = fullProperties.(*azfile.FileGetPropertiesResponse) // only files have content props. Folders don't. - // Get an up-to-date size, because it's documented that the size returned by the listing might not be up-to-date, - // if an SMB client has modified by not yet closed the file. (See https://docs.microsoft.com/en-us/rest/api/storageservices/list-directories-and-files) - // Doing this here makes sure that our size is just as up-to-date as our LMT . - // (If s2s-detect-source-changed is false, then this code won't run. If if its false, we don't check for modifications anyway, - // so it's fair to assume that the size will stay equal to that returned at by the listing operation) - size = fullProperties.(*azfile.FileGetPropertiesResponse).ContentLength() - } - meta = common.FromAzFileMetadataToCommonMetadata(fullProperties.NewMetadata()) + smbLMT = fullProperties.FileLastWriteTime() + contentProps = fullProperties + // Get an up-to-date size, because it's documented that the size returned by the listing might not be up-to-date, + // if an SMB client has modified by not yet closed the file. (See https://docs.microsoft.com/en-us/rest/api/storageservices/list-directories-and-files) + // Doing this here makes sure that our size is just as up-to-date as our LMT . + // (If s2s-detect-source-changed is false, then this code won't run. 
If if its false, we don't check for modifications anyway, + // so it's fair to assume that the size will stay equal to that returned at by the listing operation) + size = fullProperties.ContentLength() + metadata = fullProperties.Metadata() } obj := newStoredObject( preprocessor, @@ -175,7 +237,7 @@ func (t *fileTraverser) Traverse(preprocessor objectMorpher, processor objectPro size, contentProps, noBlobProps, - meta, + metadata, targetURLParts.ShareName, ) @@ -194,13 +256,16 @@ func (t *fileTraverser) Traverse(preprocessor objectMorpher, processor objectPro } // get the directory URL so that we can list the files - directoryURL := azfile.NewDirectoryURL(targetURLParts.URL(), t.p) + directoryClient, err := createDirectoryClientFromServiceClient(targetURLParts, t.serviceClient) + if err != nil { + return err + } // Our rule is that enumerators of folder-aware sources should include the root folder's properties. // So include the root dir/share in the enumeration results, if it exists or is just the share root. - _, err = directoryURL.GetProperties(t.ctx) + _, err = directoryClient.GetProperties(t.ctx, nil) if err == nil || targetURLParts.DirectoryOrFilePath == "" { - s, err := convertToStoredObject(newAzFileRootFolderEntity(directoryURL, "")) + s, err := convertToStoredObject(newAzFileRootDirectoryEntity(directoryClient, "")) if err != nil { return err } @@ -213,39 +278,41 @@ func (t *fileTraverser) Traverse(preprocessor objectMorpher, processor objectPro // Define how to enumerate its contents // This func must be threadsafe/goroutine safe enumerateOneDir := func(dir parallel.Directory, enqueueDir func(parallel.Directory), enqueueOutput func(parallel.DirectoryEntry, error)) error { - currentDirURL := dir.(azfile.DirectoryURL) - for marker := (azfile.Marker{}); marker.NotDone(); { - lResp, err := currentDirURL.ListFilesAndDirectoriesSegment(t.ctx, marker, azfile.ListFilesAndDirectoriesOptions{}) + currentDirectoryClient := dir.(*directory.Client) + pager := currentDirectoryClient.NewListFilesAndDirectoriesPager(nil) + var marker *string + for pager.More() { + lResp, err := pager.NextPage(t.ctx) if err != nil { return fmt.Errorf("cannot list files due to reason %s", err) } - for _, fileInfo := range lResp.FileItems { - if invalidBlobOrWindowsName(fileInfo.Name) { + for _, fileInfo := range lResp.Segment.Files { + if invalidBlobOrWindowsName(*fileInfo.Name) { //Throw a warning on console and continue - WarnStdoutAndScanningLog(fmt.Sprintf(invalidNameErrorMsg, fileInfo.Name)) + WarnStdoutAndScanningLog(fmt.Sprintf(invalidNameErrorMsg, *fileInfo.Name)) continue } else { - if t.trailingDot != common.ETrailingDotOption.Enable() && strings.HasSuffix(fileInfo.Name, ".") { - azcopyScanningLogger.Log(pipeline.LogWarning, fmt.Sprintf(trailingDotErrMsg, fileInfo.Name)) + if t.trailingDot != common.ETrailingDotOption.Enable() && strings.HasSuffix(*fileInfo.Name, ".") { + azcopyScanningLogger.Log(pipeline.LogWarning, fmt.Sprintf(trailingDotErrMsg, *fileInfo.Name)) } } - enqueueOutput(newAzFileFileEntity(currentDirURL, fileInfo), nil) + enqueueOutput(newAzFileFileEntity(currentDirectoryClient, fileInfo), nil) } - for _, dirInfo := range lResp.DirectoryItems { - if invalidBlobOrWindowsName(dirInfo.Name) { + for _, dirInfo := range lResp.Segment.Directories { + if invalidBlobOrWindowsName(*dirInfo.Name) { //Throw a warning on console and continue - WarnStdoutAndScanningLog(fmt.Sprintf(invalidNameErrorMsg, dirInfo.Name)) + WarnStdoutAndScanningLog(fmt.Sprintf(invalidNameErrorMsg, *dirInfo.Name)) continue } 
else { - if t.trailingDot != common.ETrailingDotOption.Enable() && strings.HasSuffix(dirInfo.Name, ".") { - azcopyScanningLogger.Log(pipeline.LogWarning, fmt.Sprintf(trailingDotErrMsg, dirInfo.Name)) + if t.trailingDot != common.ETrailingDotOption.Enable() && strings.HasSuffix(*dirInfo.Name, ".") { + azcopyScanningLogger.Log(pipeline.LogWarning, fmt.Sprintf(trailingDotErrMsg, *dirInfo.Name)) } } - enqueueOutput(newAzFileChildFolderEntity(currentDirURL, dirInfo.Name), nil) + enqueueOutput(newAzFileSubdirectoryEntity(currentDirectoryClient, *dirInfo.Name), nil) if t.recursive { // If recursive is turned on, add sub directories to be processed - enqueueDir(currentDirURL.NewDirectoryURL(dirInfo.Name)) + enqueueDir(currentDirectoryClient.NewSubdirectoryClient(*dirInfo.Name)) } } @@ -253,21 +320,25 @@ func (t *fileTraverser) Traverse(preprocessor objectMorpher, processor objectPro // if debug mode is on, note down the result, this is not going to be fast if azcopyScanningLogger != nil && azcopyScanningLogger.ShouldLog(pipeline.LogDebug) { tokenValue := "NONE" - if marker.Val != nil { - tokenValue = *marker.Val + if marker != nil { + tokenValue = *marker } var dirListBuilder strings.Builder - for _, dir := range lResp.DirectoryItems { - fmt.Fprintf(&dirListBuilder, " %s,", dir.Name) + for _, dir := range lResp.Segment.Directories { + fmt.Fprintf(&dirListBuilder, " %s,", *dir.Name) } var fileListBuilder strings.Builder - for _, fileInfo := range lResp.FileItems { - fmt.Fprintf(&fileListBuilder, " %s,", fileInfo.Name) + for _, fileInfo := range lResp.Segment.Files { + fmt.Fprintf(&fileListBuilder, " %s,", *fileInfo.Name) } - dirName := azfile.NewFileURLParts(currentDirURL.URL()).DirectoryOrFilePath - msg := fmt.Sprintf("Enumerating %s with token %s. Sub-dirs:%s Files:%s", dirName, + fileURLParts, err := file.ParseURL(currentDirectoryClient.URL()) + if err != nil { + return err + } + directoryName := fileURLParts.DirectoryOrFilePath + msg := fmt.Sprintf("Enumerating %s with token %s. 
Sub-dirs:%s Files:%s", directoryName, tokenValue, dirListBuilder.String(), fileListBuilder.String()) azcopyScanningLogger.Log(pipeline.LogDebug, msg) } @@ -284,7 +355,7 @@ func (t *fileTraverser) Traverse(preprocessor objectMorpher, processor objectPro workerContext, cancelWorkers := context.WithCancel(t.ctx) - cCrawled := parallel.Crawl(workerContext, directoryURL, enumerateOneDir, parallelism) + cCrawled := parallel.Crawl(workerContext, directoryClient, enumerateOneDir, parallelism) cTransformed := parallel.Transform(workerContext, cCrawled, convertToStoredObject, parallelism) @@ -312,8 +383,17 @@ func (t *fileTraverser) Traverse(preprocessor objectMorpher, processor objectPro return } -func newFileTraverser(rawURL *url.URL, p pipeline.Pipeline, ctx context.Context, recursive, getProperties bool, incrementEnumerationCounter enumerationCounterFunc, trailingDot common.TrailingDotOption, destination *common.Location) (t *fileTraverser) { - t = &fileTraverser{rawURL: rawURL, p: p, ctx: ctx, recursive: recursive, getProperties: getProperties, incrementEnumerationCounter: incrementEnumerationCounter, trailingDot: trailingDot, destination: destination} +func newFileTraverser(rawURL string, serviceClient *service.Client, ctx context.Context, recursive, getProperties bool, incrementEnumerationCounter enumerationCounterFunc, trailingDot common.TrailingDotOption, destination *common.Location) (t *fileTraverser) { + t = &fileTraverser{ + rawURL: rawURL, + serviceClient: serviceClient, + ctx: ctx, + recursive: recursive, + getProperties: getProperties, + incrementEnumerationCounter: incrementEnumerationCounter, + trailingDot: trailingDot, + destination: destination, + } return } @@ -321,40 +401,45 @@ func newFileTraverser(rawURL *url.URL, p pipeline.Pipeline, ctx context.Context, type azfileEntity struct { name string contentLength int64 - url url.URL - propertyGetter func(ctx context.Context) (azfilePropertiesAdapter, error) + url string + propertyGetter func(ctx context.Context) (filePropsProvider, error) entityType common.EntityType } -func newAzFileFileEntity(containingDir azfile.DirectoryURL, fileInfo azfile.FileItem) azfileEntity { - fu := containingDir.NewFileURL(fileInfo.Name) +func newAzFileFileEntity(parentDirectoryClient *directory.Client, fileInfo *directory.File) azfileEntity { + fileClient := parentDirectoryClient.NewFileClient(*fileInfo.Name) return azfileEntity{ - fileInfo.Name, - fileInfo.Properties.ContentLength, - fu.URL(), - func(ctx context.Context) (azfilePropertiesAdapter, error) { return fu.GetProperties(ctx) }, + *fileInfo.Name, + *fileInfo.Properties.ContentLength, + fileClient.URL(), + func(ctx context.Context) (filePropsProvider, error) { + fileProperties, err := fileClient.GetProperties(ctx, nil) + if err != nil { + return nil, err + } + return shareFilePropertiesAdapter{&fileProperties}, nil + }, common.EEntityType.File(), } } -func newAzFileChildFolderEntity(containingDir azfile.DirectoryURL, dirName string) azfileEntity { - du := containingDir.NewDirectoryURL(dirName) - return newAzFileRootFolderEntity(du, dirName) // now that we have du, the logic is same as if it was the root +func newAzFileSubdirectoryEntity(parentDirectoryClient *directory.Client, dirName string) azfileEntity { + directoryClient := parentDirectoryClient.NewSubdirectoryClient(dirName) + return newAzFileRootDirectoryEntity(directoryClient, dirName) // now that we have directoryClient, the logic is same as if it was the root } -func newAzFileRootFolderEntity(rootDir azfile.DirectoryURL, name string) 
azfileEntity { +func newAzFileRootDirectoryEntity(directoryClient *directory.Client, name string) azfileEntity { return azfileEntity{ name, 0, - rootDir.URL(), - func(ctx context.Context) (azfilePropertiesAdapter, error) { return rootDir.GetProperties(ctx) }, + directoryClient.URL(), + func(ctx context.Context) (filePropsProvider, error) { + directoryProperties, err := directoryClient.GetProperties(ctx, nil) + if err != nil { + return nil, err + } + return shareDirectoryPropertiesAdapter{&directoryProperties}, nil + }, common.EEntityType.Folder(), } } - -// azureFilesMetadataAdapter allows polymorphic treatment of File and Folder properties, since both implement the method -type azfilePropertiesAdapter interface { - NewMetadata() azfile.Metadata - LastModified() time.Time - FileLastWriteTime() string -} diff --git a/cmd/zc_traverser_file_account.go b/cmd/zc_traverser_file_account.go index 24ff30286..7ff7e9b4e 100644 --- a/cmd/zc_traverser_file_account.go +++ b/cmd/zc_traverser_file_account.go @@ -23,17 +23,13 @@ package cmd import ( "context" "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/service" "github.com/Azure/azure-storage-azcopy/v10/common" - "net/url" - - "github.com/Azure/azure-pipeline-go/pipeline" - "github.com/Azure/azure-storage-file-go/azfile" ) // Enumerates an entire files account, looking into each matching share as it goes type fileAccountTraverser struct { - accountURL azfile.ServiceURL - p pipeline.Pipeline + serviceClient *service.Client ctx context.Context sharePattern string cachedShares []string @@ -51,20 +47,20 @@ func (t *fileAccountTraverser) IsDirectory(isSource bool) (bool, error) { func (t *fileAccountTraverser) listContainers() ([]string, error) { if len(t.cachedShares) == 0 { - marker := azfile.Marker{} shareList := make([]string, 0) - for marker.NotDone() { - resp, err := t.accountURL.ListSharesSegment(t.ctx, marker, azfile.ListSharesOptions{}) + pager := t.serviceClient.NewListSharesPager(nil) + for pager.More() { + resp, err := pager.NextPage(t.ctx) if err != nil { return nil, err } - for _, v := range resp.ShareItems { + for _, v := range resp.Shares { // Match a pattern for the share name and the share name only if t.sharePattern != "" { - if ok, err := containerNameMatchesPattern(v.Name, t.sharePattern); err != nil { + if ok, err := containerNameMatchesPattern(*v.Name, t.sharePattern); err != nil { // Break if the pattern is invalid return nil, err } else if !ok { @@ -73,10 +69,8 @@ func (t *fileAccountTraverser) listContainers() ([]string, error) { } } - shareList = append(shareList, v.Name) + shareList = append(shareList, *v.Name) } - - marker = resp.NextMarker } t.cachedShares = shareList @@ -95,8 +89,8 @@ func (t *fileAccountTraverser) Traverse(preprocessor objectMorpher, processor ob } for _, v := range shareList { - shareURL := t.accountURL.NewShareURL(v).URL() - shareTraverser := newFileTraverser(&shareURL, t.p, t.ctx, true, t.getProperties, t.incrementEnumerationCounter, t.trailingDot, t.destination) + shareURL := t.serviceClient.NewShareClient(v).URL() + shareTraverser := newFileTraverser(shareURL, t.serviceClient, t.ctx, true, t.getProperties, t.incrementEnumerationCounter, t.trailingDot, t.destination) preprocessorForThisChild := preprocessor.FollowedBy(newContainerDecorator(v)) @@ -111,14 +105,15 @@ func (t *fileAccountTraverser) Traverse(preprocessor objectMorpher, processor ob return nil } -func newFileAccountTraverser(rawURL *url.URL, p pipeline.Pipeline, ctx context.Context, getProperties bool, incrementEnumerationCounter 
enumerationCounterFunc, trailingDot common.TrailingDotOption, destination *common.Location) (t *fileAccountTraverser) { - fURLparts := azfile.NewFileURLParts(*rawURL) - sPattern := fURLparts.ShareName - - if fURLparts.ShareName != "" { - fURLparts.ShareName = "" +func newFileAccountTraverser(serviceClient *service.Client, shareName string, ctx context.Context, getProperties bool, incrementEnumerationCounter enumerationCounterFunc, trailingDot common.TrailingDotOption, destination *common.Location) (t *fileAccountTraverser) { + t = &fileAccountTraverser{ + ctx: ctx, + incrementEnumerationCounter: incrementEnumerationCounter, + serviceClient: serviceClient, + sharePattern: shareName, + getProperties: getProperties, + trailingDot: trailingDot, + destination: destination, } - - t = &fileAccountTraverser{p: p, ctx: ctx, incrementEnumerationCounter: incrementEnumerationCounter, accountURL: azfile.NewServiceURL(fURLparts.URL(), p), sharePattern: sPattern, getProperties: getProperties, trailingDot: trailingDot, destination: destination} return } diff --git a/cmd/zc_traverser_local.go b/cmd/zc_traverser_local.go index ee47a141c..7f3eb7064 100755 --- a/cmd/zc_traverser_local.go +++ b/cmd/zc_traverser_local.go @@ -585,7 +585,7 @@ func (t *localTraverser) prepareHashingThreads(preprocessor objectMorpher, proce fi.Size(), noContentProps, // Local MD5s are computed in the STE, and other props don't apply to local files noBlobProps, - noMetdata, + noMetadata, "", // Local has no such thing as containers ), processor, // the original processor is wrapped in the mutex processor. @@ -666,7 +666,7 @@ func (t *localTraverser) Traverse(preprocessor objectMorpher, processor objectPr singleFileInfo.Size(), noContentProps, // Local MD5s are computed in the STE, and other props don't apply to local files noBlobProps, - noMetdata, + noMetadata, "", // Local has no such thing as containers ), hashingProcessor, // hashingProcessor handles the mutex wrapper @@ -720,7 +720,7 @@ func (t *localTraverser) Traverse(preprocessor objectMorpher, processor objectPr fileInfo.Size(), noContentProps, // Local MD5s are computed in the STE, and other props don't apply to local files noBlobProps, - noMetdata, + noMetadata, "", // Local has no such thing as containers ), hashingProcessor, // hashingProcessor handles the mutex wrapper @@ -796,7 +796,7 @@ func (t *localTraverser) Traverse(preprocessor objectMorpher, processor objectPr fileInfo.Size(), noContentProps, // Local MD5s are computed in the STE, and other props don't apply to local files noBlobProps, - noMetdata, + noMetadata, "", // Local has no such thing as containers ), hashingProcessor, // hashingProcessor handles the mutex wrapper diff --git a/cmd/zt_copy_blob_download_test.go b/cmd/zt_copy_blob_download_test.go index dca152f7f..38945e41f 100644 --- a/cmd/zt_copy_blob_download_test.go +++ b/cmd/zt_copy_blob_download_test.go @@ -22,7 +22,6 @@ package cmd import ( "encoding/json" - "github.com/Azure/azure-pipeline-go/pipeline" "github.com/stretchr/testify/assert" "os" "path" @@ -32,13 +31,13 @@ import ( "testing" "github.com/Azure/azure-storage-azcopy/v10/common" - "github.com/Azure/azure-storage-blob-go/azblob" ) func TestInferredStripTopDirDownload(t *testing.T) { a := assert.New(t) - bsu := getBSU() - cURL, cName := createNewContainer(a, bsu) + bsc := getBlobServiceClient() + cc, cName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) blobNames := []string{ "*", // File name that we want to retain compatibility with @@ -50,7 +49,7 @@ func 
TestInferredStripTopDirDownload(t *testing.T) { // ----- TEST # 1: Test inferred as false by using escaped * ----- // set up container name - scenarioHelper{}.generateBlobsFromList(a, cURL, blobNames, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, cc, blobNames, blockBlobDefaultData) dstDirName := scenarioHelper{}.generateLocalDirectory(a) @@ -165,25 +164,24 @@ func TestInferredStripTopDirDownload(t *testing.T) { // Test downloading the entire account. func TestDownloadAccount(t *testing.T) { a := assert.New(t) - bsu := getBSU() - rawBSU := scenarioHelper{}.getRawBlobServiceURLWithSAS(a) - p, err := InitPipeline(ctx, common.ELocation.Blob(), common.CredentialInfo{CredentialType: common.ECredentialType.Anonymous()}, pipeline.LogNone, common.ETrailingDotOption.Enable(), common.ELocation.Blob()) - a.Nil(err) + bsc := getBlobServiceClient() + rawBSC := scenarioHelper{}.getBlobServiceClientWithSAS(a) // Just in case there are no existing containers... - curl, _ := createNewContainer(a, bsu) - scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, curl, "") + cc, name := createNewContainer(a, bsc) + defer deleteContainer(a, cc) + scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, cc, "") // Traverse the account ahead of time and determine the relative paths for testing. relPaths := make([]string, 0) // Use a map for easy lookup - blobTraverser := newBlobAccountTraverser(&rawBSU, p, ctx, false, func(common.EntityType) {}, false, common.CpkOptions{}, common.EPreservePermissionsOption.None(), false) + blobTraverser := newBlobAccountTraverser(rawBSC, name, ctx, false, func(common.EntityType) {}, false, common.CpkOptions{}, common.EPreservePermissionsOption.None(), false) processor := func(object StoredObject) error { // Append the container name to the relative path relPath := "/" + object.ContainerName + "/" + object.relativePath relPaths = append(relPaths, relPath) return nil } - err = blobTraverser.Traverse(noPreProccessor, processor, []ObjectFilter{}) + err := blobTraverser.Traverse(noPreProccessor, processor, []ObjectFilter{}) a.Nil(err) // set up a destination @@ -195,7 +193,7 @@ func TestDownloadAccount(t *testing.T) { Rpc = mockedRPC.intercept mockedRPC.init() - raw := getDefaultCopyRawInput(rawBSU.String(), dstDirName) + raw := getDefaultCopyRawInput(rawBSC.URL(), dstDirName) raw.recursive = true runCopyAndVerify(a, raw, func(err error) { @@ -208,24 +206,23 @@ func TestDownloadAccount(t *testing.T) { // Test downloading the entire account. func TestDownloadAccountWildcard(t *testing.T) { a := assert.New(t) - bsu := getBSU() - rawBSU := scenarioHelper{}.getRawBlobServiceURLWithSAS(a) - p, err := InitPipeline(ctx, common.ELocation.Blob(), common.CredentialInfo{CredentialType: common.ECredentialType.Anonymous()}, pipeline.LogNone, common.ETrailingDotOption.Enable(), common.ELocation.Blob()) - a.Nil(err) + bsc := getBlobServiceClient() + rawBSC := scenarioHelper{}.getBlobServiceClientWithSAS(a) // Create a unique container to be targeted. 
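
The test updates in this file replace getBSU() and azblob.ContainerURL with getBlobServiceClient() and *container.Client, so setup becomes: build a container client, Create it, upload fixtures, and defer Delete. A minimal sketch of that shape, assuming a SAS or emulator endpoint; the endpoint, names, and the setUpTestContainer helper are placeholders rather than the suite's real helpers.

package sketch

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"
)

// setUpTestContainer creates a container, seeds it with a few block blobs,
// and returns a cleanup func, mirroring the createNewContainer /
// generateBlobsFromList pattern these tests now rely on.
func setUpTestContainer(ctx context.Context, serviceURL, containerName string, blobNames []string) (cleanup func(), err error) {
	svc, err := service.NewClientWithNoCredential(serviceURL, nil) // SAS or emulator URL assumed
	if err != nil {
		return nil, err
	}

	cc := svc.NewContainerClient(containerName)
	if _, err = cc.Create(ctx, nil); err != nil {
		return nil, err
	}

	for _, name := range blobNames {
		// UploadBuffer replaces the Track 1 UploadBufferToBlockBlob helper.
		if _, err = cc.NewBlockBlobClient(name).UploadBuffer(ctx, []byte("test data"), nil); err != nil {
			return nil, err
		}
	}

	return func() { _, _ = cc.Delete(ctx, nil) }, nil
}
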
cname := generateName("blah-unique-blah", 63) - curl := bsu.NewContainerURL(cname) - _, err = curl.Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone) + curl := bsc.NewContainerClient(cname) + _, err := curl.Create(ctx, nil) a.Nil(err) + defer deleteContainer(a, curl) scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, curl, "") // update the raw BSU to match the unique container name - rawBSU.Path = "/blah-unique-blah*" + container := "blah-unique-blah*" // Traverse the account ahead of time and determine the relative paths for testing. relPaths := make([]string, 0) // Use a map for easy lookup - blobTraverser := newBlobAccountTraverser(&rawBSU, p, ctx, false, func(common.EntityType) {}, false, common.CpkOptions{}, common.EPreservePermissionsOption.None(), false) + blobTraverser := newBlobAccountTraverser(rawBSC, container, ctx, false, func(common.EntityType) {}, false, common.CpkOptions{}, common.EPreservePermissionsOption.None(), false) processor := func(object StoredObject) error { // Append the container name to the relative path relPath := "/" + object.ContainerName + "/" + object.relativePath @@ -244,7 +241,7 @@ func TestDownloadAccountWildcard(t *testing.T) { Rpc = mockedRPC.intercept mockedRPC.init() - raw := getDefaultCopyRawInput(rawBSU.String(), dstDirName) + raw := getDefaultCopyRawInput(rawBSC.URL(), dstDirName) raw.recursive = true runCopyAndVerify(a, raw, func(err error) { @@ -257,15 +254,15 @@ func TestDownloadAccountWildcard(t *testing.T) { // regular blob->local file download func TestDownloadSingleBlobToFile(t *testing.T) { a := assert.New(t) - bsu := getBSU() - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) + bsc := getBlobServiceClient() + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) for _, blobName := range []string{"singleblobisbest", "打麻将.txt", "%4509%4254$85140&"} { // set up the container with a single blob blobList := []string{blobName} - scenarioHelper{}.generateBlobsFromList(a, containerURL, blobList, blockBlobDefaultData) - a.NotNil(containerURL) + scenarioHelper{}.generateBlobsFromList(a, cc, blobList, blockBlobDefaultData) + a.NotNil(cc) // set up the destination as a single file dstDirName := scenarioHelper{}.generateLocalDirectory(a) @@ -312,13 +309,13 @@ func TestDownloadSingleBlobToFile(t *testing.T) { // regular container->directory download func TestDownloadBlobContainer(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(a, bsu) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, "") - defer deleteContainer(a, containerURL) - a.NotNil(containerURL) + cc, containerName := createNewContainer(a, bsc) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, cc, "") + defer deleteContainer(a, cc) + a.NotNil(cc) a.NotEqual(0, len(blobList)) // set up the destination with an empty folder @@ -358,14 +355,14 @@ func TestDownloadBlobContainer(t *testing.T) { // regular vdir->dir download func TestDownloadBlobVirtualDirectory(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() vdirName := "vdir1" // set up the container with numerous blobs - containerURL, containerName := createNewContainer(a, bsu) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, vdirName+common.AZCOPY_PATH_SEPARATOR_STRING) - defer deleteContainer(a, containerURL) 
- a.NotNil(containerURL) + cc, containerName := createNewContainer(a, bsc) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, cc, vdirName+common.AZCOPY_PATH_SEPARATOR_STRING) + defer deleteContainer(a, cc) + a.NotNil(cc) a.NotEqual(0, len(blobList)) // set up the destination with an empty folder @@ -409,18 +406,18 @@ func TestDownloadBlobVirtualDirectory(t *testing.T) { // update test after re-writing copy enumerators func TestDownloadBlobContainerWithPattern(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(a, bsu) - blobsToIgnore := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, "") - defer deleteContainer(a, containerURL) - a.NotNil(containerURL) + cc, containerName := createNewContainer(a, bsc) + blobsToIgnore := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, cc, "") + defer deleteContainer(a, cc) + a.NotNil(cc) a.NotEqual(0, len(blobsToIgnore)) // add special blobs that we wish to include blobsToInclude := []string{"important.pdf", "includeSub/amazing.pdf", "includeSub/wow/amazing.pdf"} - scenarioHelper{}.generateBlobsFromList(a, containerURL, blobsToInclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, cc, blobsToInclude, blockBlobDefaultData) // set up the destination with an empty folder dstDirName := scenarioHelper{}.generateLocalDirectory(a) @@ -468,18 +465,18 @@ func TestDownloadBlobContainerWithPattern(t *testing.T) { // test for include with one regular expression func TestDownloadBlobContainerWithRegexInclude(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up the container with blobs - containerURL, containerName := createNewContainer(a, bsu) - blobsToIgnore := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, "") - defer deleteContainer(a, containerURL) - a.NotNil(containerURL) + cc, containerName := createNewContainer(a, bsc) + blobsToIgnore := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, cc, "") + defer deleteContainer(a, cc) + a.NotNil(cc) a.NotEqual(0, len(blobsToIgnore)) // add blobs that we wish to include blobsToInclude := []string{"tessssssssssssst.txt", "subOne/tetingessssss.jpeg", "subOne/tessssst/hi.pdf"} - scenarioHelper{}.generateBlobsFromList(a, containerURL, blobsToInclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, cc, blobsToInclude, blockBlobDefaultData) // set up the destination with an empty folder dstDirName := scenarioHelper{}.generateLocalDirectory(a) @@ -520,18 +517,18 @@ func TestDownloadBlobContainerWithRegexInclude(t *testing.T) { // test multiple regular expression with include func TestDownloadBlobContainerWithMultRegexInclude(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up the container with blobs - containerURL, containerName := createNewContainer(a, bsu) - blobsToIgnore := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, "") - defer deleteContainer(a, containerURL) - a.NotNil(containerURL) + cc, containerName := createNewContainer(a, bsc) + blobsToIgnore := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, cc, "") + defer deleteContainer(a, cc) + a.NotNil(cc) a.NotEqual(0, len(blobsToIgnore)) // add blobs that we wish to include blobsToInclude := []string{"tessssssssssssst.txt", "zxcfile.txt", "subOne/tetingessssss.jpeg", "subOne/subTwo/tessssst.pdf"} - 
scenarioHelper{}.generateBlobsFromList(a, containerURL, blobsToInclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, cc, blobsToInclude, blockBlobDefaultData) // set up the destination with an empty folder dstDirName := scenarioHelper{}.generateLocalDirectory(a) @@ -573,14 +570,14 @@ func TestDownloadBlobContainerWithMultRegexInclude(t *testing.T) { // testing empty expressions for both include and exclude func TestDownloadBlobContainerWithEmptyRegex(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up the container with blobs - containerURL, containerName := createNewContainer(a, bsu) + cc, containerName := createNewContainer(a, bsc) // test empty regex flag so all blobs will be included since there is no filter - blobsToInclude := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, "") - defer deleteContainer(a, containerURL) - a.NotNil(containerURL) + blobsToInclude := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, cc, "") + defer deleteContainer(a, cc) + a.NotNil(cc) a.NotEqual(0, len(blobsToInclude)) // set up the destination with an empty folder @@ -615,18 +612,18 @@ func TestDownloadBlobContainerWithEmptyRegex(t *testing.T) { // testing exclude with one regular expression func TestDownloadBlobContainerWithRegexExclude(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up the container with blobs - containerURL, containerName := createNewContainer(a, bsu) - blobsToInclude := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, "") - defer deleteContainer(a, containerURL) - a.NotNil(containerURL) + cc, containerName := createNewContainer(a, bsc) + blobsToInclude := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, cc, "") + defer deleteContainer(a, cc) + a.NotNil(cc) a.NotEqual(0, len(blobsToInclude)) // add blobs that we wish to exclude blobsToIgnore := []string{"tessssssssssssst.txt", "subOne/tetingessssss.jpeg", "subOne/subTwo/tessssst.pdf"} - scenarioHelper{}.generateBlobsFromList(a, containerURL, blobsToIgnore, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, cc, blobsToIgnore, blockBlobDefaultData) // set up the destination with an empty folder dstDirName := scenarioHelper{}.generateLocalDirectory(a) @@ -667,18 +664,18 @@ func TestDownloadBlobContainerWithRegexExclude(t *testing.T) { // testing exclude with multiple regular expressions func TestDownloadBlobContainerWithMultRegexExclude(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up the container with blobs - containerURL, containerName := createNewContainer(a, bsu) - blobsToInclude := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, "") - defer deleteContainer(a, containerURL) - a.NotNil(containerURL) + cc, containerName := createNewContainer(a, bsc) + blobsToInclude := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, cc, "") + defer deleteContainer(a, cc) + a.NotNil(cc) a.NotEqual(0, len(blobsToInclude)) // add blobs that we wish to exclude blobsToIgnore := []string{"tessssssssssssst.txt", "subOne/dogs.jpeg", "subOne/subTwo/tessssst.pdf"} - scenarioHelper{}.generateBlobsFromList(a, containerURL, blobsToIgnore, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, cc, blobsToIgnore, blockBlobDefaultData) // set up the destination with an empty folder dstDirName := scenarioHelper{}.generateLocalDirectory(a) @@ -718,7 +715,7 @@ func 
TestDownloadBlobContainerWithMultRegexExclude(t *testing.T) { func TestDryrunCopyLocalToBlob(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up the local source blobsToInclude := []string{"AzURE2021.jpeg", "sub1/dir2/HELLO-4.txt", "sub1/test/testing.txt"} @@ -728,9 +725,9 @@ func TestDryrunCopyLocalToBlob(t *testing.T) { a.NotNil(srcDirName) // set up the destination container - dstContainerURL, dstContainerName := createNewContainer(a, bsu) - defer deleteContainer(a, dstContainerURL) - a.NotNil(dstContainerURL) + dstContainerClient, dstContainerName := createNewContainer(a, bsc) + defer deleteContainer(a, dstContainerClient) + a.NotNil(dstContainerClient) // set up interceptor mockedRPC := interceptor{} @@ -754,7 +751,7 @@ func TestDryrunCopyLocalToBlob(t *testing.T) { for i := 0; i < len(blobsToInclude); i++ { a.True(strings.Contains(msg[i], "DRYRUN: copy")) a.True(strings.Contains(msg[i], srcDirName)) - a.True(strings.Contains(msg[i], dstContainerURL.String())) + a.True(strings.Contains(msg[i], dstContainerClient.URL())) } a.True(testDryrunStatements(blobsToInclude, msg)) @@ -763,19 +760,19 @@ func TestDryrunCopyLocalToBlob(t *testing.T) { func TestDryrunCopyBlobToBlob(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up src container - srcContainerURL, srcContainerName := createNewContainer(a, bsu) - defer deleteContainer(a, srcContainerURL) + srcContainerClient, srcContainerName := createNewContainer(a, bsc) + defer deleteContainer(a, srcContainerClient) blobsToInclude := []string{"AzURE2021.jpeg", "sub1/dir2/HELLO-4.txt", "sub1/test/testing.txt"} - scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, blobsToInclude, blockBlobDefaultData) - a.NotNil(srcContainerURL) + scenarioHelper{}.generateBlobsFromList(a, srcContainerClient, blobsToInclude, blockBlobDefaultData) + a.NotNil(srcContainerClient) // set up the destination - dstContainerURL, dstContainerName := createNewContainer(a, bsu) - defer deleteContainer(a, dstContainerURL) - a.NotNil(dstContainerURL) + dstContainerClient, dstContainerName := createNewContainer(a, bsc) + defer deleteContainer(a, dstContainerClient) + a.NotNil(dstContainerClient) // set up interceptor mockedRPC := interceptor{} @@ -799,8 +796,8 @@ func TestDryrunCopyBlobToBlob(t *testing.T) { msg := mockedLcm.GatherAllLogs(mockedLcm.dryrunLog) for i := 0; i < len(blobsToInclude); i++ { a.True(strings.Contains(msg[i], "DRYRUN: copy")) - a.True(strings.Contains(msg[i], srcContainerURL.String())) - a.True(strings.Contains(msg[i], dstContainerURL.String())) + a.True(strings.Contains(msg[i], srcContainerClient.URL())) + a.True(strings.Contains(msg[i], dstContainerClient.URL())) } a.True(testDryrunStatements(blobsToInclude, msg)) @@ -809,18 +806,18 @@ func TestDryrunCopyBlobToBlob(t *testing.T) { func TestDryrunCopyBlobToBlobJson(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up src container - srcContainerURL, srcContainerName := createNewContainer(a, bsu) - defer deleteContainer(a, srcContainerURL) + srcContainerClient, srcContainerName := createNewContainer(a, bsc) + defer deleteContainer(a, srcContainerClient) blobsToInclude := []string{"AzURE2021.jpeg"} - scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, blobsToInclude, blockBlobDefaultData) - a.NotNil(srcContainerURL) + scenarioHelper{}.generateBlobsFromList(a, srcContainerClient, blobsToInclude, blockBlobDefaultData) + a.NotNil(srcContainerClient) // set up the 
destination - dstContainerURL, dstContainerName := createNewContainer(a, bsu) - defer deleteContainer(a, dstContainerURL) - a.NotNil(dstContainerURL) + dstContainerClient, dstContainerName := createNewContainer(a, bsc) + defer deleteContainer(a, dstContainerClient) + a.NotNil(dstContainerClient) // set up interceptor mockedRPC := interceptor{} diff --git a/cmd/zt_copy_blob_file_test.go b/cmd/zt_copy_blob_file_test.go index ccbfae601..666372acb 100644 --- a/cmd/zt_copy_blob_file_test.go +++ b/cmd/zt_copy_blob_file_test.go @@ -22,31 +22,30 @@ package cmd import ( "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" "github.com/stretchr/testify/assert" "strings" "testing" - - "github.com/Azure/azure-storage-blob-go/azblob" ) // TestBlobAccountCopyToFileShareS2S actually ends up testing the entire account->container scenario as that is not dependent on destination or source. func TestBlobAccountCopyToFileShareS2S(t *testing.T) { a := assert.New(t) - bsu := getBSU() - fsu := getFSU() + bsc := getBlobServiceClient() + fsc := getFileServiceClient() // Ensure no containers with similar naming schemes exist - cleanBlobAccount(a, bsu) + cleanBlobAccount(a, bsc) - containerSources := map[string]azblob.ContainerURL{} + containerSources := map[string]*container.Client{} expectedTransfers := make([]string, 0) for k := range make([]bool, 5) { name := generateName(fmt.Sprintf("blobacc-file%dcontainer", k), 63) // create the container - containerSources[name] = bsu.NewContainerURL(name) - _, err := containerSources[name].Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone) + containerSources[name] = bsc.NewContainerClient(name) + _, err := containerSources[name].Create(ctx, nil) a.Nil(err) // Generate the remote scenario @@ -60,7 +59,7 @@ func TestBlobAccountCopyToFileShareS2S(t *testing.T) { } // generate destination share - dstShareURL, dstShareName := createNewAzureShare(a, fsu) + dstShareURL, dstShareName := createNewShare(a, fsc) defer deleteShare(a, dstShareURL) // initialize mocked RPC @@ -86,19 +85,19 @@ func TestBlobAccountCopyToFileShareS2S(t *testing.T) { // TestBlobCopyToFileS2SImplicitDstShare uses a service-level URL on the destination to implicitly create the destination share. func TestBlobCopyToFileS2SImplicitDstShare(t *testing.T) { a := assert.New(t) - bsu := getBSU() - fsu := getFSU() + bsc := getBlobServiceClient() + fsc := getFileServiceClient() // create source container - srcContainerURL, srcContainerName := createNewContainer(a, bsu) - defer deleteContainer(a, srcContainerURL) + srcContainerClient, srcContainerName := createNewContainer(a, bsc) + defer deleteContainer(a, srcContainerClient) // prepare a destination container URL to be deleted. 
- dstShareURL := fsu.NewShareURL(srcContainerName) - defer deleteShare(a, dstShareURL) + dstShareClient := fsc.NewShareClient(srcContainerName) + defer deleteShare(a, dstShareClient) // create a scenario on the source container - fileList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerURL, "blobFileImplicitDest") + fileList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerClient, "blobFileImplicitDest") a.NotZero(len(fileList)) // Ensure that at least one blob is present // initialize the mocked RPC @@ -116,7 +115,7 @@ func TestBlobCopyToFileS2SImplicitDstShare(t *testing.T) { runCopyAndVerify(a, raw, func(err error) { a.Nil(err) // Check there was no error - _, err = dstShareURL.GetProperties(ctx) + _, err = dstShareClient.GetProperties(ctx, nil) a.Nil(err) // Ensure the destination share exists // Ensure the transfers were scheduled @@ -126,18 +125,18 @@ func TestBlobCopyToFileS2SImplicitDstShare(t *testing.T) { func TestBlobCopyToFileS2SWithSingleFile(t *testing.T) { a := assert.New(t) - bsu := getBSU() - fsu := getFSU() + bsc := getBlobServiceClient() + fsu := getFileServiceClient() - srcContainerURL, srcContainerName := createNewContainer(a, bsu) - dstShareURL, dstShareName := createNewAzureShare(a, fsu) - defer deleteContainer(a, srcContainerURL) - defer deleteShare(a, dstShareURL) + srcContainerClient, srcContainerName := createNewContainer(a, bsc) + dstShareClient, dstShareName := createNewShare(a, fsu) + defer deleteContainer(a, srcContainerClient) + defer deleteShare(a, dstShareClient) // copy to explicit destination for _, fileName := range []string{"singlefileisbest", "打麻将.txt", "%4509%4254$85140&"} { // set up the source container with a single file - scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, []string{fileName}, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, srcContainerClient, []string{fileName}, blockBlobDefaultData) // set up the interceptor mockedRPC := interceptor{} @@ -183,17 +182,17 @@ func TestBlobCopyToFileS2SWithSingleFile(t *testing.T) { func TestContainerToShareCopyS2S(t *testing.T) { a := assert.New(t) - bsu := getBSU() - fsu := getFSU() + bsc := getBlobServiceClient() + fsc := getFileServiceClient() // Create source container and destination share, schedule their deletion - srcContainerURL, srcContainerName := createNewContainer(a, bsu) - dstShareURL, dstShareName := createNewAzureShare(a, fsu) - defer deleteContainer(a, srcContainerURL) - defer deleteShare(a, dstShareURL) + srcContainerClient, srcContainerName := createNewContainer(a, bsc) + dstShareClient, dstShareName := createNewShare(a, fsc) + defer deleteContainer(a, srcContainerClient) + defer deleteShare(a, dstShareClient) // set up the source container with numerous files - fileList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerURL, "") + fileList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerClient, "") a.NotZero(len(fileList)) // set up the interceptor @@ -228,14 +227,14 @@ func TestContainerToShareCopyS2S(t *testing.T) { func TestBlobFileCopyS2SWithIncludeAndIncludeDirFlag(t *testing.T) { a := assert.New(t) - bsu := getBSU() - fsu := getFSU() + bsc := getBlobServiceClient() + fsc := getFileServiceClient() // generate source container and destination fileshare - srcContainerURL, srcContainerName := createNewContainer(a, bsu) - dstShareURL, dstShareName := createNewAzureShare(a, fsu) - defer deleteContainer(a, srcContainerURL) - defer deleteShare(a, dstShareURL) + 
srcContainerClient, srcContainerName := createNewContainer(a, bsc) + dstShareClient, dstShareName := createNewShare(a, fsc) + defer deleteContainer(a, srcContainerClient) + defer deleteShare(a, dstShareClient) // create file list to include against fileList := []string{ @@ -265,7 +264,7 @@ func TestBlobFileCopyS2SWithIncludeAndIncludeDirFlag(t *testing.T) { // set up filters and generate blobs includeString := "*.pdf;*.jpeg;exactName" includePathString := "subdir/" - scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, fileList, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, srcContainerClient, fileList, blockBlobDefaultData) // set up the interceptor mockedRPC := interceptor{} @@ -294,14 +293,14 @@ func TestBlobFileCopyS2SWithIncludeAndIncludeDirFlag(t *testing.T) { func TestBlobToFileCopyS2SWithExcludeAndExcludeDirFlag(t *testing.T) { a := assert.New(t) - bsu := getBSU() - fsu := getFSU() + bsc := getBlobServiceClient() + fsc := getFileServiceClient() // generate source container and destination fileshare - srcContainerURL, srcContainerName := createNewContainer(a, bsu) - dstShareURL, dstShareName := createNewAzureShare(a, fsu) - defer deleteContainer(a, srcContainerURL) - defer deleteShare(a, dstShareURL) + srcContainerClient, srcContainerName := createNewContainer(a, bsc) + dstShareClient, dstShareName := createNewShare(a, fsc) + defer deleteContainer(a, srcContainerClient) + defer deleteShare(a, dstShareClient) // create file list to include against fileList := []string{ @@ -328,7 +327,7 @@ func TestBlobToFileCopyS2SWithExcludeAndExcludeDirFlag(t *testing.T) { // set up filters and generate blobs excludeString := "*.pdf;*.jpeg;exactName" excludePathString := "subdir/" - scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, fileList, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, srcContainerClient, fileList, blockBlobDefaultData) // set up the interceptor mockedRPC := interceptor{} @@ -357,14 +356,14 @@ func TestBlobToFileCopyS2SWithExcludeAndExcludeDirFlag(t *testing.T) { func TestBlobToFileCopyS2SIncludeExcludeMix(t *testing.T) { a := assert.New(t) - bsu := getBSU() - fsu := getFSU() + bsc := getBlobServiceClient() + fsc := getFileServiceClient() // generate source container and destination fileshare - srcContainerURL, srcContainerName := createNewContainer(a, bsu) - dstShareURL, dstShareName := createNewAzureShare(a, fsu) - defer deleteContainer(a, srcContainerURL) - defer deleteShare(a, dstShareURL) + srcContainerClient, srcContainerName := createNewContainer(a, bsc) + dstShareClient, dstShareName := createNewShare(a, fsc) + defer deleteContainer(a, srcContainerClient) + defer deleteShare(a, dstShareClient) // create file list to include against fileList := []string{ @@ -390,7 +389,7 @@ func TestBlobToFileCopyS2SIncludeExcludeMix(t *testing.T) { // set up filters and generate blobs includeString := "*.pdf;*.jpeg;exactName" excludeString := "ohno*;why*;exactName" - scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, fileList, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, srcContainerClient, fileList, blockBlobDefaultData) // set up the interceptor mockedRPC := interceptor{} @@ -413,18 +412,18 @@ func TestBlobToFileCopyS2SIncludeExcludeMix(t *testing.T) { func TestBlobToFileCopyS2SWithDirectory(t *testing.T) { a := assert.New(t) - bsu := getBSU() - fsu := getFSU() + bsc := getBlobServiceClient() + fsc := getFileServiceClient() // create container and share - srcContainerURL, srcContainerName := 
createNewContainer(a, bsu) - dstShareURL, dstShareName := createNewAzureShare(a, fsu) - defer deleteContainer(a, srcContainerURL) - defer deleteShare(a, dstShareURL) + srcContainerClient, srcContainerName := createNewContainer(a, bsc) + dstShareClient, dstShareName := createNewShare(a, fsc) + defer deleteContainer(a, srcContainerClient) + defer deleteShare(a, dstShareClient) // create source scenario dirName := "copyme" - fileList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerURL, dirName+"/") + fileList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerClient, dirName+"/") a.NotZero(len(fileList)) // initialize mocked RPC diff --git a/cmd/zt_copy_blob_upload_test.go b/cmd/zt_copy_blob_upload_test.go index e12a3bbfb..64b38f8b0 100644 --- a/cmd/zt_copy_blob_upload_test.go +++ b/cmd/zt_copy_blob_upload_test.go @@ -33,9 +33,9 @@ import ( func TestIncludeDirSimple(t *testing.T) { a := assert.New(t) - bsu := getBSU() - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) + bsc := getBlobServiceClient() + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) files := []string{ "filea", @@ -72,9 +72,9 @@ func TestIncludeDirSimple(t *testing.T) { func TestIncludeDir(t *testing.T) { a := assert.New(t) - bsu := getBSU() - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) + bsc := getBlobServiceClient() + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) files := []string{ "filea", @@ -114,9 +114,9 @@ func TestIncludeDir(t *testing.T) { func TestExcludeDir(t *testing.T) { a := assert.New(t) - bsu := getBSU() - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) + bsc := getBlobServiceClient() + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) files := []string{ "filea", @@ -156,9 +156,9 @@ func TestExcludeDir(t *testing.T) { func TestIncludeAndExcludeDir(t *testing.T) { a := assert.New(t) - bsu := getBSU() - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) + bsc := getBlobServiceClient() + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) files := []string{ "xyz/aaa", @@ -195,9 +195,9 @@ func TestIncludeAndExcludeDir(t *testing.T) { // regular local file->blob upload func TestUploadSingleFileToBlobVirtualDirectory(t *testing.T) { a := assert.New(t) - bsu := getBSU() - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) + bsc := getBlobServiceClient() + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) for _, srcFileName := range []string{"singleblobisbest", "打麻将.txt", "%4509%4254$85140&"} { // set up the source as a single file @@ -256,9 +256,9 @@ func TestUploadSingleFileToBlobVirtualDirectory(t *testing.T) { // regular local file->blob upload func TestUploadSingleFileToBlob(t *testing.T) { a := assert.New(t) - bsu := getBSU() - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) + bsc := getBlobServiceClient() + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) for _, srcFileName := range []string{"singleblobisbest", "打麻将.txt", "%4509%4254$85140&"} { // set up the source as a single file @@ -269,8 +269,8 @@ func TestUploadSingleFileToBlob(t *testing.T) { // set up the destination container with a single 
blob dstBlobName := "whatever" - scenarioHelper{}.generateBlobsFromList(a, containerURL, []string{dstBlobName}, blockBlobDefaultData) - a.NotNil(containerURL) + scenarioHelper{}.generateBlobsFromList(a, cc, []string{dstBlobName}, blockBlobDefaultData) + a.NotNil(cc) // set up interceptor mockedRPC := interceptor{} @@ -314,7 +314,7 @@ func TestUploadSingleFileToBlob(t *testing.T) { // regular directory->container upload func TestUploadDirectoryToContainer(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up the source with numerous files srcDirPath := scenarioHelper{}.generateLocalDirectory(a) @@ -322,8 +322,9 @@ func TestUploadDirectoryToContainer(t *testing.T) { fileList := scenarioHelper{}.generateCommonRemoteScenarioForLocal(a, srcDirPath, "") // set up an empty container - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) + + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) // set up interceptor mockedRPC := interceptor{} @@ -359,7 +360,7 @@ func TestUploadDirectoryToContainer(t *testing.T) { // regular directory->virtual dir upload func TestUploadDirectoryToVirtualDirectory(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() vdirName := "vdir" // set up the source with numerous files @@ -368,8 +369,8 @@ func TestUploadDirectoryToVirtualDirectory(t *testing.T) { fileList := scenarioHelper{}.generateCommonRemoteScenarioForLocal(a, srcDirPath, "") // set up an empty container - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) // set up interceptor mockedRPC := interceptor{} @@ -406,7 +407,7 @@ func TestUploadDirectoryToVirtualDirectory(t *testing.T) { // files(from pattern)->container upload func TestUploadDirectoryToContainerWithPattern(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up the source with numerous files srcDirPath := scenarioHelper{}.generateLocalDirectory(a) @@ -418,8 +419,8 @@ func TestUploadDirectoryToContainerWithPattern(t *testing.T) { scenarioHelper{}.generateLocalFilesFromList(a, srcDirPath, filesToInclude) // set up an empty container - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) // set up interceptor mockedRPC := interceptor{} @@ -456,7 +457,7 @@ func TestUploadDirectoryToContainerWithIncludeBefore_LocalTime(t *testing.T) { } func doTestUploadDirectoryToContainerWithIncludeBefore(useUtc bool, a *assert.Assertions) { - bsu := getBSU() + bsc := getBlobServiceClient() // set up the source directory srcDirPath := scenarioHelper{}.generateLocalDirectory(a) @@ -473,8 +474,8 @@ func doTestUploadDirectoryToContainerWithIncludeBefore(useUtc bool, a *assert.As scenarioHelper{}.generateLocalFilesFromList(a, srcDirPath, extraIgnoredFiles) // set up an empty container - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) // set up interceptor mockedRPC := interceptor{} @@ -515,7 +516,7 @@ func TestUploadDirectoryToContainerWithIncludeAfter_LocalTime(t *testing.T) { } func doTestUploadDirectoryToContainerWithIncludeAfter(useUtc bool, a *assert.Assertions) { - bsu := getBSU() + bsc := 
getBlobServiceClient() // set up the source with numerous files srcDirPath := scenarioHelper{}.generateLocalDirectory(a) @@ -531,8 +532,8 @@ func doTestUploadDirectoryToContainerWithIncludeAfter(useUtc bool, a *assert.Ass scenarioHelper{}.generateLocalFilesFromList(a, srcDirPath, filesToInclude) // set up an empty container - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) // set up interceptor mockedRPC := interceptor{} @@ -564,9 +565,9 @@ func doTestUploadDirectoryToContainerWithIncludeAfter(useUtc bool, a *assert.Ass func TestDisableAutoDecoding(t *testing.T) { a := assert.New(t) - bsu := getBSU() - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) + bsc := getBlobServiceClient() + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) // Encoded file name since Windows won't create name with invalid chars srcFileName := `%3C %3E %5C %2F %3A %22 %7C %3F %2A invalidcharsfile` diff --git a/cmd/zt_copy_file_file_test.go b/cmd/zt_copy_file_file_test.go index 42d2d0788..07480b477 100644 --- a/cmd/zt_copy_file_file_test.go +++ b/cmd/zt_copy_file_file_test.go @@ -29,16 +29,16 @@ import ( // regular file->file copy func TestFileCopyS2SWithSingleFile(t *testing.T) { a := assert.New(t) - fsu := getFSU() - srcShareURL, srcShareName := createNewAzureShare(a, fsu) - dstShareURL, dstShareName := createNewAzureShare(a, fsu) - defer deleteShare(a, srcShareURL) - defer deleteShare(a, dstShareURL) + fsc := getFileServiceClient() + srcShareClient, srcShareName := createNewShare(a, fsc) + dstShareClient, dstShareName := createNewShare(a, fsc) + defer deleteShare(a, srcShareClient) + defer deleteShare(a, dstShareClient) for _, fileName := range []string{"singlefileisbest", "打麻将.txt", "%4509%4254$85140&"} { // set up the source share with a single file fileList := []string{fileName} - scenarioHelper{}.generateAzureFilesFromList(a, srcShareURL, fileList) + scenarioHelper{}.generateShareFilesFromList(a, srcShareClient, fsc, fileList) // set up interceptor mockedRPC := interceptor{} @@ -83,14 +83,14 @@ func TestFileCopyS2SWithSingleFile(t *testing.T) { // regular share->share copy func TestFileCopyS2SWithShares(t *testing.T) { a := assert.New(t) - fsu := getFSU() - srcShareURL, srcShareName := createNewAzureShare(a, fsu) - dstShareURL, dstShareName := createNewAzureShare(a, fsu) - defer deleteShare(a, srcShareURL) - defer deleteShare(a, dstShareURL) + fsc := getFileServiceClient() + srcShareClient, srcShareName := createNewShare(a, fsc) + dstShareClient, dstShareName := createNewShare(a, fsc) + defer deleteShare(a, srcShareClient) + defer deleteShare(a, dstShareClient) // set up the source share with numerous files - fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, srcShareURL, "") + fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, srcShareClient, fsc, "") a.NotZero(len(fileList)) // set up interceptor @@ -129,19 +129,19 @@ func TestFileCopyS2SWithShares(t *testing.T) { // include flag limits the scope of source/destination comparison func TestFileCopyS2SWithIncludeFlag(t *testing.T) { a := assert.New(t) - fsu := getFSU() - srcShareURL, srcShareName := createNewAzureShare(a, fsu) - dstShareURL, dstShareName := createNewAzureShare(a, fsu) - defer deleteShare(a, srcShareURL) - defer deleteShare(a, dstShareURL) + fsc := getFileServiceClient() + srcShareClient, 
srcShareName := createNewShare(a, fsc) + dstShareClient, dstShareName := createNewShare(a, fsc) + defer deleteShare(a, srcShareClient) + defer deleteShare(a, dstShareClient) // set up the source share with numerous files - fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, srcShareURL, "") + fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, srcShareClient, fsc, "") a.NotZero(len(fileList)) // add special files that we wish to include filesToInclude := []string{"important.pdf", "includeSub/amazing.jpeg", "exactName"} - scenarioHelper{}.generateAzureFilesFromList(a, srcShareURL, filesToInclude) + scenarioHelper{}.generateShareFilesFromList(a, srcShareClient, fsc, filesToInclude) includeString := "*.pdf;*.jpeg;exactName" // set up interceptor @@ -166,19 +166,19 @@ func TestFileCopyS2SWithIncludeFlag(t *testing.T) { // exclude flag limits the scope of source/destination comparison func TestFileCopyS2SWithExcludeFlag(t *testing.T) { a := assert.New(t) - fsu := getFSU() - srcShareURL, srcShareName := createNewAzureShare(a, fsu) - dstShareURL, dstShareName := createNewAzureShare(a, fsu) - defer deleteShare(a, srcShareURL) - defer deleteShare(a, dstShareURL) + fsc := getFileServiceClient() + srcShareClient, srcShareName := createNewShare(a, fsc) + dstShareClient, dstShareName := createNewShare(a, fsc) + defer deleteShare(a, srcShareClient) + defer deleteShare(a, dstShareClient) // set up the source share with numerous files - fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, srcShareURL, "") + fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, srcShareClient, fsc, "") a.NotZero(len(fileList)) // add special files that we wish to exclude filesToExclude := []string{"notGood.pdf", "excludeSub/lame.jpeg", "exactName"} - scenarioHelper{}.generateAzureFilesFromList(a, srcShareURL, filesToExclude) + scenarioHelper{}.generateShareFilesFromList(a, srcShareClient, fsc, filesToExclude) excludeString := "*.pdf;*.jpeg;exactName" // set up interceptor @@ -203,25 +203,25 @@ func TestFileCopyS2SWithExcludeFlag(t *testing.T) { // include and exclude flag can work together to limit the scope of source/destination comparison func TestFileCopyS2SWithIncludeAndExcludeFlag(t *testing.T) { a := assert.New(t) - fsu := getFSU() - srcShareURL, srcShareName := createNewAzureShare(a, fsu) - dstShareURL, dstShareName := createNewAzureShare(a, fsu) - defer deleteShare(a, srcShareURL) - defer deleteShare(a, dstShareURL) + fsc := getFileServiceClient() + srcShareClient, srcShareName := createNewShare(a, fsc) + dstShareClient, dstShareName := createNewShare(a, fsc) + defer deleteShare(a, srcShareClient) + defer deleteShare(a, dstShareClient) // set up the source share with numerous files - fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, srcShareURL, "") + fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, srcShareClient, fsc, "") a.NotZero(len(fileList)) // add special files that we wish to include filesToInclude := []string{"important.pdf", "includeSub/amazing.jpeg"} - scenarioHelper{}.generateAzureFilesFromList(a, srcShareURL, filesToInclude) + scenarioHelper{}.generateShareFilesFromList(a, srcShareClient, fsc, filesToInclude) includeString := "*.pdf;*.jpeg;exactName" // add special files that we wish to exclude // note that the excluded files also match the include string filesToExclude := []string{"sorry.pdf", "exclude/notGood.jpeg", "exactName", "sub/exactName"} - 
scenarioHelper{}.generateAzureFilesFromList(a, srcShareURL, filesToExclude) + scenarioHelper{}.generateShareFilesFromList(a, srcShareClient, fsc, filesToExclude) excludeString := "so*;not*;exactName" // set up interceptor @@ -247,19 +247,19 @@ func TestFileCopyS2SWithIncludeAndExcludeFlag(t *testing.T) { // regular dir -> dir copy func TestFileCopyS2SWithDirectory(t *testing.T) { a := assert.New(t) - fsu := getFSU() - srcShareURL, srcShareName := createNewAzureShare(a, fsu) - dstShareURL, dstShareName := createNewAzureShare(a, fsu) - defer deleteShare(a, srcShareURL) - defer deleteShare(a, dstShareURL) + fsc := getFileServiceClient() + srcShareClient, srcShareName := createNewShare(a, fsc) + dstShareClient, dstShareName := createNewShare(a, fsc) + defer deleteShare(a, srcShareClient) + defer deleteShare(a, dstShareClient) // set up the source share with numerous files dirName := "dir" - fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, srcShareURL, dirName+"/") + fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, srcShareClient, fsc, dirName+"/") a.NotZero(len(fileList)) // set up the destination with the exact same files - scenarioHelper{}.generateAzureFilesFromList(a, dstShareURL, fileList) + scenarioHelper{}.generateShareFilesFromList(a, dstShareClient, fsc, fileList) // set up interceptor mockedRPC := interceptor{} diff --git a/cmd/zt_copy_s2smigration_test.go b/cmd/zt_copy_s2smigration_test.go index 0563d3125..0d666d5d0 100644 --- a/cmd/zt_copy_s2smigration_test.go +++ b/cmd/zt_copy_s2smigration_test.go @@ -23,6 +23,8 @@ package cmd import ( "context" "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" "github.com/stretchr/testify/assert" "net/url" "os" @@ -32,7 +34,6 @@ import ( "github.com/Azure/azure-storage-azcopy/v10/azbfs" "github.com/Azure/azure-storage-azcopy/v10/common" - "github.com/Azure/azure-storage-blob-go/azblob" ) // Additional S2S migration cases, besides E2E smoke testing cases for S3/blob/file source contained in test_service_to_service_copy.py @@ -185,10 +186,10 @@ func TestS2SCopyFromS3ToBlobWithBucketNameNeedBeResolved(t *testing.T) { // Check container with resolved name has been created resolvedBucketName := strings.Replace(bucketName, invalidPrefix, resolvedPrefix, 1) - blobServiceURL := scenarioHelper{}.getBlobServiceURL(a) - containerURL := blobServiceURL.NewContainerURL(resolvedBucketName) - a.True(scenarioHelper{}.containerExists(containerURL)) - defer deleteContainer(a, containerURL) + bsc := scenarioHelper{}.getBlobServiceClient(a) + cc := bsc.NewContainerClient(resolvedBucketName) + a.True(scenarioHelper{}.containerExists(cc)) + defer deleteContainer(a, cc) // Check correct entry are scheduled. // Example: @@ -240,10 +241,10 @@ func TestS2SCopyFromS3ToBlobWithWildcardInSrcAndBucketNameNeedBeResolved(t *test // Check container with resolved name has been created resolvedBucketName := strings.Replace(bucketName, invalidPrefix, resolvedPrefix, 1) - blobServiceURL := scenarioHelper{}.getBlobServiceURL(a) - containerURL := blobServiceURL.NewContainerURL(resolvedBucketName) - a.True(scenarioHelper{}.containerExists(containerURL)) - defer deleteContainer(a, containerURL) + bsc := scenarioHelper{}.getBlobServiceClient(a) + cc := bsc.NewContainerClient(resolvedBucketName) + a.True(scenarioHelper{}.containerExists(cc)) + defer deleteContainer(a, cc) // Check correct entry are scheduled. 
// Example: @@ -615,10 +616,10 @@ func TestS2SCopyFromGCPToBlobWithBucketNameNeedBeResolved(t *testing.T) { // Check container with resolved name has been created resolvedBucketName := strings.Replace(bucketName, invalidPrefix, resolvedPrefix, 1) - blobServiceURL := scenarioHelper{}.getBlobServiceURL(a) - containerURL := blobServiceURL.NewContainerURL(resolvedBucketName) - a.True(scenarioHelper{}.containerExists(containerURL)) - defer deleteContainer(a, containerURL) + bsc := scenarioHelper{}.getBlobServiceClient(a) + cc := bsc.NewContainerClient(resolvedBucketName) + a.True(scenarioHelper{}.containerExists(cc)) + defer deleteContainer(a, cc) // Check correct entry are scheduled. // Example: @@ -664,10 +665,10 @@ func TestS2SCopyFromGCPToBlobWithWildcardInSrcAndBucketNameNeedBeResolved(t *tes // Check container with resolved name has been created resolvedBucketName := strings.Replace(bucketName, invalidPrefix, resolvedPrefix, 1) - blobServiceURL := scenarioHelper{}.getBlobServiceURL(a) - containerURL := blobServiceURL.NewContainerURL(resolvedBucketName) - a.True(scenarioHelper{}.containerExists(containerURL)) - defer deleteContainer(a, containerURL) + bsc := scenarioHelper{}.getBlobServiceClient(a) + cc := bsc.NewContainerClient(resolvedBucketName) + a.True(scenarioHelper{}.containerExists(cc)) + defer deleteContainer(a, cc) validateS2STransfersAreScheduled(a, common.AZCOPY_PATH_SEPARATOR_STRING+bucketName, common.AZCOPY_PATH_SEPARATOR_STRING+resolvedBucketName, objectList, mockedRPC) }) @@ -818,18 +819,18 @@ func TestS2SCopyFromGCPObjectToBlobContainer(t *testing.T) { // Copy from container to container, preserve blob tier. func TestS2SCopyFromContainerToContainerPreserveBlobTier(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() - srcContainerURL, srcContainerName := createNewContainer(a, bsu) - defer deleteContainer(a, srcContainerURL) - a.NotNil(srcContainerURL) + srcContainerClient, srcContainerName := createNewContainer(a, bsc) + defer deleteContainer(a, srcContainerClient) + a.NotNil(srcContainerClient) blobName := "blobWithCoolTier" - scenarioHelper{}.generateBlockBlobWithAccessTier(a, srcContainerURL, blobName, azblob.AccessTierCool) + scenarioHelper{}.generateBlockBlobWithAccessTier(a, srcContainerClient, blobName, to.Ptr(blob.AccessTierCool)) - dstContainerURL, dstContainerName := createNewContainer(a, bsu) - defer deleteContainer(a, dstContainerURL) - a.NotNil(dstContainerURL) + dstContainerClient, dstContainerName := createNewContainer(a, bsc) + defer deleteContainer(a, dstContainerClient) + a.NotNil(dstContainerClient) // set up interceptor mockedRPC := interceptor{} @@ -847,25 +848,25 @@ func TestS2SCopyFromContainerToContainerPreserveBlobTier(t *testing.T) { validateS2STransfersAreScheduled(a, "", "/"+srcContainerName, []string{common.AZCOPY_PATH_SEPARATOR_STRING + blobName}, mockedRPC) // common.AZCOPY_PATH_SEPARATOR_STRING added for JobPartPlan file change. - a.Equal(azblob.AccessTierCool, mockedRPC.transfers[0].BlobTier) + a.Equal(blob.AccessTierCool, mockedRPC.transfers[0].BlobTier) }) } // Copy from container to container, and don't preserve blob tier. 
func TestS2SCopyFromContainerToContainerNoPreserveBlobTier(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() - srcContainerURL, srcContainerName := createNewContainer(a, bsu) - defer deleteContainer(a, srcContainerURL) - a.NotNil(srcContainerURL) + srcContainerClient, srcContainerName := createNewContainer(a, bsc) + defer deleteContainer(a, srcContainerClient) + a.NotNil(srcContainerClient) blobName := "blobWithCoolTier" - scenarioHelper{}.generateBlockBlobWithAccessTier(a, srcContainerURL, blobName, azblob.AccessTierCool) + scenarioHelper{}.generateBlockBlobWithAccessTier(a, srcContainerClient, blobName, to.Ptr(blob.AccessTierCool)) - dstContainerURL, dstContainerName := createNewContainer(a, bsu) - defer deleteContainer(a, dstContainerURL) - a.NotNil(dstContainerURL) + dstContainerClient, dstContainerName := createNewContainer(a, bsc) + defer deleteContainer(a, dstContainerClient) + a.NotNil(dstContainerClient) // set up interceptor mockedRPC := interceptor{} @@ -885,7 +886,7 @@ func TestS2SCopyFromContainerToContainerNoPreserveBlobTier(t *testing.T) { validateS2STransfersAreScheduled(a, "", "/"+srcContainerName, []string{common.AZCOPY_PATH_SEPARATOR_STRING + blobName}, mockedRPC) // common.AZCOPY_PATH_SEPARATOR_STRING added for JobPartPlan file change. - a.Equal(azblob.AccessTierNone, mockedRPC.transfers[0].BlobTier) + a.Equal(blob.AccessTier(""), mockedRPC.transfers[0].BlobTier) }) } @@ -893,19 +894,19 @@ func TestS2SCopyFromContainerToContainerNoPreserveBlobTier(t *testing.T) { func TestS2SCopyFromPageToBlockBlob(t *testing.T) { a := assert.New(t) t.Skip("Enable after setting Account to non-HNS") - bsu := getBSU() + bsc := getBlobServiceClient() // Generate source container and blobs - srcContainerURL, srcContainerName := createNewContainer(a, bsu) - defer deleteContainer(a, srcContainerURL) - a.NotNil(srcContainerURL) + srcContainerClient, srcContainerName := createNewContainer(a, bsc) + defer deleteContainer(a, srcContainerClient) + a.NotNil(srcContainerClient) objectList := []string{"file", "sub/file2"} - scenarioHelper{}.generatePageBlobsFromList(a, srcContainerURL, objectList, pageBlobDefaultData) + scenarioHelper{}.generatePageBlobsFromList(a, srcContainerClient, objectList, pageBlobDefaultData) // Create destination container - dstContainerURL, dstContainerName := createNewContainer(a, bsu) - defer deleteContainer(a, dstContainerURL) - a.NotNil(dstContainerURL) + dstContainerClient, dstContainerName := createNewContainer(a, bsc) + defer deleteContainer(a, dstContainerClient) + a.NotNil(dstContainerClient) // Set up interceptor mockedRPC := interceptor{} @@ -946,19 +947,19 @@ func TestS2SCopyFromPageToBlockBlob(t *testing.T) { // Attempt to copy from a block blob to a page blob func TestS2SCopyFromBlockToPageBlob(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // Generate source container and blobs - srcContainerURL, srcContainerName := createNewContainer(a, bsu) - defer deleteContainer(a, srcContainerURL) - a.NotNil(srcContainerURL) + srcContainerClient, srcContainerName := createNewContainer(a, bsc) + defer deleteContainer(a, srcContainerClient) + a.NotNil(srcContainerClient) objectList := []string{"file", "sub/file2"} - scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, objectList, pageBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, srcContainerClient, objectList, pageBlobDefaultData) // Create destination container - dstContainerURL, dstContainerName := createNewContainer(a, bsu) - 
defer deleteContainer(a, dstContainerURL) - a.NotNil(dstContainerURL) + dstContainerClient, dstContainerName := createNewContainer(a, bsc) + defer deleteContainer(a, dstContainerClient) + a.NotNil(dstContainerClient) // Set up interceptor mockedRPC := interceptor{} @@ -999,19 +1000,19 @@ func TestS2SCopyFromBlockToPageBlob(t *testing.T) { // Attempt to copy from a block blob to an append blob func TestS2SCopyFromBlockToAppendBlob(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // Generate source container and blobs - srcContainerURL, srcContainerName := createNewContainer(a, bsu) - defer deleteContainer(a, srcContainerURL) - a.NotNil(srcContainerURL) + srcContainerClient, srcContainerName := createNewContainer(a, bsc) + defer deleteContainer(a, srcContainerClient) + a.NotNil(srcContainerClient) objectList := []string{"file", "sub/file2"} - scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, objectList, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, srcContainerClient, objectList, blockBlobDefaultData) // Create destination container - dstContainerURL, dstContainerName := createNewContainer(a, bsu) - defer deleteContainer(a, dstContainerURL) - a.NotNil(dstContainerURL) + dstContainerClient, dstContainerName := createNewContainer(a, bsc) + defer deleteContainer(a, dstContainerClient) + a.NotNil(dstContainerClient) // Set up interceptor mockedRPC := interceptor{} @@ -1053,19 +1054,19 @@ func TestS2SCopyFromBlockToAppendBlob(t *testing.T) { func TestS2SCopyFromAppendToBlockBlob(t *testing.T) { a := assert.New(t) t.Skip("Enable after setting Account to non-HNS") - bsu := getBSU() + bsc := getBlobServiceClient() // Generate source container and blobs - srcContainerURL, srcContainerName := createNewContainer(a, bsu) - defer deleteContainer(a, srcContainerURL) - a.NotNil(srcContainerURL) + srcContainerClient, srcContainerName := createNewContainer(a, bsc) + defer deleteContainer(a, srcContainerClient) + a.NotNil(srcContainerClient) objectList := []string{"file", "sub/file2"} - scenarioHelper{}.generateAppendBlobsFromList(a, srcContainerURL, objectList, appendBlobDefaultData) + scenarioHelper{}.generateAppendBlobsFromList(a, srcContainerClient, objectList, appendBlobDefaultData) // Create destination container - dstContainerURL, dstContainerName := createNewContainer(a, bsu) - defer deleteContainer(a, dstContainerURL) - a.NotNil(dstContainerURL) + dstContainerClient, dstContainerName := createNewContainer(a, bsc) + defer deleteContainer(a, dstContainerClient) + a.NotNil(dstContainerClient) // Set up interceptor mockedRPC := interceptor{} @@ -1107,19 +1108,19 @@ func TestS2SCopyFromAppendToBlockBlob(t *testing.T) { func TestS2SCopyFromPageToAppendBlob(t *testing.T) { a := assert.New(t) t.Skip("Enable after setting Account to non-HNS") - bsu := getBSU() + bsc := getBlobServiceClient() // Generate source container and blobs - srcContainerURL, srcContainerName := createNewContainer(a, bsu) - defer deleteContainer(a, srcContainerURL) - a.NotNil(srcContainerURL) + srcContainerClient, srcContainerName := createNewContainer(a, bsc) + defer deleteContainer(a, srcContainerClient) + a.NotNil(srcContainerClient) objectList := []string{"file", "sub/file2"} - scenarioHelper{}.generatePageBlobsFromList(a, srcContainerURL, objectList, pageBlobDefaultData) + scenarioHelper{}.generatePageBlobsFromList(a, srcContainerClient, objectList, pageBlobDefaultData) // Create destination container - dstContainerURL, dstContainerName := createNewContainer(a, bsu) - 
defer deleteContainer(a, dstContainerURL) - a.NotNil(dstContainerURL) + dstContainerClient, dstContainerName := createNewContainer(a, bsc) + defer deleteContainer(a, dstContainerClient) + a.NotNil(dstContainerClient) // Set up interceptor mockedRPC := interceptor{} @@ -1161,19 +1162,19 @@ func TestS2SCopyFromPageToAppendBlob(t *testing.T) { func TestS2SCopyFromAppendToPageBlob(t *testing.T) { a := assert.New(t) t.Skip("Enable after setting Account to non-HNS") - bsu := getBSU() + bsc := getBlobServiceClient() // Generate source container and blobs - srcContainerURL, srcContainerName := createNewContainer(a, bsu) - defer deleteContainer(a, srcContainerURL) - a.NotNil(srcContainerURL) + srcContainerClient, srcContainerName := createNewContainer(a, bsc) + defer deleteContainer(a, srcContainerClient) + a.NotNil(srcContainerClient) objectList := []string{"file", "sub/file2"} - scenarioHelper{}.generateAppendBlobsFromList(a, srcContainerURL, objectList, pageBlobDefaultData) + scenarioHelper{}.generateAppendBlobsFromList(a, srcContainerClient, objectList, pageBlobDefaultData) // Create destination container - dstContainerURL, dstContainerName := createNewContainer(a, bsu) - defer deleteContainer(a, dstContainerURL) - a.NotNil(dstContainerURL) + dstContainerClient, dstContainerName := createNewContainer(a, bsc) + defer deleteContainer(a, dstContainerClient) + a.NotNil(dstContainerClient) // Set up interceptor mockedRPC := interceptor{} @@ -1213,18 +1214,18 @@ func TestS2SCopyFromAppendToPageBlob(t *testing.T) { func TestS2SCopyFromSingleBlobToBlobContainer(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() - srcContainerURL, srcContainerName := createNewContainer(a, bsu) - defer deleteContainer(a, srcContainerURL) - a.NotNil(srcContainerURL) + srcContainerClient, srcContainerName := createNewContainer(a, bsc) + defer deleteContainer(a, srcContainerClient) + a.NotNil(srcContainerClient) objectList := []string{"file", "sub/file2"} - scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, objectList, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, srcContainerClient, objectList, blockBlobDefaultData) - dstContainerURL, dstContainerName := createNewContainer(a, bsu) - defer deleteContainer(a, dstContainerURL) - a.NotNil(dstContainerURL) + dstContainerClient, dstContainerName := createNewContainer(a, bsc) + defer deleteContainer(a, dstContainerClient) + a.NotNil(dstContainerClient) // set up interceptor mockedRPC := interceptor{} @@ -1265,18 +1266,18 @@ func TestS2SCopyFromSingleBlobToBlobContainer(t *testing.T) { func TestS2SCopyFromSingleAzureFileToBlobContainer(t *testing.T) { a := assert.New(t) - bsu := getBSU() - fsu := getFSU() + bsc := getBlobServiceClient() + fsc := getFileServiceClient() - srcShareURL, srcShareName := createNewAzureShare(a, fsu) - defer deleteShare(a, srcShareURL) - a.NotNil(srcShareURL) + srcShareClient, srcShareName := createNewShare(a, fsc) + defer deleteShare(a, srcShareClient) + a.NotNil(srcShareClient) - scenarioHelper{}.generateFlatFiles(a, srcShareURL, []string{"file"}) + scenarioHelper{}.generateFlatFiles(a, srcShareClient, []string{"file"}) - dstContainerURL, dstContainerName := createNewContainer(a, bsu) - defer deleteContainer(a, dstContainerURL) - a.NotNil(dstContainerURL) + dstContainerClient, dstContainerName := createNewContainer(a, bsc) + defer deleteContainer(a, dstContainerClient) + a.NotNil(dstContainerClient) // set up interceptor mockedRPC := interceptor{} diff --git a/cmd/zt_generic_processor_test.go 
b/cmd/zt_generic_processor_test.go index 9a545062c..3f960cd4a 100644 --- a/cmd/zt_generic_processor_test.go +++ b/cmd/zt_generic_processor_test.go @@ -64,10 +64,10 @@ func (processorTestSuiteHelper) getCopyJobTemplate() *common.CopyJobPartOrderReq func TestCopyTransferProcessorMultipleFiles(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up source and destination - containerURL, _ := getContainerURL(a, bsu) + cc, _ := getContainerClient(a, bsc) dstDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(dstDirName) @@ -81,7 +81,7 @@ func TestCopyTransferProcessorMultipleFiles(t *testing.T) { for _, numOfParts := range []int{1, 3} { numOfTransfersPerPart := len(sampleObjects) / numOfParts copyProcessor := newCopyTransferProcessor(processorTestSuiteHelper{}.getCopyJobTemplate(), numOfTransfersPerPart, - newRemoteRes(containerURL.String()), newLocalRes(dstDirName), nil, nil, false, false) + newRemoteRes(cc.URL()), newLocalRes(dstDirName), nil, nil, false, false) // go through the objects and make sure they are processed without error for _, storedObject := range sampleObjects { @@ -106,14 +106,14 @@ func TestCopyTransferProcessorMultipleFiles(t *testing.T) { func TestCopyTransferProcessorSingleFile(t *testing.T) { a := assert.New(t) - bsu := getBSU() - containerURL, _ := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) + bsc := getBlobServiceClient() + cc, _ := createNewContainer(a, bsc) + defer deleteContainer(a, cc) // set up the container with a single blob blobList := []string{"singlefile101"} - scenarioHelper{}.generateBlobsFromList(a, containerURL, blobList, blockBlobDefaultData) - a.NotNil(containerURL) + scenarioHelper{}.generateBlobsFromList(a, cc, blobList, blockBlobDefaultData) + a.NotNil(cc) // set up the directory with a single file dstDirName := scenarioHelper{}.generateLocalDirectory(a) @@ -127,12 +127,12 @@ func TestCopyTransferProcessorSingleFile(t *testing.T) { mockedRPC.init() // set up the processor - blobURL := containerURL.NewBlockBlobURL(blobList[0]).String() + blobURL := cc.NewBlobClient(blobList[0]).URL() copyProcessor := newCopyTransferProcessor(processorTestSuiteHelper{}.getCopyJobTemplate(), 2, newRemoteRes(blobURL), newLocalRes(filepath.Join(dstDirName, dstFileName)), nil, nil, false, false) // exercise the copy transfer processor - storedObject := newStoredObject(noPreProccessor, blobList[0], "", common.EEntityType.File(), time.Now(), 0, noContentProps, noBlobProps, noMetdata, "") + storedObject := newStoredObject(noPreProccessor, blobList[0], "", common.EEntityType.File(), time.Now(), 0, noContentProps, noBlobProps, noMetadata, "") err := copyProcessor.scheduleCopyTransfer(storedObject) a.Nil(err) diff --git a/cmd/zt_generic_service_traverser_test.go b/cmd/zt_generic_service_traverser_test.go index 6c87290b8..503d62028 100644 --- a/cmd/zt_generic_service_traverser_test.go +++ b/cmd/zt_generic_service_traverser_test.go @@ -6,14 +6,12 @@ import ( "testing" "github.com/Azure/azure-storage-azcopy/v10/common" - "github.com/Azure/azure-storage-blob-go/azblob" - "github.com/Azure/azure-storage-file-go/azfile" ) func TestServiceTraverserWithManyObjects(t *testing.T) { a := assert.New(t) - bsu := getBSU() - fsu := getFSU() + bsc := getBlobServiceClient() + fsc := getFileServiceClient() testS3 := false // Only test S3 if credentials are present. 
testGCP := false s3Client, err := createS3ClientWithMinio(createS3ResOptions{}) @@ -39,8 +37,8 @@ func TestServiceTraverserWithManyObjects(t *testing.T) { cleanGCPAccount(gcpClient) } // BlobFS is tested on the same account, therefore this is safe to clean up this way - cleanBlobAccount(a, bsu) - cleanFileAccount(a, fsu) + cleanBlobAccount(a, bsc) + cleanFileAccount(a, fsc) containerList := []string{ generateName("suchcontainermanystorage", 63), @@ -65,8 +63,8 @@ func TestServiceTraverserWithManyObjects(t *testing.T) { objectData := "Hello world!" // Generate remote scenarios - scenarioHelper{}.generateBlobContainersAndBlobsFromLists(a, bsu, containerList, objectList, objectData) - scenarioHelper{}.generateFileSharesAndFilesFromLists(a, fsu, containerList, objectList, objectData) + scenarioHelper{}.generateBlobContainersAndBlobsFromLists(a, bsc, containerList, objectList, objectData) + scenarioHelper{}.generateFileSharesAndFilesFromLists(a, fsc, containerList, objectList, objectData) if testS3 { scenarioHelper{}.generateS3BucketsAndObjectsFromLists(a, s3Client, containerList, objectList, objectData) } @@ -78,8 +76,8 @@ func TestServiceTraverserWithManyObjects(t *testing.T) { defer func() { for _, v := range containerList { // create container URLs - blobContainer := bsu.NewContainerURL(v) - fileShare := fsu.NewShareURL(v) + cc := bsc.NewContainerClient(v) + sc := fsc.NewShareClient(v) // Ignore errors from cleanup. if testS3 { @@ -88,8 +86,8 @@ func TestServiceTraverserWithManyObjects(t *testing.T) { if testGCP { deleteGCPBucket(gcpClient, v, true) } - _, _ = blobContainer.Delete(ctx, azblob.ContainerAccessConditions{}) - _, _ = fileShare.Delete(ctx, azfile.DeleteSnapshotsOptionNone) + _, _ = cc.Delete(ctx, nil) + _, _ = sc.Delete(ctx, nil) } }() @@ -106,9 +104,8 @@ func TestServiceTraverserWithManyObjects(t *testing.T) { a.Nil(err) // construct a blob account traverser - blobPipeline := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{}) - rawBSU := scenarioHelper{}.getRawBlobServiceURLWithSAS(a) - blobAccountTraverser := newBlobAccountTraverser(&rawBSU, blobPipeline, ctx, false, func(common.EntityType) {}, false, common.CpkOptions{}, common.EPreservePermissionsOption.None(), false) + rawBSU := scenarioHelper{}.getBlobServiceClientWithSAS(a) + blobAccountTraverser := newBlobAccountTraverser(rawBSU, "", ctx, false, func(common.EntityType) {}, false, common.CpkOptions{}, common.EPreservePermissionsOption.None(), false) // invoke the blob account traversal with a dummy processor blobDummyProcessor := dummyProcessor{} @@ -116,9 +113,8 @@ func TestServiceTraverserWithManyObjects(t *testing.T) { a.Nil(err) // construct a file account traverser - filePipeline := azfile.NewPipeline(azfile.NewAnonymousCredential(), azfile.PipelineOptions{}) - rawFSU := scenarioHelper{}.getRawFileServiceURLWithSAS(a) - fileAccountTraverser := newFileAccountTraverser(&rawFSU, filePipeline, ctx, false, func(common.EntityType) {}, common.ETrailingDotOption.Enable(), nil) + rawFSU := scenarioHelper{}.getFileServiceClientWithSAS(a) + fileAccountTraverser := newFileAccountTraverser(rawFSU, "", ctx, false, func(common.EntityType) {}, common.ETrailingDotOption.Enable(), nil) // invoke the file account traversal with a dummy processor fileDummyProcessor := dummyProcessor{} @@ -182,8 +178,8 @@ func TestServiceTraverserWithManyObjects(t *testing.T) { func TestServiceTraverserWithWildcards(t *testing.T) { a := assert.New(t) - bsu := getBSU() - fsu := getFSU() + bsc := getBlobServiceClient() + fsc := 
getFileServiceClient() testS3 := false // Only test S3 if credentials are present. testGCP := false @@ -208,8 +204,8 @@ func TestServiceTraverserWithWildcards(t *testing.T) { if testGCP { cleanGCPAccount(gcpClient) } - cleanBlobAccount(a, bsu) - cleanFileAccount(a, fsu) + cleanBlobAccount(a, bsc) + cleanFileAccount(a, fsc) containerList := []string{ generateName("objectmatchone", 63), @@ -234,8 +230,8 @@ func TestServiceTraverserWithWildcards(t *testing.T) { objectData := "Hello world!" // Generate remote scenarios - scenarioHelper{}.generateBlobContainersAndBlobsFromLists(a, bsu, containerList, objectList, objectData) - scenarioHelper{}.generateFileSharesAndFilesFromLists(a, fsu, containerList, objectList, objectData) + scenarioHelper{}.generateBlobContainersAndBlobsFromLists(a, bsc, containerList, objectList, objectData) + scenarioHelper{}.generateFileSharesAndFilesFromLists(a, fsc, containerList, objectList, objectData) if testS3 { scenarioHelper{}.generateS3BucketsAndObjectsFromLists(a, s3Client, containerList, objectList, objectData) } @@ -247,8 +243,8 @@ func TestServiceTraverserWithWildcards(t *testing.T) { defer func() { for _, v := range containerList { // create container URLs - blobContainer := bsu.NewContainerURL(v) - fileShare := fsu.NewShareURL(v) + cc := bsc.NewContainerClient(v) + sc := fsc.NewShareClient(v) // Ignore errors from cleanup. if testS3 { @@ -257,8 +253,8 @@ func TestServiceTraverserWithWildcards(t *testing.T) { if testGCP { deleteGCPBucket(gcpClient, v, true) } - _, _ = blobContainer.Delete(ctx, azblob.ContainerAccessConditions{}) - _, _ = fileShare.Delete(ctx, azfile.DeleteSnapshotsOptionNone) + _, _ = cc.Delete(ctx, nil) + _, _ = sc.Delete(ctx, nil) } }() @@ -275,11 +271,9 @@ func TestServiceTraverserWithWildcards(t *testing.T) { a.Nil(err) // construct a blob account traverser - blobPipeline := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{}) - rawBSU := scenarioHelper{}.getRawBlobServiceURLWithSAS(a) - rawBSU.Path = "/objectmatch*" // set the container name to contain a wildcard - blobAccountTraverser := newBlobAccountTraverser(&rawBSU, blobPipeline, ctx, false, func(common.EntityType) {}, false, common.CpkOptions{}, common.EPreservePermissionsOption.None(), false) - + rawBSU := scenarioHelper{}.getBlobServiceClientWithSAS(a) + container := "objectmatch*" // set the container name to contain a wildcard + blobAccountTraverser := newBlobAccountTraverser(rawBSU, container, ctx, false, func(common.EntityType) {}, false, common.CpkOptions{}, common.EPreservePermissionsOption.None(), false) // invoke the blob account traversal with a dummy processor blobDummyProcessor := dummyProcessor{} @@ -287,10 +281,9 @@ func TestServiceTraverserWithWildcards(t *testing.T) { a.Nil(err) // construct a file account traverser - filePipeline := azfile.NewPipeline(azfile.NewAnonymousCredential(), azfile.PipelineOptions{}) - rawFSU := scenarioHelper{}.getRawFileServiceURLWithSAS(a) - rawFSU.Path = "/objectmatch*" // set the container name to contain a wildcard - fileAccountTraverser := newFileAccountTraverser(&rawFSU, filePipeline, ctx, false, func(common.EntityType) {}, common.ETrailingDotOption.Enable(), nil) + rawFSU := scenarioHelper{}.getFileServiceClientWithSAS(a) + share := "objectmatch*" // set the container name to contain a wildcard + fileAccountTraverser := newFileAccountTraverser(rawFSU, share, ctx, false, func(common.EntityType) {}, common.ETrailingDotOption.Enable(), nil) // invoke the file account traversal with a dummy processor 
fileDummyProcessor := dummyProcessor{} diff --git a/cmd/zt_generic_traverser_test.go b/cmd/zt_generic_traverser_test.go index 6f061e851..b80cd5b3c 100644 --- a/cmd/zt_generic_traverser_test.go +++ b/cmd/zt_generic_traverser_test.go @@ -23,6 +23,8 @@ package cmd import ( "context" "github.com/Azure/azure-pipeline-go/pipeline" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" "github.com/stretchr/testify/assert" "io" "os" @@ -34,8 +36,6 @@ import ( gcpUtils "cloud.google.com/go/storage" - "github.com/Azure/azure-storage-blob-go/azblob" - "github.com/Azure/azure-storage-file-go/azfile" "github.com/minio/minio-go" chk "gopkg.in/check.v1" @@ -82,7 +82,30 @@ func TestLocalWildcardOverlap(t *testing.T) { resource, err := SplitResourceString(filepath.Join(tmpDir, "tes*t.txt"), common.ELocation.Local()) a.Nil(err) - traverser, err := InitResourceTraverser(resource, common.ELocation.Local(), nil, nil, common.ESymlinkHandlingType.Follow(), nil, true, false, false, common.EPermanentDeleteOption.None(), nil, nil, false, common.ESyncHashType.None(), common.EPreservePermissionsOption.None(), pipeline.LogInfo, common.CpkOptions{}, nil, true, common.ETrailingDotOption.Enable(), nil, nil) + traverser, err := InitResourceTraverser( + resource, + common.ELocation.Local(), + nil, + nil, + common.ESymlinkHandlingType.Follow(), + nil, + true, + false, + false, + common.EPermanentDeleteOption.None(), + nil, + nil, + false, + common.ESyncHashType.None(), + common.EPreservePermissionsOption.None(), + pipeline.LogInfo, + common.CpkOptions{}, + nil, + true, + common.ETrailingDotOption.Enable(), + nil, + nil, + ) a.Nil(err) seenFiles := make(map[string]bool) @@ -104,37 +127,37 @@ func TestLocalWildcardOverlap(t *testing.T) { // While BlobFS could get properties in the future, it's currently disabled as BFS source S2S isn't set up right now, and likely won't be. 
func TestFilesGetProperties(t *testing.T) { a := assert.New(t) - fsu := getFSU() - share, shareName := createNewAzureShare(a, fsu) + fsc := getFileServiceClient() + sc, shareName := createNewShare(a, fsc) fileName := generateAzureFileName() - headers := azfile.FileHTTPHeaders{ - ContentType: "text/random", - ContentEncoding: "testEncoding", - ContentLanguage: "en-US", - ContentDisposition: "testDisposition", - CacheControl: "testCacheControl", + headers := file.HTTPHeaders{ + ContentType: to.Ptr("text/random"), + ContentEncoding: to.Ptr("testEncoding"), + ContentLanguage: to.Ptr("en-US"), + ContentDisposition: to.Ptr("testDisposition"), + CacheControl: to.Ptr("testCacheControl"), } - scenarioHelper{}.generateAzureFilesFromList(a, share, []string{fileName}) - _, err := share.NewRootDirectoryURL().NewFileURL(fileName).SetHTTPHeaders(ctx, headers) + scenarioHelper{}.generateShareFilesFromList(a, sc, fsc, []string{fileName}) + _, err := sc.NewRootDirectoryClient().NewFileClient(fileName).SetHTTPHeaders(ctx, &file.SetHTTPHeadersOptions{HTTPHeaders: &headers}) a.Nil(err) - shareURL := scenarioHelper{}.getRawShareURLWithSAS(a, shareName) + shareURL := scenarioHelper{}.getRawShareURLWithSAS(a, shareName).String() - pipeline := azfile.NewPipeline(azfile.NewAnonymousCredential(), azfile.PipelineOptions{}) + serviceClientWithSAS := scenarioHelper{}.getFileServiceClientWithSASFromURL(a, shareURL) // first test reading from the share itself - traverser := newFileTraverser(&shareURL, pipeline, ctx, false, true, func(common.EntityType) {}, common.ETrailingDotOption.Enable(), nil) + traverser := newFileTraverser(shareURL, serviceClientWithSAS, ctx, false, true, func(common.EntityType) {}, common.ETrailingDotOption.Enable(), nil) // embed the check into the processor for ease of use seenContentType := false processor := func(object StoredObject) error { if object.entityType == common.EEntityType.File() { // test all attributes (but only for files, since folders don't have them) - a.Equal(headers.ContentType, object.contentType) - a.Equal(headers.ContentEncoding, object.contentEncoding) - a.Equal(headers.ContentLanguage, object.contentLanguage) - a.Equal(headers.ContentDisposition, object.contentDisposition) - a.Equal(headers.CacheControl, object.cacheControl) + a.Equal(*headers.ContentType, object.contentType) + a.Equal(*headers.ContentEncoding, object.contentEncoding) + a.Equal(*headers.ContentLanguage, object.contentLanguage) + a.Equal(*headers.ContentDisposition, object.contentDisposition) + a.Equal(*headers.CacheControl, object.cacheControl) seenContentType = true } return nil @@ -146,8 +169,9 @@ func TestFilesGetProperties(t *testing.T) { // then test reading from the filename exactly, because that's a different codepath. 
seenContentType = false - fileURL := scenarioHelper{}.getRawFileURLWithSAS(a, shareName, fileName) - traverser = newFileTraverser(&fileURL, pipeline, ctx, false, true, func(common.EntityType) {}, common.ETrailingDotOption.Enable(), nil) + fileURL := scenarioHelper{}.getRawFileURLWithSAS(a, shareName, fileName).String() + serviceClientWithSAS = scenarioHelper{}.getFileServiceClientWithSASFromURL(a, shareURL) + traverser = newFileTraverser(fileURL, serviceClientWithSAS, ctx, false, true, func(common.EntityType) {}, common.ETrailingDotOption.Enable(), nil) err = traverser.Traverse(noPreProccessor, processor, nil) a.Nil(err) @@ -497,13 +521,13 @@ func TestWalkWithSymlinksToParentAndChild(t *testing.T) { // compare that the traversers get consistent results func TestTraverserWithSingleObject(t *testing.T) { a := assert.New(t) - bsu := getBSU() - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) + bsc := getBlobServiceClient() + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) - fsu := getFSU() - shareURL, shareName := createNewAzureShare(a, fsu) - defer deleteShare(a, shareURL) + fsc := getFileServiceClient() + sc, shareName := createNewShare(a, fsc) + defer deleteShare(a, sc) bfsu := GetBFSSU() filesystemURL, _ := createNewFilesystem(a, bfsu) @@ -528,7 +552,7 @@ func TestTraverserWithSingleObject(t *testing.T) { for _, storedObjectName := range []string{"sub1/sub2/singleblobisbest", "nosubsingleblob", "满汉全席.txt"} { // set up the container with a single blob blobList := []string{storedObjectName} - scenarioHelper{}.generateBlobsFromList(a, containerURL, blobList, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, cc, blobList, blockBlobDefaultData) // set up the directory as a single file dstDirName := scenarioHelper{}.generateLocalDirectory(a) @@ -547,9 +571,9 @@ func TestTraverserWithSingleObject(t *testing.T) { // construct a blob traverser ctx := context.WithValue(context.TODO(), ste.ServiceAPIVersionOverride, ste.DefaultServiceApiVersion) - p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{}) - rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, blobList[0]) - blobTraverser := newBlobTraverser(&rawBlobURLWithSAS, p, ctx, false, false, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None(), false) + rawBlobURLWithSAS := scenarioHelper{}.getBlobClientWithSAS(a, containerName, blobList[0]).URL() + blobServiceClientWithSAS := scenarioHelper{}.getBlobServiceClientWithSASFromURL(a, rawBlobURLWithSAS) + blobTraverser := newBlobTraverser(rawBlobURLWithSAS, blobServiceClientWithSAS, ctx, false, false, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None(), false) // invoke the blob traversal with a dummy processor blobDummyProcessor := dummyProcessor{} @@ -568,12 +592,12 @@ func TestTraverserWithSingleObject(t *testing.T) { if !strings.Contains(storedObjectName, "/") { // set up the Azure Share with a single file fileList := []string{storedObjectName} - scenarioHelper{}.generateAzureFilesFromList(a, shareURL, fileList) + scenarioHelper{}.generateShareFilesFromList(a, sc, fsc, fileList) // construct an Azure file traverser - filePipeline := azfile.NewPipeline(azfile.NewAnonymousCredential(), azfile.PipelineOptions{}) - rawFileURLWithSAS := scenarioHelper{}.getRawFileURLWithSAS(a, shareName, fileList[0]) - azureFileTraverser := 
newFileTraverser(&rawFileURLWithSAS, filePipeline, ctx, false, false, func(common.EntityType) {}, common.ETrailingDotOption.Enable(), nil) + rawFileURLWithSAS := scenarioHelper{}.getRawFileURLWithSAS(a, shareName, fileList[0]).String() + fileServiceClientWithSAS := scenarioHelper{}.getFileServiceClientWithSASFromURL(a, rawFileURLWithSAS) + azureFileTraverser := newFileTraverser(rawFileURLWithSAS, fileServiceClientWithSAS, ctx, false, false, func(common.EntityType) {}, common.ETrailingDotOption.Enable(), nil) // invoke the file traversal with a dummy processor fileDummyProcessor := dummyProcessor{} @@ -627,13 +651,13 @@ func TestTraverserWithSingleObject(t *testing.T) { // compare that traversers get consistent results func TestTraverserContainerAndLocalDirectory(t *testing.T) { a := assert.New(t) - bsu := getBSU() - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) + bsc := getBlobServiceClient() + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) - fsu := getFSU() - shareURL, shareName := createNewAzureShare(a, fsu) - defer deleteShare(a, shareURL) + fsc := getFileServiceClient() + sc, shareName := createNewShare(a, fsc) + defer deleteShare(a, sc) bfsu := GetBFSSU() filesystemURL, _ := createNewFilesystem(a, bfsu) @@ -655,11 +679,11 @@ func TestTraverserContainerAndLocalDirectory(t *testing.T) { } // set up the container with numerous blobs - fileList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, "") - a.NotNil(containerURL) + fileList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, cc, "") + a.NotNil(cc) // set up an Azure File Share with the same files - scenarioHelper{}.generateAzureFilesFromList(a, shareURL, fileList) + scenarioHelper{}.generateShareFilesFromList(a, sc, fsc, fileList) // set up a filesystem with the same files scenarioHelper{}.generateBFSPathsFromList(a, filesystemURL, fileList) @@ -689,9 +713,9 @@ func TestTraverserContainerAndLocalDirectory(t *testing.T) { // construct a blob traverser ctx := context.WithValue(context.TODO(), ste.ServiceAPIVersionOverride, ste.DefaultServiceApiVersion) - p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{}) - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) - blobTraverser := newBlobTraverser(&rawContainerURLWithSAS, p, ctx, isRecursiveOn, false, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None(), false) + rawContainerURLWithSAS := scenarioHelper{}.getContainerClientWithSAS(a, containerName).URL() + blobServiceClientWithSAS := scenarioHelper{}.getBlobServiceClientWithSASFromURL(a, rawContainerURLWithSAS) + blobTraverser := newBlobTraverser(rawContainerURLWithSAS, blobServiceClientWithSAS, ctx, isRecursiveOn, false, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None(), false) // invoke the local traversal with a dummy processor blobDummyProcessor := dummyProcessor{} @@ -699,9 +723,9 @@ func TestTraverserContainerAndLocalDirectory(t *testing.T) { a.Nil(err) // construct an Azure File traverser - filePipeline := azfile.NewPipeline(azfile.NewAnonymousCredential(), azfile.PipelineOptions{}) - rawFileURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, shareName) - azureFileTraverser := newFileTraverser(&rawFileURLWithSAS, filePipeline, ctx, isRecursiveOn, false, func(common.EntityType) {}, common.ETrailingDotOption.Enable(), 
nil) + rawShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, shareName).String() + fileServiceClientWithSAS := scenarioHelper{}.getFileServiceClientWithSASFromURL(a, rawShareURLWithSAS) + azureFileTraverser := newFileTraverser(rawShareURLWithSAS, fileServiceClientWithSAS, ctx, isRecursiveOn, false, func(common.EntityType) {}, common.ETrailingDotOption.Enable(), nil) // invoke the file traversal with a dummy processor fileDummyProcessor := dummyProcessor{} @@ -773,13 +797,13 @@ func TestTraverserContainerAndLocalDirectory(t *testing.T) { // compare that blob and local traversers get consistent results func TestTraverserWithVirtualAndLocalDirectory(t *testing.T) { a := assert.New(t) - bsu := getBSU() - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) + bsc := getBlobServiceClient() + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) - fsu := getFSU() - shareURL, shareName := createNewAzureShare(a, fsu) - defer deleteShare(a, shareURL) + fsc := getFileServiceClient() + sc, shareName := createNewShare(a, fsc) + defer deleteShare(a, sc) bfsu := GetBFSSU() filesystemURL, _ := createNewFilesystem(a, bfsu) @@ -801,11 +825,11 @@ func TestTraverserWithVirtualAndLocalDirectory(t *testing.T) { // set up the container with numerous blobs virDirName := "virdir" - fileList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, virDirName+"/") - a.NotNil(containerURL) + fileList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, cc, virDirName+"/") + a.NotNil(cc) // set up an Azure File Share with the same files - scenarioHelper{}.generateAzureFilesFromList(a, shareURL, fileList) + scenarioHelper{}.generateShareFilesFromList(a, sc, fsc, fileList) // set up the filesystem with the same files scenarioHelper{}.generateBFSPathsFromList(a, filesystemURL, fileList) @@ -838,9 +862,9 @@ func TestTraverserWithVirtualAndLocalDirectory(t *testing.T) { // construct a blob traverser ctx := context.WithValue(context.TODO(), ste.ServiceAPIVersionOverride, ste.DefaultServiceApiVersion) - p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{}) - rawVirDirURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, virDirName) - blobTraverser := newBlobTraverser(&rawVirDirURLWithSAS, p, ctx, isRecursiveOn, false, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None(), false) + rawVirDirURLWithSAS := scenarioHelper{}.getBlobClientWithSAS(a, containerName, virDirName).URL() + serviceClientWithSAS := scenarioHelper{}.getBlobServiceClientWithSASFromURL(a, rawVirDirURLWithSAS) + blobTraverser := newBlobTraverser(rawVirDirURLWithSAS, serviceClientWithSAS, ctx, isRecursiveOn, false, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None(), false) // invoke the local traversal with a dummy processor blobDummyProcessor := dummyProcessor{} @@ -848,9 +872,9 @@ func TestTraverserWithVirtualAndLocalDirectory(t *testing.T) { a.Nil(err) // construct an Azure File traverser - filePipeline := azfile.NewPipeline(azfile.NewAnonymousCredential(), azfile.PipelineOptions{}) - rawFileURLWithSAS := scenarioHelper{}.getRawFileURLWithSAS(a, shareName, virDirName) - azureFileTraverser := newFileTraverser(&rawFileURLWithSAS, filePipeline, ctx, isRecursiveOn, false, func(common.EntityType) {}, common.ETrailingDotOption.Enable(), nil) + rawFileURLWithSAS := 
scenarioHelper{}.getRawFileURLWithSAS(a, shareName, virDirName).String() + fileServiceClientWithSAS := scenarioHelper{}.getFileServiceClientWithSASFromURL(a, rawFileURLWithSAS) + azureFileTraverser := newFileTraverser(rawFileURLWithSAS, fileServiceClientWithSAS, ctx, isRecursiveOn, false, func(common.EntityType) {}, common.ETrailingDotOption.Enable(), nil) // invoke the file traversal with a dummy processor fileDummyProcessor := dummyProcessor{} @@ -922,25 +946,25 @@ func TestTraverserWithVirtualAndLocalDirectory(t *testing.T) { // compare that the serial and parallel blob traversers get consistent results func TestSerialAndParallelBlobTraverser(t *testing.T) { a := assert.New(t) - bsu := getBSU() - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) + bsc := getBlobServiceClient() + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) // set up the container with numerous blobs virDirName := "virdir" - scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, virDirName+"/") - a.NotNil(containerURL) + scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, cc, virDirName+"/") + a.NotNil(cc) // test two scenarios, either recursive or not for _, isRecursiveOn := range []bool{true, false} { // construct a parallel blob traverser ctx := context.WithValue(context.TODO(), ste.ServiceAPIVersionOverride, ste.DefaultServiceApiVersion) - p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{}) - rawVirDirURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, virDirName) - parallelBlobTraverser := newBlobTraverser(&rawVirDirURLWithSAS, p, ctx, isRecursiveOn, false, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None(), false) + rawVirDirURLWithSAS := scenarioHelper{}.getBlobClientWithSAS(a, containerName, virDirName).URL() + serviceClientWithSAS := scenarioHelper{}.getBlobServiceClientWithSASFromURL(a, rawVirDirURLWithSAS) + parallelBlobTraverser := newBlobTraverser(rawVirDirURLWithSAS, serviceClientWithSAS, ctx, isRecursiveOn, false, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None(), false) // construct a serial blob traverser - serialBlobTraverser := newBlobTraverser(&rawVirDirURLWithSAS, p, ctx, isRecursiveOn, false, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None(), false) + serialBlobTraverser := newBlobTraverser(rawVirDirURLWithSAS, serviceClientWithSAS, ctx, isRecursiveOn, false, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None(), false) serialBlobTraverser.parallelListing = false // invoke the parallel traversal with a dummy processor diff --git a/cmd/zt_make_test.go b/cmd/zt_make_test.go new file mode 100644 index 000000000..564405f39 --- /dev/null +++ b/cmd/zt_make_test.go @@ -0,0 +1,194 @@ +// Copyright © 2017 Microsoft +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright 
notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package cmd + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func runMakeAndVerify(raw rawMakeCmdArgs, verifier func(err error)) { + // the simulated user input should parse properly + cooked, err := raw.cook() + if err != nil { + verifier(err) + return + } + + // the enumeration ends when process() returns + err = cooked.process() + + // the err is passed to verified, which knows whether it is expected or not + verifier(err) +} + +func TestMakeBlobContainer(t *testing.T) { + a := assert.New(t) + bsc := getBlobServiceClient() + cc, name := getContainerClient(a, bsc) + defer deleteContainer(a, cc) + + bscSAS := scenarioHelper{}.getBlobServiceClientWithSAS(a) + ccSAS := bscSAS.NewContainerClient(name) + + args := rawMakeCmdArgs{ + resourceToCreate: ccSAS.URL(), + } + + runMakeAndVerify(args, func(err error) { + a.Nil(err) + _, err = cc.GetProperties(ctx, nil) + a.Nil(err) + }) +} + +func TestMakeBlobContainerExists(t *testing.T) { + a := assert.New(t) + bsc := getBlobServiceClient() + cc, name := createNewContainer(a, bsc) + defer deleteContainer(a, cc) + + bscSAS := scenarioHelper{}.getBlobServiceClientWithSAS(a) + ccSAS := bscSAS.NewContainerClient(name) + + args := rawMakeCmdArgs{ + resourceToCreate: ccSAS.URL(), + } + + runMakeAndVerify(args, func(err error) { + a.NotNil(err) + a.Equal("the container already exists", err.Error()) + _, err = cc.GetProperties(ctx, nil) + a.Nil(err) + }) +} + +func TestMakeBlobFSFilesystem(t *testing.T) { + a := assert.New(t) + bsc := GetBFSSU() + fsc, name := getFilesystemURL(a, bsc) + defer deleteFilesystem(a, fsc) + + bscSAS := scenarioHelper{}.getRawAdlsServiceURLWithSAS(a) + ccSAS := bscSAS.NewFileSystemURL(name) + + args := rawMakeCmdArgs{ + resourceToCreate: ccSAS.String(), + } + + runMakeAndVerify(args, func(err error) { + a.Nil(err) + _, err = fsc.GetProperties(ctx) + a.Nil(err) + }) +} + +func TestMakeBlobFSFilesystemExists(t *testing.T) { + a := assert.New(t) + bsc := GetBFSSU() + fsc, name := getFilesystemURL(a, bsc) + _, err := fsc.Create(ctx) + a.Nil(err) + defer deleteFilesystem(a, fsc) + + bscSAS := scenarioHelper{}.getRawAdlsServiceURLWithSAS(a) + ccSAS := bscSAS.NewFileSystemURL(name) + + args := rawMakeCmdArgs{ + resourceToCreate: ccSAS.String(), + } + + runMakeAndVerify(args, func(err error) { + a.NotNil(err) + a.Equal("the file system already exists", err.Error()) + _, err = fsc.GetProperties(ctx) + a.Nil(err) + }) +} + +func TestMakeFileShare(t *testing.T) { + a := assert.New(t) + fsc := getFileServiceClient() + sc, name := getShareClient(a, fsc) + defer deleteShare(a, sc) + + fscSAS := scenarioHelper{}.getRawFileServiceURLWithSAS(a) + scSAS := fscSAS + scSAS.Path = name + + args := rawMakeCmdArgs{ + resourceToCreate: scSAS.String(), + } + + runMakeAndVerify(args, func(err error) { + a.Nil(err) + props, err := sc.GetProperties(ctx, nil) + a.Nil(err) + a.EqualValues(5120, *props.Quota) + }) +} + +func 
TestMakeFileShareQuota(t *testing.T) { + a := assert.New(t) + fsc := getFileServiceClient() + sc, name := getShareClient(a, fsc) + defer deleteShare(a, sc) + + fscSAS := scenarioHelper{}.getRawFileServiceURLWithSAS(a) + scSAS := fscSAS + scSAS.Path = name + + args := rawMakeCmdArgs{ + resourceToCreate: scSAS.String(), + quota: 5, + } + + runMakeAndVerify(args, func(err error) { + a.Nil(err) + props, err := sc.GetProperties(ctx, nil) + a.Nil(err) + a.EqualValues(args.quota, *props.Quota) + }) +} + +func TestMakeFileShareExists(t *testing.T) { + a := assert.New(t) + fsc := getFileServiceClient() + sc, name := getShareClient(a, fsc) + _, err := sc.Create(ctx, nil) + a.Nil(err) + defer deleteShare(a, sc) + + fscSAS := scenarioHelper{}.getRawFileServiceURLWithSAS(a) + scSAS := fscSAS + scSAS.Path = name + + args := rawMakeCmdArgs{ + resourceToCreate: scSAS.String(), + } + + runMakeAndVerify(args, func(err error) { + a.NotNil(err) + a.Equal("the file share already exists", err.Error()) + _, err = sc.GetProperties(ctx, nil) + a.Nil(err) + }) +} \ No newline at end of file diff --git a/cmd/zt_overwrite_posix_properties_test.go b/cmd/zt_overwrite_posix_properties_test.go index 79fb94c60..ecac5d1ff 100644 --- a/cmd/zt_overwrite_posix_properties_test.go +++ b/cmd/zt_overwrite_posix_properties_test.go @@ -22,6 +22,8 @@ package cmd import ( "context" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" "github.com/stretchr/testify/assert" "os" "path/filepath" @@ -31,7 +33,6 @@ import ( "time" "github.com/Azure/azure-storage-azcopy/v10/common" - "github.com/Azure/azure-storage-blob-go/azblob" ) func TestOverwritePosixProperties(t *testing.T) { @@ -39,10 +40,9 @@ func TestOverwritePosixProperties(t *testing.T) { if runtime.GOOS != "linux" { t.Skip("This test will run only on linux") } - - bsu := getBSU() - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) + bsc := getBlobServiceClient() + containerClient, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, containerClient) files := []string{ "filea", @@ -89,8 +89,11 @@ func TestOverwritePosixProperties(t *testing.T) { validateDownloadTransfersAreScheduled(a, "/", "/"+filepath.Base(dirPath)+"/", files[:], mockedRPC) }) - listBlob, err := containerURL.ListBlobsFlatSegment(context.TODO(), azblob.Marker{}, - azblob.ListBlobsSegmentOptions{Details: azblob.BlobListingDetails{Metadata: true, Tags: true}, Prefix: filepath.Base(dirPath)}) + pager := containerClient.NewListBlobsFlatPager(&container.ListBlobsFlatOptions{ + Include: container.ListBlobsInclude{Metadata: true, Tags: true}, + Prefix: to.Ptr(filepath.Base(dirPath)), + }) + listBlob, err := pager.NextPage(context.TODO()) a.Nil(err) diff --git a/cmd/zt_remove_blob_test.go b/cmd/zt_remove_blob_test.go index 0a15b6cdc..d77f91b2b 100644 --- a/cmd/zt_remove_blob_test.go +++ b/cmd/zt_remove_blob_test.go @@ -23,8 +23,12 @@ package cmd import ( "encoding/json" "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" + blobsas "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" + blobservice "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service" "github.com/Azure/azure-storage-azcopy/v10/common" - "github.com/Azure/azure-storage-blob-go/azblob" "github.com/stretchr/testify/assert" "log" "net/url" @@ -36,15 +40,15 @@ import ( func 
TestRemoveSingleBlob(t *testing.T) { a := assert.New(t) - bsu := getBSU() - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) + bsc := getBlobServiceClient() + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) for _, blobName := range []string{"top/mid/low/singleblobisbest", "打麻将.txt", "%4509%4254$85140&"} { // set up the container with a single blob blobList := []string{blobName} - scenarioHelper{}.generateBlobsFromList(a, containerURL, blobList, blockBlobDefaultData) - a.NotNil(containerURL) + scenarioHelper{}.generateBlobsFromList(a, cc, blobList, blockBlobDefaultData) + a.NotNil(cc) // set up interceptor mockedRPC := interceptor{} @@ -66,13 +70,13 @@ func TestRemoveSingleBlob(t *testing.T) { func TestRemoveBlobsUnderContainer(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, "") - a.NotNil(containerURL) + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, cc, "") + a.NotNil(cc) a.NotZero(len(blobList)) // set up interceptor @@ -113,14 +117,14 @@ func TestRemoveBlobsUnderContainer(t *testing.T) { func TestRemoveBlobsUnderVirtualDir(t *testing.T) { a := assert.New(t) t.Skip("Enable after setting Account to non-HNS") - bsu := getBSU() + bsc := getBlobServiceClient() vdirName := "vdir1/vdir2/vdir3/" // set up the container with numerous blobs - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, vdirName) - a.NotNil(containerURL) + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, cc, vdirName) + a.NotNil(cc) a.NotZero(len(blobList)) // set up interceptor @@ -161,18 +165,18 @@ func TestRemoveBlobsUnderVirtualDir(t *testing.T) { // include flag limits the scope of the delete func TestRemoveWithIncludeFlag(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(a, bsu) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, "") - defer deleteContainer(a, containerURL) - a.NotNil(containerURL) + cc, containerName := createNewContainer(a, bsc) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, cc, "") + defer deleteContainer(a, cc) + a.NotNil(cc) a.NotZero(len(blobList)) // add special blobs that we wish to include blobsToInclude := []string{"important.pdf", "includeSub/amazing.jpeg", "exactName"} - scenarioHelper{}.generateBlobsFromList(a, containerURL, blobsToInclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, cc, blobsToInclude, blockBlobDefaultData) includeString := "*.pdf;*.jpeg;exactName" // set up interceptor @@ -195,18 +199,18 @@ func TestRemoveWithIncludeFlag(t *testing.T) { // exclude flag limits the scope of the delete func TestRemoveWithExcludeFlag(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(a, bsu) 
- blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, "") - defer deleteContainer(a, containerURL) - a.NotNil(containerURL) + cc, containerName := createNewContainer(a, bsc) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, cc, "") + defer deleteContainer(a, cc) + a.NotNil(cc) a.NotZero(len(blobList)) // add special blobs that we wish to exclude blobsToExclude := []string{"notGood.pdf", "excludeSub/lame.jpeg", "exactName"} - scenarioHelper{}.generateBlobsFromList(a, containerURL, blobsToExclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, cc, blobsToExclude, blockBlobDefaultData) excludeString := "*.pdf;*.jpeg;exactName" // set up interceptor @@ -230,24 +234,24 @@ func TestRemoveWithExcludeFlag(t *testing.T) { // include and exclude flag can work together to limit the scope of the delete func TestRemoveWithIncludeAndExcludeFlag(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(a, bsu) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, "") - defer deleteContainer(a, containerURL) - a.NotNil(containerURL) + cc, containerName := createNewContainer(a, bsc) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, cc, "") + defer deleteContainer(a, cc) + a.NotNil(cc) a.NotZero(len(blobList)) // add special blobs that we wish to include blobsToInclude := []string{"important.pdf", "includeSub/amazing.jpeg"} - scenarioHelper{}.generateBlobsFromList(a, containerURL, blobsToInclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, cc, blobsToInclude, blockBlobDefaultData) includeString := "*.pdf;*.jpeg;exactName" // add special blobs that we wish to exclude // note that the excluded files also match the include string blobsToExclude := []string{"sorry.pdf", "exclude/notGood.jpeg", "exactName", "sub/exactName"} - scenarioHelper{}.generateBlobsFromList(a, containerURL, blobsToExclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, cc, blobsToExclude, blockBlobDefaultData) excludeString := "so*;not*;exactName" // set up interceptor @@ -272,15 +276,15 @@ func TestRemoveWithIncludeAndExcludeFlag(t *testing.T) { func TestRemoveListOfBlobsAndVirtualDirs(t *testing.T) { a := assert.New(t) t.Skip("Enable after setting Account to non-HNS") - bsu := getBSU() + bsc := getBlobServiceClient() vdirName := "megadir" // set up the container with numerous blobs and a vdir - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) - a.NotNil(containerURL) - blobListPart1 := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, "") - blobListPart2 := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, vdirName+"/") + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) + a.NotNil(cc) + blobListPart1 := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, cc, "") + blobListPart2 := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, cc, vdirName+"/") blobList := append(blobListPart1, blobListPart2...) 
a.NotZero(len(blobList)) @@ -336,25 +340,25 @@ func TestRemoveListOfBlobsAndVirtualDirs(t *testing.T) { // note: list-of-files flag is used func TestRemoveListOfBlobsWithIncludeAndExclude(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() vdirName := "megadir" // set up the container with numerous blobs and a vdir - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) - a.NotNil(containerURL) - blobListPart1 := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, "") - scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, vdirName+"/") + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) + a.NotNil(cc) + blobListPart1 := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, cc, "") + scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, cc, vdirName+"/") // add special blobs that we wish to include blobsToInclude := []string{"important.pdf", "includeSub/amazing.jpeg"} - scenarioHelper{}.generateBlobsFromList(a, containerURL, blobsToInclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, cc, blobsToInclude, blockBlobDefaultData) includeString := "*.pdf;*.jpeg;exactName" // add special blobs that we wish to exclude // note that the excluded files also match the include string blobsToExclude := []string{"sorry.pdf", "exclude/notGood.jpeg", "exactName", "sub/exactName"} - scenarioHelper{}.generateBlobsFromList(a, containerURL, blobsToExclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, cc, blobsToExclude, blockBlobDefaultData) excludeString := "so*;not*;exactName" // set up interceptor @@ -395,14 +399,14 @@ func TestRemoveListOfBlobsWithIncludeAndExclude(t *testing.T) { func TestRemoveBlobsWithDirectoryStubs(t *testing.T) { a := assert.New(t) t.Skip("Enable after setting Account to non-HNS") - bsu := getBSU() + bsc := getBlobServiceClient() vdirName := "vdir1/" // set up the container with numerous blobs - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) - blobAndDirStubsList := scenarioHelper{}.generateCommonRemoteScenarioForWASB(a, containerURL, vdirName) - a.NotNil(containerURL) + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) + blobAndDirStubsList := scenarioHelper{}.generateCommonRemoteScenarioForWASB(a, cc, vdirName) + a.NotNil(cc) a.NotZero(len(blobAndDirStubsList)) // set up interceptor @@ -446,19 +450,19 @@ func TestRemoveBlobsWithDirectoryStubs(t *testing.T) { func TestRemoveBlobsWithDirectoryStubsWithListOfFiles(t *testing.T) { a := assert.New(t) t.Skip("Enable after setting Account to non-HNS") - bsu := getBSU() + bsc := getBlobServiceClient() vdirName := "vdir1/" // set up the container with numerous blobs - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) - blobAndDirStubsList := scenarioHelper{}.generateCommonRemoteScenarioForWASB(a, containerURL, vdirName) - a.NotNil(containerURL) + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) + blobAndDirStubsList := scenarioHelper{}.generateCommonRemoteScenarioForWASB(a, cc, vdirName) + a.NotNil(cc) a.NotZero(len(blobAndDirStubsList)) // set up another empty dir vdirName2 := "emptydir" - createNewDirectoryStub(a, containerURL, vdirName2) + createNewDirectoryStub(a, cc, vdirName2) blobAndDirStubsList = append(blobAndDirStubsList, vdirName2) // set up interceptor @@ -497,14 +501,14 @@ func 
TestRemoveBlobsWithDirectoryStubsWithListOfFiles(t *testing.T) { func TestDryrunRemoveSingleBlob(t *testing.T) { a := assert.New(t) - bsu := getBSU() - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) + bsc := getBlobServiceClient() + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) // set up the container with a single blob blobName := []string{"sub1/test/testing.txt"} - scenarioHelper{}.generateBlobsFromList(a, containerURL, blobName, blockBlobDefaultData) - a.NotNil(containerURL) + scenarioHelper{}.generateBlobsFromList(a, cc, blobName, blockBlobDefaultData) + a.NotNil(cc) // set up interceptor mockedRPC := interceptor{} @@ -526,21 +530,21 @@ func TestDryrunRemoveSingleBlob(t *testing.T) { msg := <-mockedLcm.dryrunLog // comparing message printed for dry run a.True(strings.Contains(msg, "DRYRUN: remove")) - a.True(strings.Contains(msg, containerURL.String())) + a.True(strings.Contains(msg, cc.URL())) a.True(strings.Contains(msg, blobName[0])) }) } func TestDryrunRemoveBlobsUnderContainer(t *testing.T) { a := assert.New(t) - bsu := getBSU() - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) + bsc := getBlobServiceClient() + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) // set up the container with a single blob blobList := []string{"AzURE2021.jpeg", "sub1/dir2/HELLO-4.txt", "sub1/test/testing.txt"} - scenarioHelper{}.generateBlobsFromList(a, containerURL, blobList, blockBlobDefaultData) - a.NotNil(containerURL) + scenarioHelper{}.generateBlobsFromList(a, cc, blobList, blockBlobDefaultData) + a.NotNil(cc) // set up interceptor mockedRPC := interceptor{} @@ -563,7 +567,7 @@ func TestDryrunRemoveBlobsUnderContainer(t *testing.T) { msg := mockedLcm.GatherAllLogs(mockedLcm.dryrunLog) for i := 0; i < len(blobList); i++ { a.True(strings.Contains(msg[i], "DRYRUN: remove")) - a.True(strings.Contains(msg[i], containerURL.String())) + a.True(strings.Contains(msg[i], cc.URL())) } a.True(testDryrunStatements(blobList, msg)) @@ -572,14 +576,14 @@ func TestDryrunRemoveBlobsUnderContainer(t *testing.T) { func TestDryrunRemoveBlobsUnderContainerJson(t *testing.T) { a := assert.New(t) - bsu := getBSU() - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) + bsc := getBlobServiceClient() + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) // set up the container with a single blob blobName := []string{"tech.txt"} - scenarioHelper{}.generateBlobsFromList(a, containerURL, blobName, blockBlobDefaultData) - a.NotNil(containerURL) + scenarioHelper{}.generateBlobsFromList(a, cc, blobName, blockBlobDefaultData) + a.NotNil(cc) // set up interceptor mockedRPC := interceptor{} @@ -612,15 +616,15 @@ func TestDryrunRemoveBlobsUnderContainerJson(t *testing.T) { func TestRemoveSingleBlobWithFromTo(t *testing.T) { a := assert.New(t) - bsu := getBSU() - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) + bsc := getBlobServiceClient() + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) for _, blobName := range []string{"top/mid/low/singleblobisbest", "打麻将.txt", "%4509%4254$85140&"} { // set up the container with a single blob blobList := []string{blobName} - scenarioHelper{}.generateBlobsFromList(a, containerURL, blobList, blockBlobDefaultData) - a.NotNil(containerURL) + scenarioHelper{}.generateBlobsFromList(a, 
cc, blobList, blockBlobDefaultData) + a.NotNil(cc) // set up interceptor mockedRPC := interceptor{} @@ -643,13 +647,13 @@ func TestRemoveSingleBlobWithFromTo(t *testing.T) { func TestRemoveBlobsUnderContainerWithFromTo(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, "") - a.NotNil(containerURL) + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, cc, "") + a.NotNil(cc) a.NotZero(len(blobList)) // set up interceptor @@ -691,14 +695,14 @@ func TestRemoveBlobsUnderContainerWithFromTo(t *testing.T) { func TestRemoveBlobsUnderVirtualDirWithFromTo(t *testing.T) { a := assert.New(t) t.Skip("Enable after setting Account to non-HNS") - bsu := getBSU() + bsc := getBlobServiceClient() vdirName := "vdir1/vdir2/vdir3/" // set up the container with numerous blobs - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, vdirName) - a.NotNil(containerURL) + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, cc, vdirName) + a.NotNil(cc) a.NotZero(len(blobList)) // set up interceptor @@ -739,19 +743,24 @@ func TestRemoveBlobsUnderVirtualDirWithFromTo(t *testing.T) { func TestPermDeleteSnapshotsVersionsUnderSingleBlob(t *testing.T) { a := assert.New(t) - serviceURL := setUpAccountPermDelete(a) + bsc := setUpAccountPermDelete(a) os.Setenv("AZCOPY_DISABLE_HIERARCHICAL_SCAN", "true") time.Sleep(time.Second * 10) // set up the container with numerous blobs - containerURL, containerName := createNewContainer(a, serviceURL) - defer deleteContainer(a, containerURL) - blobName, blobList, _ := scenarioHelper{}.generateCommonRemoteScenarioForSoftDelete(a, containerURL, "") - a.NotNil(containerURL) + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) + blobName, blobList, _ := scenarioHelper{}.generateCommonRemoteScenarioForSoftDelete(a, cc, "") + a.NotNil(cc) a.Equal(3, len(blobList)) - list, _ := containerURL.ListBlobsFlatSegment(ctx, azblob.Marker{}, azblob.ListBlobsSegmentOptions{Details: azblob.BlobListingDetails{Deleted: true, Snapshots: true}, Prefix: blobName}) + pager := cc.NewListBlobsFlatPager(&container.ListBlobsFlatOptions{ + Prefix: to.Ptr(blobName), + Include: container.ListBlobsInclude{Deleted: true, Snapshots: true}, + }) + list, err := pager.NextPage(ctx) + a.Nil(err) a.NotNil(list.Segment.BlobItems) a.Equal(4, len(list.Segment.BlobItems)) @@ -775,16 +784,16 @@ func TestPermDeleteSnapshotsVersionsUnderSingleBlob(t *testing.T) { func TestPermDeleteSnapshotsVersionsUnderContainer(t *testing.T) { a := assert.New(t) - serviceURL := setUpAccountPermDelete(a) + bsc := setUpAccountPermDelete(a) os.Setenv("AZCOPY_DISABLE_HIERARCHICAL_SCAN", "true") time.Sleep(time.Second * 10) // set up the container with numerous blobs - containerURL, containerName := createNewContainer(a, serviceURL) - defer deleteContainer(a, containerURL) - _, blobList, listOfTransfers := scenarioHelper{}.generateCommonRemoteScenarioForSoftDelete(a, containerURL, "") - a.NotNil(containerURL) + cc, containerName := 
createNewContainer(a, bsc) + defer deleteContainer(a, cc) + _, blobList, listOfTransfers := scenarioHelper{}.generateCommonRemoteScenarioForSoftDelete(a, cc, "") + a.NotNil(cc) a.Equal(3, len(blobList)) // set up interceptor @@ -805,34 +814,37 @@ func TestPermDeleteSnapshotsVersionsUnderContainer(t *testing.T) { }) } -func setUpAccountPermDelete(a *assert.Assertions) azblob.ServiceURL { +func setUpAccountPermDelete(a *assert.Assertions) *blobservice.Client { accountName, accountKey := getAccountAndKey() - u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/", accountName)) + rawURL := fmt.Sprintf("https://%s.blob.core.windows.net/", accountName) - credential, err := azblob.NewSharedKeyCredential(accountName, accountKey) + credential, err := blob.NewSharedKeyCredential(accountName, accountKey) if err != nil { panic(err) } - sasQueryParams, err := azblob.AccountSASSignatureValues{ - Protocol: azblob.SASProtocolHTTPS, - ExpiryTime: time.Now().UTC().Add(12 * time.Hour), // 12 hr long sas - Permissions: azblob.AccountSASPermissions{Read: true, List: true, Write: true, Create: true, PermanentDelete: true, Delete: true, DeletePreviousVersion: true}.String(), - Services: azblob.AccountSASServices{Blob: true}.String(), - ResourceTypes: azblob.AccountSASResourceTypes{Service: true, Container: true, Object: true}.String(), - }.NewSASQueryParameters(credential) + client, err := blobservice.NewClientWithSharedKeyCredential(rawURL, credential, nil) if err != nil { log.Fatal(err) } - qp := sasQueryParams.Encode() - accountURLWithSAS := fmt.Sprintf("https://%s.blob.core.windows.net?%s", accountName, qp) - u, _ = url.Parse(accountURLWithSAS) - serviceURL := azblob.NewServiceURL(*u, azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{})) - days := int32(5) - allowDelete := true - _, err = serviceURL.SetProperties(ctx, azblob.StorageServiceProperties{DeleteRetentionPolicy: &azblob.RetentionPolicy{Enabled: true, Days: &days, AllowPermanentDelete: &allowDelete}}) + sasURL, err := client.GetSASURL( + blobsas.AccountResourceTypes{Service: true, Container: true, Object: true}, + blobsas.AccountPermissions{Read: true, List: true, Write: true, Delete: true, PermanentDelete: true, DeletePreviousVersion: true, Add: true, Create: true, Update: true, Process: true, Tag: true}, + time.Now().Add(12*time.Hour), + nil) + + if err != nil { + log.Fatal(err) + } + client, err = blobservice.NewClientWithNoCredential(sasURL, nil) + if err != nil { + log.Fatal(err) + } + _, err = client.SetProperties(ctx, &blobservice.SetPropertiesOptions{ + DeleteRetentionPolicy: &blobservice.RetentionPolicy{Enabled: to.Ptr(true), Days: to.Ptr(int32(5)), AllowPermanentDelete: to.Ptr(true)}, + }) a.Nil(err) - return serviceURL + return client } diff --git a/cmd/zt_remove_copy_test.go b/cmd/zt_remove_copy_test.go index ce47be8bd..b2e5cbfc7 100644 --- a/cmd/zt_remove_copy_test.go +++ b/cmd/zt_remove_copy_test.go @@ -9,18 +9,18 @@ import ( func TestCopyBlobsWithDirectoryStubsS2S(t *testing.T) { a := assert.New(t) t.Skip("Enable after setting Account to non-HNS") - bsu := getBSU() + bsc := getBlobServiceClient() vdirName := "vdir1/" // create container and dest container - srcContainerURL, srcContainerName := createNewContainer(a, bsu) - dstContainerURL, dstContainerName := createNewContainer(a, bsu) + srcContainerClient, srcContainerName := createNewContainer(a, bsc) + dstContainerClient, dstContainerName := createNewContainer(a, bsc) dstBlobName := "testcopyblobswithdirectorystubs" + generateBlobName() - defer 
deleteContainer(a, srcContainerURL) - defer deleteContainer(a, dstContainerURL) + defer deleteContainer(a, srcContainerClient) + defer deleteContainer(a, dstContainerClient) - blobAndDirStubsList := scenarioHelper{}.generateCommonRemoteScenarioForWASB(a, srcContainerURL, vdirName) - a.NotNil(srcContainerURL) + blobAndDirStubsList := scenarioHelper{}.generateCommonRemoteScenarioForWASB(a, srcContainerClient, vdirName) + a.NotNil(srcContainerClient) a.NotZero(len(blobAndDirStubsList)) // set up interceptor diff --git a/cmd/zt_remove_file_test.go b/cmd/zt_remove_file_test.go index 542644627..2d96e3406 100644 --- a/cmd/zt_remove_file_test.go +++ b/cmd/zt_remove_file_test.go @@ -30,15 +30,15 @@ import ( func TestRemoveSingleFile(t *testing.T) { a := assert.New(t) - fsu := getFSU() - shareURL, shareName := createNewAzureShare(a, fsu) - defer deleteShare(a, shareURL) + fsc := getFileServiceClient() + shareClient, shareName := createNewShare(a, fsc) + defer deleteShare(a, shareClient) for _, fileName := range []string{"top/mid/low/singlefileisbest", "打麻将.txt", "%4509%4254$85140&"} { // set up the share with a single file fileList := []string{fileName} - scenarioHelper{}.generateAzureFilesFromList(a, shareURL, fileList) - a.NotNil(shareURL) + scenarioHelper{}.generateShareFilesFromList(a, shareClient, fsc, fileList) + a.NotNil(shareClient) // set up interceptor mockedRPC := interceptor{} @@ -60,13 +60,13 @@ func TestRemoveSingleFile(t *testing.T) { func TestRemoveFilesUnderShare(t *testing.T) { a := assert.New(t) - fsu := getFSU() + fsc := getFileServiceClient() // set up the share with numerous files - shareURL, shareName := createNewAzureShare(a, fsu) - defer deleteShare(a, shareURL) - fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, shareURL, "") - a.NotNil(shareURL) + shareClient, shareName := createNewShare(a, fsc) + defer deleteShare(a, shareClient) + fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, shareClient, fsc, "") + a.NotNil(shareClient) a.NotZero(len(fileList)) // set up interceptor @@ -112,14 +112,14 @@ func TestRemoveFilesUnderShare(t *testing.T) { func TestRemoveFilesUnderDirectory(t *testing.T) { a := assert.New(t) - fsu := getFSU() + fsc := getFileServiceClient() dirName := "dir1/dir2/dir3/" // set up the share with numerous files - shareURL, shareName := createNewAzureShare(a, fsu) - defer deleteShare(a, shareURL) - fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, shareURL, dirName) - a.NotNil(shareURL) + shareClient, shareName := createNewShare(a, fsc) + defer deleteShare(a, shareClient) + fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, shareClient, fsc, dirName) + a.NotNil(shareClient) a.NotZero(len(fileList)) // set up interceptor @@ -169,18 +169,18 @@ func TestRemoveFilesUnderDirectory(t *testing.T) { // include flag limits the scope of the delete func TestRemoveFilesWithIncludeFlag(t *testing.T) { a := assert.New(t) - fsu := getFSU() + fsc := getFileServiceClient() // set up the share with numerous files - shareURL, shareName := createNewAzureShare(a, fsu) - fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, shareURL, "") - defer deleteShare(a, shareURL) - a.NotNil(shareURL) + shareClient, shareName := createNewShare(a, fsc) + fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, shareClient, fsc, "") + defer deleteShare(a, shareClient) + a.NotNil(shareClient) a.NotZero(len(fileList)) // add special files that we wish to include 
filesToInclude := []string{"important.pdf", "includeSub/amazing.jpeg", "exactName"} - scenarioHelper{}.generateAzureFilesFromList(a, shareURL, filesToInclude) + scenarioHelper{}.generateShareFilesFromList(a, shareClient, fsc, filesToInclude) includeString := "*.pdf;*.jpeg;exactName" // set up interceptor @@ -203,18 +203,18 @@ func TestRemoveFilesWithIncludeFlag(t *testing.T) { // exclude flag limits the scope of the delete func TestRemoveFilesWithExcludeFlag(t *testing.T) { a := assert.New(t) - fsu := getFSU() + fsc := getFileServiceClient() // set up the share with numerous files - shareURL, shareName := createNewAzureShare(a, fsu) - fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, shareURL, "") - defer deleteShare(a, shareURL) - a.NotNil(shareURL) + shareClient, shareName := createNewShare(a, fsc) + fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, shareClient, fsc, "") + defer deleteShare(a, shareClient) + a.NotNil(shareClient) a.NotZero(len(fileList)) // add special files that we wish to exclude filesToExclude := []string{"notGood.pdf", "excludeSub/lame.jpeg", "exactName"} - scenarioHelper{}.generateAzureFilesFromList(a, shareURL, filesToExclude) + scenarioHelper{}.generateShareFilesFromList(a, shareClient, fsc, filesToExclude) excludeString := "*.pdf;*.jpeg;exactName" // set up interceptor @@ -237,24 +237,24 @@ func TestRemoveFilesWithExcludeFlag(t *testing.T) { // include and exclude flag can work together to limit the scope of the delete func TestRemoveFilesWithIncludeAndExcludeFlag(t *testing.T) { a := assert.New(t) - fsu := getFSU() + fsc := getFileServiceClient() // set up the share with numerous files - shareURL, shareName := createNewAzureShare(a, fsu) - fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, shareURL, "") - defer deleteShare(a, shareURL) - a.NotNil(shareURL) + shareClient, shareName := createNewShare(a, fsc) + fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, shareClient, fsc, "") + defer deleteShare(a, shareClient) + a.NotNil(shareClient) a.NotZero(len(fileList)) // add special files that we wish to include filesToInclude := []string{"important.pdf", "includeSub/amazing.jpeg"} - scenarioHelper{}.generateAzureFilesFromList(a, shareURL, filesToInclude) + scenarioHelper{}.generateShareFilesFromList(a, shareClient, fsc, filesToInclude) includeString := "*.pdf;*.jpeg;exactName" // add special files that we wish to exclude // note that the excluded files also match the include string filesToExclude := []string{"sorry.pdf", "exclude/notGood.jpeg", "exactName", "sub/exactName"} - scenarioHelper{}.generateAzureFilesFromList(a, shareURL, filesToExclude) + scenarioHelper{}.generateShareFilesFromList(a, shareClient, fsc, filesToExclude) excludeString := "so*;not*;exactName" // set up interceptor @@ -278,15 +278,15 @@ func TestRemoveFilesWithIncludeAndExcludeFlag(t *testing.T) { // note: list-of-files flag is used func TestRemoveListOfFilesAndDirectories(t *testing.T) { a := assert.New(t) - fsu := getFSU() + fsc := getFileServiceClient() dirName := "megadir" // set up the share with numerous files - shareURL, shareName := createNewAzureShare(a, fsu) - a.NotNil(shareURL) - defer deleteShare(a, shareURL) - individualFilesList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, shareURL, "") - filesUnderTopDir := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, shareURL, dirName+"/") + shareClient, shareName := createNewShare(a, fsc) + a.NotNil(shareClient) + defer 
deleteShare(a, shareClient) + individualFilesList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, shareClient, fsc, "") + filesUnderTopDir := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, shareClient, fsc, dirName+"/") combined := append(individualFilesList, filesUnderTopDir...) a.NotZero(len(combined)) @@ -346,25 +346,25 @@ func TestRemoveListOfFilesAndDirectories(t *testing.T) { // include and exclude flag can work together to limit the scope of the delete func TestRemoveListOfFilesWithIncludeAndExclude(t *testing.T) { a := assert.New(t) - fsu := getFSU() + fsc := getFileServiceClient() dirName := "megadir" // set up the share with numerous files - shareURL, shareName := createNewAzureShare(a, fsu) - a.NotNil(shareURL) - defer deleteShare(a, shareURL) - individualFilesList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, shareURL, "") - scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, shareURL, dirName+"/") + shareClient, shareName := createNewShare(a, fsc) + a.NotNil(shareClient) + defer deleteShare(a, shareClient) + individualFilesList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, shareClient, fsc, "") + scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, shareClient, fsc, dirName+"/") // add special files that we wish to include filesToInclude := []string{"important.pdf", "includeSub/amazing.jpeg"} - scenarioHelper{}.generateAzureFilesFromList(a, shareURL, filesToInclude) + scenarioHelper{}.generateShareFilesFromList(a, shareClient, fsc, filesToInclude) includeString := "*.pdf;*.jpeg;exactName" // add special files that we wish to exclude // note that the excluded files also match the include string filesToExclude := []string{"sorry.pdf", "exclude/notGood.jpeg", "exactName", "sub/exactName"} - scenarioHelper{}.generateAzureFilesFromList(a, shareURL, filesToExclude) + scenarioHelper{}.generateShareFilesFromList(a, shareClient, fsc, filesToExclude) excludeString := "so*;not*;exactName" // set up interceptor @@ -404,15 +404,15 @@ func TestRemoveListOfFilesWithIncludeAndExclude(t *testing.T) { func TestRemoveSingleFileWithFromTo(t *testing.T) { a := assert.New(t) - fsu := getFSU() - shareURL, shareName := createNewAzureShare(a, fsu) - defer deleteShare(a, shareURL) + fsc := getFileServiceClient() + shareClient, shareName := createNewShare(a, fsc) + defer deleteShare(a, shareClient) for _, fileName := range []string{"top/mid/low/singlefileisbest", "打麻将.txt", "%4509%4254$85140&"} { // set up the share with a single file fileList := []string{fileName} - scenarioHelper{}.generateAzureFilesFromList(a, shareURL, fileList) - a.NotNil(shareURL) + scenarioHelper{}.generateShareFilesFromList(a, shareClient, fsc, fileList) + a.NotNil(shareClient) // set up interceptor mockedRPC := interceptor{} @@ -435,13 +435,13 @@ func TestRemoveSingleFileWithFromTo(t *testing.T) { func TestRemoveFilesUnderShareWithFromTo(t *testing.T) { a := assert.New(t) - fsu := getFSU() + fsc := getFileServiceClient() // set up the share with numerous files - shareURL, shareName := createNewAzureShare(a, fsu) - defer deleteShare(a, shareURL) - fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, shareURL, "") - a.NotNil(shareURL) + shareClient, shareName := createNewShare(a, fsc) + defer deleteShare(a, shareClient) + fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, shareClient, fsc, "") + a.NotNil(shareClient) a.NotZero(len(fileList)) // set up interceptor @@ -488,14 +488,14 @@ func 
TestRemoveFilesUnderShareWithFromTo(t *testing.T) { func TestRemoveFilesUnderDirectoryWithFromTo(t *testing.T) { a := assert.New(t) - fsu := getFSU() + fsc := getFileServiceClient() dirName := "dir1/dir2/dir3/" // set up the share with numerous files - shareURL, shareName := createNewAzureShare(a, fsu) - defer deleteShare(a, shareURL) - fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, shareURL, dirName) - a.NotNil(shareURL) + shareClient, shareName := createNewShare(a, fsc) + defer deleteShare(a, shareClient) + fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, shareClient, fsc, dirName) + a.NotNil(shareClient) a.NotZero(len(fileList)) // set up interceptor diff --git a/cmd/zt_scenario_helpers_for_test.go b/cmd/zt_scenario_helpers_for_test.go index 684806c4e..b792344c9 100644 --- a/cmd/zt_scenario_helpers_for_test.go +++ b/cmd/zt_scenario_helpers_for_test.go @@ -23,6 +23,17 @@ package cmd import ( "context" "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob" + blobservice "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service" + sharefile "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" + fileservice "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/service" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/share" "github.com/stretchr/testify/assert" "io" "net/url" @@ -39,8 +50,6 @@ import ( "github.com/minio/minio-go" "github.com/Azure/azure-storage-azcopy/v10/common" - "github.com/Azure/azure-storage-blob-go/azblob" - "github.com/Azure/azure-storage-file-go/azfile" ) const defaultFileSize = 1024 @@ -133,34 +142,35 @@ func (s scenarioHelper) generateCommonRemoteScenarioForLocal(a *assert.Assertion return } -func (scenarioHelper) generateCommonRemoteScenarioForSoftDelete(a *assert.Assertions, containerURL azblob.ContainerURL, prefix string) (string, []azblob.BlockBlobURL, []string) { - blobList := make([]azblob.BlockBlobURL, 3) +func (scenarioHelper) generateCommonRemoteScenarioForSoftDelete(a *assert.Assertions, containerClient *container.Client, prefix string) (string, []*blockblob.Client, []string) { + blobList := make([]*blockblob.Client, 3) blobNames := make([]string, 3) var listOfTransfers []string - blobURL1, blobName1 := createNewBlockBlob(a, containerURL, prefix+"top") - blobURL2, blobName2 := createNewBlockBlob(a, containerURL, prefix+"sub1/") - blobURL3, blobName3 := createNewBlockBlob(a, containerURL, prefix+"sub1/sub3/sub5/") + blobClient1, blobName1 := createNewBlockBlob(a, containerClient, prefix+"top") + blobClient2, blobName2 := createNewBlockBlob(a, containerClient, prefix+"sub1/") + blobClient3, blobName3 := createNewBlockBlob(a, containerClient, prefix+"sub1/sub3/sub5/") - blobList[0] = blobURL1 + blobList[0] = blobClient1 blobNames[0] = blobName1 - blobList[1] = blobURL2 + blobList[1] = blobClient2 blobNames[1] = blobName2 - blobList[2] = blobURL3 + blobList[2] = blobClient3 blobNames[2] = blobName3 for i := 0; i < len(blobList); i++ { for j := 0; j < 3; j++ { // create 3 soft-deleted snapshots for each blob // Create snapshot for blob - snapResp, err := blobList[i].CreateSnapshot(ctx, azblob.Metadata{}, 
azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{}) + snapResp, err := blobList[i].CreateSnapshot(ctx, nil) a.NotNil(snapResp) a.Nil(err) time.Sleep(time.Millisecond * 30) // Soft delete snapshot - snapshotBlob := blobList[i].WithSnapshot(snapResp.Snapshot()) - _, err = snapshotBlob.Delete(ctx, azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{}) + snapshotBlob, err := blobList[i].WithSnapshot(*snapResp.Snapshot) + a.Nil(err) + _, err = snapshotBlob.Delete(ctx, nil) a.Nil(err) listOfTransfers = append(listOfTransfers, blobNames[i]) @@ -172,15 +182,15 @@ func (scenarioHelper) generateCommonRemoteScenarioForSoftDelete(a *assert.Assert return blobName1, blobList, listOfTransfers } -func (scenarioHelper) generateCommonRemoteScenarioForBlob(a *assert.Assertions, containerURL azblob.ContainerURL, prefix string) (blobList []string) { +func (scenarioHelper) generateCommonRemoteScenarioForBlob(a *assert.Assertions, containerClient *container.Client, prefix string) (blobList []string) { blobList = make([]string, 50) for i := 0; i < 10; i++ { - _, blobName1 := createNewBlockBlob(a, containerURL, prefix+"top") - _, blobName2 := createNewBlockBlob(a, containerURL, prefix+"sub1/") - _, blobName3 := createNewBlockBlob(a, containerURL, prefix+"sub2/") - _, blobName4 := createNewBlockBlob(a, containerURL, prefix+"sub1/sub3/sub5/") - _, blobName5 := createNewBlockBlob(a, containerURL, prefix+specialNames[i]) + _, blobName1 := createNewBlockBlob(a, containerClient, prefix+"top") + _, blobName2 := createNewBlockBlob(a, containerClient, prefix+"sub1/") + _, blobName3 := createNewBlockBlob(a, containerClient, prefix+"sub2/") + _, blobName4 := createNewBlockBlob(a, containerClient, prefix+"sub1/sub3/sub5/") + _, blobName5 := createNewBlockBlob(a, containerClient, prefix+specialNames[i]) blobList[5*i] = blobName1 blobList[5*i+1] = blobName2 @@ -195,15 +205,15 @@ func (scenarioHelper) generateCommonRemoteScenarioForBlob(a *assert.Assertions, } // same as blob, but for every virtual directory, a blob with the same name is created, and it has metadata 'hdi_isfolder = true' -func (scenarioHelper) generateCommonRemoteScenarioForWASB(a *assert.Assertions, containerURL azblob.ContainerURL, prefix string) (blobList []string) { +func (scenarioHelper) generateCommonRemoteScenarioForWASB(a *assert.Assertions, containerClient *container.Client, prefix string) (blobList []string) { blobList = make([]string, 50) for i := 0; i < 10; i++ { - _, blobName1 := createNewBlockBlob(a, containerURL, prefix+"top") - _, blobName2 := createNewBlockBlob(a, containerURL, prefix+"sub1/") - _, blobName3 := createNewBlockBlob(a, containerURL, prefix+"sub2/") - _, blobName4 := createNewBlockBlob(a, containerURL, prefix+"sub1/sub3/sub5/") - _, blobName5 := createNewBlockBlob(a, containerURL, prefix+specialNames[i]) + _, blobName1 := createNewBlockBlob(a, containerClient, prefix+"top") + _, blobName2 := createNewBlockBlob(a, containerClient, prefix+"sub1/") + _, blobName3 := createNewBlockBlob(a, containerClient, prefix+"sub2/") + _, blobName4 := createNewBlockBlob(a, containerClient, prefix+"sub1/sub3/sub5/") + _, blobName5 := createNewBlockBlob(a, containerClient, prefix+specialNames[i]) blobList[5*i] = blobName1 blobList[5*i+1] = blobName2 @@ -214,14 +224,14 @@ func (scenarioHelper) generateCommonRemoteScenarioForWASB(a *assert.Assertions, if prefix != "" { rootDir := strings.TrimSuffix(prefix, "/") - createNewDirectoryStub(a, containerURL, rootDir) + createNewDirectoryStub(a, containerClient, rootDir) blobList = 
append(blobList, rootDir) } - createNewDirectoryStub(a, containerURL, prefix+"sub1") - createNewDirectoryStub(a, containerURL, prefix+"sub1/sub3") - createNewDirectoryStub(a, containerURL, prefix+"sub1/sub3/sub5") - createNewDirectoryStub(a, containerURL, prefix+"sub2") + createNewDirectoryStub(a, containerClient, prefix+"sub1") + createNewDirectoryStub(a, containerClient, prefix+"sub1/sub3") + createNewDirectoryStub(a, containerClient, prefix+"sub1/sub3/sub5") + createNewDirectoryStub(a, containerClient, prefix+"sub2") blobList = append(blobList, []string{prefix + "sub1", prefix + "sub1/sub3", prefix + "sub1/sub3/sub5", prefix + "sub2"}...) // sleep a bit so that the blobs' lmts are guaranteed to be in the past @@ -251,15 +261,15 @@ func (scenarioHelper) generateCommonRemoteScenarioForBlobFS(a *assert.Assertions return } -func (scenarioHelper) generateCommonRemoteScenarioForAzureFile(a *assert.Assertions, shareURL azfile.ShareURL, prefix string) (fileList []string) { +func (scenarioHelper) generateCommonRemoteScenarioForAzureFile(a *assert.Assertions, shareClient *share.Client, serviceClient *fileservice.Client, prefix string) (fileList []string) { fileList = make([]string, 50) for i := 0; i < 10; i++ { - _, fileName1 := createNewAzureFile(a, shareURL, prefix+"top") - _, fileName2 := createNewAzureFile(a, shareURL, prefix+"sub1/") - _, fileName3 := createNewAzureFile(a, shareURL, prefix+"sub2/") - _, fileName4 := createNewAzureFile(a, shareURL, prefix+"sub1/sub3/sub5/") - _, fileName5 := createNewAzureFile(a, shareURL, prefix+specialNames[i]) + _, fileName1 := createNewShareFile(a, shareClient, serviceClient, prefix+"top") + _, fileName2 := createNewShareFile(a, shareClient, serviceClient, prefix+"sub1/") + _, fileName3 := createNewShareFile(a, shareClient, serviceClient, prefix+"sub2/") + _, fileName4 := createNewShareFile(a, shareClient, serviceClient, prefix+"sub1/sub3/sub5/") + _, fileName5 := createNewShareFile(a, shareClient, serviceClient, prefix+specialNames[i]) fileList[5*i] = fileName1 fileList[5*i+1] = fileName2 @@ -273,23 +283,23 @@ func (scenarioHelper) generateCommonRemoteScenarioForAzureFile(a *assert.Asserti return } -func (s scenarioHelper) generateBlobContainersAndBlobsFromLists(a *assert.Assertions, serviceURL azblob.ServiceURL, containerList []string, blobList []string, data string) { +func (s scenarioHelper) generateBlobContainersAndBlobsFromLists(a *assert.Assertions, serviceClient *blobservice.Client, containerList []string, blobList []string, data string) { for _, containerName := range containerList { - curl := serviceURL.NewContainerURL(containerName) - _, err := curl.Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone) + containerClient := serviceClient.NewContainerClient(containerName) + _, err := containerClient.Create(ctx, nil) a.Nil(err) - s.generateBlobsFromList(a, curl, blobList, data) + s.generateBlobsFromList(a, containerClient, blobList, data) } } -func (s scenarioHelper) generateFileSharesAndFilesFromLists(a *assert.Assertions, serviceURL azfile.ServiceURL, shareList []string, fileList []string, data string) { +func (s scenarioHelper) generateFileSharesAndFilesFromLists(a *assert.Assertions, serviceClient *fileservice.Client, shareList []string, fileList []string, data string) { for _, shareName := range shareList { - surl := serviceURL.NewShareURL(shareName) - _, err := surl.Create(ctx, azfile.Metadata{}, 0) + shareClient := serviceClient.NewShareClient(shareName) + _, err := shareClient.Create(ctx, nil) a.Nil(err) - s.generateAzureFilesFromList(a, 
surl, fileList) + s.generateShareFilesFromList(a, shareClient, serviceClient, fileList) } } @@ -322,11 +332,10 @@ func (s scenarioHelper) generateGCPBucketsAndObjectsFromLists(a *assert.Assertio } // create the demanded blobs -func (scenarioHelper) generateBlobsFromList(a *assert.Assertions, containerURL azblob.ContainerURL, blobList []string, data string) { +func (scenarioHelper) generateBlobsFromList(a *assert.Assertions, containerClient *container.Client, blobList []string, data string) { for _, blobName := range blobList { - blob := containerURL.NewBlockBlobURL(blobName) - _, err := blob.Upload(ctx, strings.NewReader(data), azblob.BlobHTTPHeaders{}, - nil, azblob.BlobAccessConditions{}, azblob.DefaultAccessTier, nil, azblob.ClientProvidedKeyOptions{}, azblob.ImmutabilityPolicyOptions{}) + blobClient := containerClient.NewBlockBlobClient(blobName) + _, err := blobClient.Upload(ctx, streaming.NopCloser(strings.NewReader(data)), nil) a.Nil(err) } @@ -334,33 +343,21 @@ func (scenarioHelper) generateBlobsFromList(a *assert.Assertions, containerURL a time.Sleep(time.Millisecond * 1050) } -func (scenarioHelper) generatePageBlobsFromList(a *assert.Assertions, containerURL azblob.ContainerURL, blobList []string, data string) { +func (scenarioHelper) generatePageBlobsFromList(a *assert.Assertions, containerClient *container.Client, blobList []string, data string) { for _, blobName := range blobList { // Create the blob (PUT blob) - blob := containerURL.NewPageBlobURL(blobName) - _, err := blob.Create(ctx, + blobClient := containerClient.NewPageBlobClient(blobName) + _, err := blobClient.Create(ctx, int64(len(data)), - 0, - azblob.BlobHTTPHeaders{ - ContentType: "text/random", - }, - azblob.Metadata{}, - azblob.BlobAccessConditions{}, - azblob.DefaultPremiumBlobAccessTier, - nil, - azblob.ClientProvidedKeyOptions{}, - azblob.ImmutabilityPolicyOptions{}, - ) + &pageblob.CreateOptions{ + SequenceNumber: to.Ptr(int64(0)), + HTTPHeaders: &blob.HTTPHeaders{BlobContentType: to.Ptr("text/random")}, + }) a.Nil(err) // Create the page (PUT page) - _, err = blob.UploadPages(ctx, - 0, - strings.NewReader(data), - azblob.PageBlobAccessConditions{}, - nil, - azblob.ClientProvidedKeyOptions{}, - ) + _, err = blobClient.UploadPages(ctx, streaming.NopCloser(strings.NewReader(data)), + blob.HTTPRange{Offset: 0, Count: int64(len(data))}, nil) a.Nil(err) } @@ -368,28 +365,18 @@ func (scenarioHelper) generatePageBlobsFromList(a *assert.Assertions, containerU time.Sleep(time.Millisecond * 1050) } -func (scenarioHelper) generateAppendBlobsFromList(a *assert.Assertions, containerURL azblob.ContainerURL, blobList []string, data string) { +func (scenarioHelper) generateAppendBlobsFromList(a *assert.Assertions, containerClient *container.Client, blobList []string, data string) { for _, blobName := range blobList { // Create the blob (PUT blob) - blob := containerURL.NewAppendBlobURL(blobName) - _, err := blob.Create(ctx, - azblob.BlobHTTPHeaders{ - ContentType: "text/random", - }, - azblob.Metadata{}, - azblob.BlobAccessConditions{}, - nil, - azblob.ClientProvidedKeyOptions{}, - azblob.ImmutabilityPolicyOptions{}, - ) + blobClient := containerClient.NewAppendBlobClient(blobName) + _, err := blobClient.Create(ctx, + &appendblob.CreateOptions{ + HTTPHeaders: &blob.HTTPHeaders{BlobContentType: to.Ptr("text/random")}, + }) a.Nil(err) // Append a block (PUT block) - _, err = blob.AppendBlock(ctx, - strings.NewReader(data), - azblob.AppendBlobAccessConditions{}, - nil, - azblob.ClientProvidedKeyOptions{}) + _, err = 
blobClient.AppendBlock(ctx, streaming.NopCloser(strings.NewReader(data)), nil) a.Nil(err) } @@ -397,10 +384,9 @@ func (scenarioHelper) generateAppendBlobsFromList(a *assert.Assertions, containe time.Sleep(time.Millisecond * 1050) } -func (scenarioHelper) generateBlockBlobWithAccessTier(a *assert.Assertions, containerURL azblob.ContainerURL, blobName string, accessTier azblob.AccessTierType) { - blob := containerURL.NewBlockBlobURL(blobName) - _, err := blob.Upload(ctx, strings.NewReader(blockBlobDefaultData), azblob.BlobHTTPHeaders{}, - nil, azblob.BlobAccessConditions{}, accessTier, nil, azblob.ClientProvidedKeyOptions{}, azblob.ImmutabilityPolicyOptions{}) +func (scenarioHelper) generateBlockBlobWithAccessTier(a *assert.Assertions, containerClient *container.Client, blobName string, accessTier *blob.AccessTier) { + blobClient := containerClient.NewBlockBlobClient(blobName) + _, err := blobClient.Upload(ctx, streaming.NopCloser(strings.NewReader(blockBlobDefaultData)), &blockblob.UploadOptions{Tier: accessTier}) a.Nil(err) } @@ -428,10 +414,12 @@ func (scenarioHelper) generateGCPObjects(a *assert.Assertions, client *gcpUtils. } // create the demanded files -func (scenarioHelper) generateFlatFiles(a *assert.Assertions, shareURL azfile.ShareURL, fileList []string) { +func (scenarioHelper) generateFlatFiles(a *assert.Assertions, shareClient *share.Client, fileList []string) { for _, fileName := range fileList { - file := shareURL.NewRootDirectoryURL().NewFileURL(fileName) - err := azfile.UploadBufferToAzureFile(ctx, []byte(fileDefaultData), file, azfile.UploadToAzureFileOptions{}) + fileClient := shareClient.NewRootDirectoryClient().NewFileClient(fileName) + _, err := fileClient.Create(ctx, int64(len(fileDefaultData)), nil) + a.Nil(err) + err = fileClient.UploadBuffer(ctx, []byte(fileDefaultData), nil) a.Nil(err) } @@ -506,22 +494,20 @@ func (scenarioHelper) generateCommonRemoteScenarioForGCP(a *assert.Assertions, c return objectList } -// create the demanded azure files -func (scenarioHelper) generateAzureFilesFromList(a *assert.Assertions, shareURL azfile.ShareURL, fileList []string) { +func (scenarioHelper) generateShareFilesFromList(a *assert.Assertions, shareClient *share.Client, serviceClient *fileservice.Client, fileList []string) { for _, filePath := range fileList { - file := shareURL.NewRootDirectoryURL().NewFileURL(filePath) + fileClient := shareClient.NewRootDirectoryClient().NewFileClient(filePath) // create parents first - generateParentsForAzureFile(a, file) + generateParentsForShareFile(a, fileClient, serviceClient) // create the file itself - cResp, err := file.Create(ctx, defaultAzureFileSizeInBytes, azfile.FileHTTPHeaders{}, azfile.Metadata{}) + _, err := fileClient.Create(ctx, defaultAzureFileSizeInBytes, nil) a.Nil(err) - a.Equal(201, cResp.StatusCode()) } // sleep a bit so that the files' lmts are guaranteed to be in the past - time.Sleep(time.Millisecond * 1050) + time.Sleep(time.Second * 3) } func (scenarioHelper) generateBFSPathsFromList(a *assert.Assertions, filesystemURL azbfs.FileSystemURL, fileList []string) { @@ -601,37 +587,97 @@ func (scenarioHelper) addPrefix(list []string, prefix string) []string { return modifiedList } -func (scenarioHelper) getRawContainerURLWithSAS(a *assert.Assertions, containerName string) url.URL { +func (scenarioHelper) getRawContainerURLWithSAS(a *assert.Assertions, containerName string) *url.URL { + accountName, accountKey := getAccountAndKey() + credential, err := blob.NewSharedKeyCredential(accountName, accountKey) + a.Nil(err) + 
cc := getContainerClientWithSAS(a, credential, containerName) + + u := cc.URL() + parsedURL, err := url.Parse(u) + return parsedURL +} + +func (scenarioHelper) getContainerClientWithSAS(a *assert.Assertions, containerName string) *container.Client { + accountName, accountKey := getAccountAndKey() + credential, err := blob.NewSharedKeyCredential(accountName, accountKey) + a.Nil(err) + containerURLWithSAS := getContainerClientWithSAS(a, credential, containerName) + return containerURLWithSAS +} + +func (scenarioHelper) getRawBlobURLWithSAS(a *assert.Assertions, containerName string, blobName string) *url.URL { + accountName, accountKey := getAccountAndKey() + credential, err := blob.NewSharedKeyCredential(accountName, accountKey) + a.Nil(err) + cc := getContainerClientWithSAS(a, credential, containerName) + bc := cc.NewBlockBlobClient(blobName) + + u := bc.URL() + parsedURL, err := url.Parse(u) + return parsedURL +} + +func (scenarioHelper) getBlobClientWithSAS(a *assert.Assertions, containerName string, blobName string) *blob.Client { accountName, accountKey := getAccountAndKey() - credential, err := azblob.NewSharedKeyCredential(accountName, accountKey) + credential, err := blob.NewSharedKeyCredential(accountName, accountKey) a.Nil(err) - containerURLWithSAS := getContainerURLWithSAS(a, *credential, containerName) - return containerURLWithSAS.URL() + containerURLWithSAS := getContainerClientWithSAS(a, credential, containerName) + blobURLWithSAS := containerURLWithSAS.NewBlobClient(blobName) + return blobURLWithSAS } -func (scenarioHelper) getRawBlobURLWithSAS(a *assert.Assertions, containerName string, blobName string) url.URL { +func (scenarioHelper) getRawBlobServiceURLWithSAS(a *assert.Assertions) *url.URL { accountName, accountKey := getAccountAndKey() - credential, err := azblob.NewSharedKeyCredential(accountName, accountKey) + credential, err := blob.NewSharedKeyCredential(accountName, accountKey) a.Nil(err) - containerURLWithSAS := getContainerURLWithSAS(a, *credential, containerName) - blobURLWithSAS := containerURLWithSAS.NewBlockBlobURL(blobName) - return blobURLWithSAS.URL() + + u := getBlobServiceClientWithSAS(a, credential).URL() + parsedURL, err := url.Parse(u) + return parsedURL } -func (scenarioHelper) getRawBlobServiceURLWithSAS(a *assert.Assertions) url.URL { +func (scenarioHelper) getBlobServiceClientWithSAS(a *assert.Assertions) *blobservice.Client { accountName, accountKey := getAccountAndKey() - credential, err := azblob.NewSharedKeyCredential(accountName, accountKey) + credential, err := blob.NewSharedKeyCredential(accountName, accountKey) + a.Nil(err) + + return getBlobServiceClientWithSAS(a, credential) +} + +func (scenarioHelper) getBlobServiceClientWithSASFromURL(a *assert.Assertions, rawURL string) *blobservice.Client { + blobURLParts, err := blob.ParseURL(rawURL) + a.Nil(err) + blobURLParts.ContainerName = "" + blobURLParts.BlobName = "" + blobURLParts.VersionID = "" + blobURLParts.Snapshot = "" + + client, err := blobservice.NewClientWithNoCredential(blobURLParts.String(), nil) + a.Nil(err) + + return client +} + +func (scenarioHelper) getFileServiceClientWithSASFromURL(a *assert.Assertions, rawURL string) *fileservice.Client { + fileURLParts, err := sharefile.ParseURL(rawURL) + a.Nil(err) + fileURLParts.ShareName = "" + fileURLParts.ShareSnapshot = "" + fileURLParts.DirectoryOrFilePath = "" + + client, err := fileservice.NewClientWithNoCredential(fileURLParts.String(), nil) a.Nil(err) - return getBlobServiceURLWithSAS(a, *credential).URL() + return client } 
-func (scenarioHelper) getRawFileServiceURLWithSAS(a *assert.Assertions) url.URL { +func (scenarioHelper) getFileServiceClientWithSAS(a *assert.Assertions) *fileservice.Client { accountName, accountKey := getAccountAndKey() - credential, err := azfile.NewSharedKeyCredential(accountName, accountKey) + credential, err := sharefile.NewSharedKeyCredential(accountName, accountKey) a.Nil(err) - return getFileServiceURLWithSAS(a, *credential).URL() + return getFileServiceClientWithSAS(a, credential) } func (scenarioHelper) getRawAdlsServiceURLWithSAS(a *assert.Assertions) azbfs.ServiceURL { @@ -641,28 +687,27 @@ func (scenarioHelper) getRawAdlsServiceURLWithSAS(a *assert.Assertions) azbfs.Se return getAdlsServiceURLWithSAS(a, *credential) } -func (scenarioHelper) getBlobServiceURL(a *assert.Assertions) azblob.ServiceURL { +func (scenarioHelper) getBlobServiceClient(a *assert.Assertions) *blobservice.Client { accountName, accountKey := getAccountAndKey() - credential, err := azblob.NewSharedKeyCredential(accountName, accountKey) + credential, err := blob.NewSharedKeyCredential(accountName, accountKey) a.Nil(err) rawURL := fmt.Sprintf("https://%s.blob.core.windows.net", credential.AccountName()) - // convert the raw url and validate it was parsed successfully - fullURL, err := url.Parse(rawURL) + client, err := blobservice.NewClientWithSharedKeyCredential(rawURL, credential, nil) a.Nil(err) - return azblob.NewServiceURL(*fullURL, azblob.NewPipeline(credential, azblob.PipelineOptions{})) + return client } -func (s scenarioHelper) getContainerURL(a *assert.Assertions, containerName string) azblob.ContainerURL { - serviceURL := s.getBlobServiceURL(a) - containerURL := serviceURL.NewContainerURL(containerName) +func (s scenarioHelper) getContainerClient(a *assert.Assertions, containerName string) *container.Client { + serviceURL := s.getBlobServiceClient(a) + containerURL := serviceURL.NewContainerClient(containerName) return containerURL } func (scenarioHelper) getRawS3AccountURL(a *assert.Assertions, region string) url.URL { - rawURL := fmt.Sprintf("https://s3%s.amazonaws.com", common.IffString(region == "", "", "-"+region)) + rawURL := fmt.Sprintf("https://s3%s.amazonaws.com", common.Iff(region == "", "", "-"+region)) fullURL, err := url.Parse(rawURL) a.Nil(err) @@ -679,7 +724,7 @@ func (scenarioHelper) getRawGCPAccountURL(a *assert.Assertions) url.URL { // TODO: Possibly add virtual-hosted-style and dual stack support. Currently use path style for testing. 
func (scenarioHelper) getRawS3BucketURL(a *assert.Assertions, region string, bucketName string) url.URL { - rawURL := fmt.Sprintf("https://s3%s.amazonaws.com/%s", common.IffString(region == "", "", "-"+region), bucketName) + rawURL := fmt.Sprintf("https://s3%s.amazonaws.com/%s", common.Iff(region == "", "", "-"+region), bucketName) fullURL, err := url.Parse(rawURL) a.Nil(err) @@ -696,7 +741,7 @@ func (scenarioHelper) getRawGCPBucketURL(a *assert.Assertions, bucketName string } func (scenarioHelper) getRawS3ObjectURL(a *assert.Assertions, region string, bucketName string, objectName string) url.URL { - rawURL := fmt.Sprintf("https://s3%s.amazonaws.com/%s/%s", common.IffString(region == "", "", "-"+region), bucketName, objectName) + rawURL := fmt.Sprintf("https://s3%s.amazonaws.com/%s/%s", common.Iff(region == "", "", "-"+region), bucketName, objectName) fullURL, err := url.Parse(rawURL) a.Nil(err) @@ -711,32 +756,50 @@ func (scenarioHelper) getRawGCPObjectURL(a *assert.Assertions, bucketName string return *fullURL } -func (scenarioHelper) getRawFileURLWithSAS(a *assert.Assertions, shareName string, fileName string) url.URL { - credential, err := getGenericCredentialForFile("") +func (scenarioHelper) getRawFileURLWithSAS(a *assert.Assertions, shareName string, fileName string) *url.URL { + accountName, accountKey := getAccountAndKey() + credential, err := sharefile.NewSharedKeyCredential(accountName, accountKey) + a.Nil(err) + sc := getShareClientWithSAS(a, credential, shareName) + fc := sc.NewRootDirectoryClient().NewFileClient(fileName) + + u := fc.URL() + parsedURL, err := url.Parse(u) + return parsedURL +} + +func (scenarioHelper) getRawShareURLWithSAS(a *assert.Assertions, shareName string) *url.URL { + accountName, accountKey := getAccountAndKey() + credential, err := sharefile.NewSharedKeyCredential(accountName, accountKey) a.Nil(err) - shareURLWithSAS := getShareURLWithSAS(a, *credential, shareName) - fileURLWithSAS := shareURLWithSAS.NewRootDirectoryURL().NewFileURL(fileName) - return fileURLWithSAS.URL() + sc := getShareClientWithSAS(a, credential, shareName) + + u := sc.URL() + parsedURL, err := url.Parse(u) + return parsedURL } -func (scenarioHelper) getRawShareURLWithSAS(a *assert.Assertions, shareName string) url.URL { +func (scenarioHelper) getRawFileServiceURLWithSAS(a *assert.Assertions) *url.URL { accountName, accountKey := getAccountAndKey() - credential, err := azfile.NewSharedKeyCredential(accountName, accountKey) + credential, err := sharefile.NewSharedKeyCredential(accountName, accountKey) a.Nil(err) - shareURLWithSAS := getShareURLWithSAS(a, *credential, shareName) - return shareURLWithSAS.URL() + sc := getFileServiceClientWithSAS(a, credential) + + u := sc.URL() + parsedURL, err := url.Parse(u) + return parsedURL } -func (scenarioHelper) blobExists(blobURL azblob.BlobURL) bool { - _, err := blobURL.GetProperties(context.Background(), azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{}) +func (scenarioHelper) blobExists(blobClient *blob.Client) bool { + _, err := blobClient.GetProperties(context.Background(), nil) if err == nil { return true } return false } -func (scenarioHelper) containerExists(containerURL azblob.ContainerURL) bool { - _, err := containerURL.GetProperties(context.Background(), azblob.LeaseAccessConditions{}) +func (scenarioHelper) containerExists(containerClient *container.Client) bool { + _, err := containerClient.GetProperties(context.Background(), nil) if err == nil { return true } @@ -842,7 +905,7 @@ func 
validateRemoveTransfersAreScheduled(a *assert.Assertions, isSrcEncoded bool // look up the source from the expected transfers, make sure it exists _, srcExist := lookupMap[srcRelativeFilePath] - a.True(srcExist) + a.True(srcExist, srcRelativeFilePath) delete(lookupMap, srcRelativeFilePath) } diff --git a/cmd/zt_set_properties_test.go b/cmd/zt_set_properties_test.go index 9844c2c3a..c5adfb7ec 100644 --- a/cmd/zt_set_properties_test.go +++ b/cmd/zt_set_properties_test.go @@ -21,8 +21,12 @@ package cmd import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" "github.com/Azure/azure-storage-azcopy/v10/common" - "github.com/Azure/azure-storage-blob-go/azblob" "github.com/stretchr/testify/assert" "net/url" "strings" @@ -47,40 +51,35 @@ func (tp transferParams) getMetadata() common.Metadata { return metadataMap } -func (scenarioHelper) generateBlobsFromListWithAccessTier(a *assert.Assertions, containerURL azblob.ContainerURL, blobList []string, data string, accessTier azblob.AccessTierType) { +func (scenarioHelper) generateBlobsFromListWithAccessTier(a *assert.Assertions, cc *container.Client, blobList []string, data string, accessTier *blob.AccessTier) { for _, blobName := range blobList { - blob := containerURL.NewBlockBlobURL(blobName) - cResp, err := blob.Upload(ctx, strings.NewReader(data), azblob.BlobHTTPHeaders{}, - nil, azblob.BlobAccessConditions{}, accessTier, nil, azblob.ClientProvidedKeyOptions{}, azblob.ImmutabilityPolicyOptions{}) + bc := cc.NewBlockBlobClient(blobName) + _, err := bc.Upload(ctx, streaming.NopCloser(strings.NewReader(data)), &blockblob.UploadOptions{Tier: accessTier}) a.Nil(err) - a.Equal(201, cResp.StatusCode()) } // sleep a bit so that the blobs' lmts are guaranteed to be in the past time.Sleep(time.Millisecond * 1050) } -func createNewBlockBlobWithAccessTier(a *assert.Assertions, container azblob.ContainerURL, prefix string, accessTier azblob.AccessTierType) (blob azblob.BlockBlobURL, name string) { - blob, name = getBlockBlobURL(a, container, prefix) - - cResp, err := blob.Upload(ctx, strings.NewReader(blockBlobDefaultData), azblob.BlobHTTPHeaders{}, - nil, azblob.BlobAccessConditions{}, accessTier, nil, azblob.ClientProvidedKeyOptions{}, azblob.ImmutabilityPolicyOptions{}) +func createNewBlockBlobWithAccessTier(a *assert.Assertions, cc *container.Client, prefix string, accessTier *blob.AccessTier) (bbc *blockblob.Client, name string) { + bbc, name = getBlockBlobClient(a, cc, prefix) + _, err := bbc.Upload(ctx, streaming.NopCloser(strings.NewReader(blockBlobDefaultData)), &blockblob.UploadOptions{Tier: accessTier}) a.Nil(err) - a.Equal(201, cResp.StatusCode()) return } -func (scenarioHelper) generateCommonRemoteScenarioForBlobWithAccessTier(a *assert.Assertions, containerURL azblob.ContainerURL, prefix string, accessTier azblob.AccessTierType) (blobList []string) { +func (scenarioHelper) generateCommonRemoteScenarioForBlobWithAccessTier(a *assert.Assertions, cc *container.Client, prefix string, accessTier *blob.AccessTier) (blobList []string) { blobList = make([]string, 50) for i := 0; i < 10; i++ { - _, blobName1 := createNewBlockBlobWithAccessTier(a, containerURL, prefix+"top", accessTier) - _, blobName2 := createNewBlockBlobWithAccessTier(a, containerURL, prefix+"sub1/", accessTier) - _, blobName3 := 
createNewBlockBlobWithAccessTier(a, containerURL, prefix+"sub2/", accessTier) - _, blobName4 := createNewBlockBlobWithAccessTier(a, containerURL, prefix+"sub1/sub3/sub5/", accessTier) - _, blobName5 := createNewBlockBlobWithAccessTier(a, containerURL, prefix+specialNames[i], accessTier) + _, blobName1 := createNewBlockBlobWithAccessTier(a, cc, prefix+"top", accessTier) + _, blobName2 := createNewBlockBlobWithAccessTier(a, cc, prefix+"sub1/", accessTier) + _, blobName3 := createNewBlockBlobWithAccessTier(a, cc, prefix+"sub2/", accessTier) + _, blobName4 := createNewBlockBlobWithAccessTier(a, cc, prefix+"sub1/sub3/sub5/", accessTier) + _, blobName5 := createNewBlockBlobWithAccessTier(a, cc, prefix+specialNames[i], accessTier) blobList[5*i] = blobName1 blobList[5*i+1] = blobName2 @@ -94,13 +93,20 @@ func (scenarioHelper) generateCommonRemoteScenarioForBlobWithAccessTier(a *asser return } -func checkMapsEqual(a *assert.Assertions, mapA map[string]string, mapB map[string]string) { +func checkTagsEqual(a *assert.Assertions, mapA map[string]string, mapB map[string]string) { a.Equal(len(mapB), len(mapA)) for k, v := range mapA { a.Equal(v, mapB[k]) } } +func checkMetadataEqual(a *assert.Assertions, mapA map[string]*string, mapB map[string]*string) { + a.Equal(len(mapB), len(mapA)) + for k, v := range mapA { + a.Equal(*v, *mapB[k]) + } +} + func validateSetPropertiesTransfersAreScheduled(a *assert.Assertions, isSrcEncoded bool, expectedTransfers []string, transferParams transferParams, mockedRPC interceptor) { // validate that the right number of transfers were scheduled @@ -111,8 +117,8 @@ func validateSetPropertiesTransfersAreScheduled(a *assert.Assertions, isSrcEncod for _, transfer := range mockedRPC.transfers { srcRelativeFilePath := transfer.Source a.Equal(transferParams.blockBlobTier.ToAccessTierType(), transfer.BlobTier) - checkMapsEqual(a, transfer.Metadata, transferParams.getMetadata()) - checkMapsEqual(a, transfer.BlobTags, transferParams.blobTags) + checkMetadataEqual(a, transfer.Metadata, transferParams.getMetadata()) + checkTagsEqual(a, transfer.BlobTags, transferParams.blobTags) if isSrcEncoded { srcRelativeFilePath, _ = url.PathUnescape(srcRelativeFilePath) @@ -128,17 +134,17 @@ func validateSetPropertiesTransfersAreScheduled(a *assert.Assertions, isSrcEncod func TestSetPropertiesSingleBlobForBlobTier(t *testing.T) { a := assert.New(t) - bsu := getBSU() - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) + bsc := getBlobServiceClient() + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) for _, blobName := range []string{"top/mid/low/singleblobisbest", "打麻将.txt", "%4509%4254$85140&"} { // set up the container with a single blob blobList := []string{blobName} // upload the data with given accessTier - scenarioHelper{}.generateBlobsFromListWithAccessTier(a, containerURL, blobList, blockBlobDefaultData, azblob.AccessTierHot) - a.NotNil(containerURL) + scenarioHelper{}.generateBlobsFromListWithAccessTier(a, cc, blobList, blockBlobDefaultData, to.Ptr(blob.AccessTierHot)) + a.NotNil(cc) // set up interceptor mockedRPC := interceptor{} @@ -166,13 +172,13 @@ func TestSetPropertiesSingleBlobForBlobTier(t *testing.T) { func TestSetPropertiesBlobsUnderContainerForBlobTier(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) - blobList := 
scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, "", azblob.AccessTierHot) - a.NotNil(containerURL) + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, cc, "", to.Ptr(blob.AccessTierHot)) + a.NotNil(cc) a.NotZero(len(blobList)) // set up interceptor @@ -221,18 +227,18 @@ func TestSetPropertiesBlobsUnderContainerForBlobTier(t *testing.T) { func TestSetPropertiesWithIncludeFlagForBlobTier(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(a, bsu) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, "", azblob.AccessTierHot) - defer deleteContainer(a, containerURL) - a.NotNil(containerURL) + cc, containerName := createNewContainer(a, bsc) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, cc, "", to.Ptr(blob.AccessTierHot)) + defer deleteContainer(a, cc) + a.NotNil(cc) a.NotZero(len(blobList)) // add special blobs that we wish to include blobsToInclude := []string{"important.pdf", "includeSub/amazing.jpeg", "exactName"} - scenarioHelper{}.generateBlobsFromListWithAccessTier(a, containerURL, blobsToInclude, blockBlobDefaultData, azblob.AccessTierHot) + scenarioHelper{}.generateBlobsFromListWithAccessTier(a, cc, blobsToInclude, blockBlobDefaultData, to.Ptr(blob.AccessTierHot)) includeString := "*.pdf;*.jpeg;exactName" // set up interceptor @@ -261,18 +267,18 @@ func TestSetPropertiesWithIncludeFlagForBlobTier(t *testing.T) { func TestSetPropertiesWithExcludeFlagForBlobTier(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(a, bsu) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, "", azblob.AccessTierHot) - defer deleteContainer(a, containerURL) - a.NotNil(containerURL) + cc, containerName := createNewContainer(a, bsc) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, cc, "", to.Ptr(blob.AccessTierHot)) + defer deleteContainer(a, cc) + a.NotNil(cc) a.NotZero(len(blobList)) // add special blobs that we wish to exclude blobsToExclude := []string{"notGood.pdf", "excludeSub/lame.jpeg", "exactName"} - scenarioHelper{}.generateBlobsFromListWithAccessTier(a, containerURL, blobsToExclude, blockBlobDefaultData, azblob.AccessTierHot) + scenarioHelper{}.generateBlobsFromListWithAccessTier(a, cc, blobsToExclude, blockBlobDefaultData, to.Ptr(blob.AccessTierHot)) excludeString := "*.pdf;*.jpeg;exactName" // set up interceptor @@ -303,24 +309,24 @@ func TestSetPropertiesWithExcludeFlagForBlobTier(t *testing.T) { func TestSetPropertiesWithIncludeAndExcludeFlagForBlobTier(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(a, bsu) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, "", azblob.AccessTierHot) - defer deleteContainer(a, containerURL) - a.NotNil(containerURL) + cc, containerName := createNewContainer(a, bsc) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, cc, "", to.Ptr(blob.AccessTierHot)) + defer deleteContainer(a, cc) + 
a.NotNil(cc) a.NotZero(len(blobList)) // add special blobs that we wish to include blobsToInclude := []string{"important.pdf", "includeSub/amazing.jpeg"} - scenarioHelper{}.generateBlobsFromListWithAccessTier(a, containerURL, blobsToInclude, blockBlobDefaultData, azblob.AccessTierHot) + scenarioHelper{}.generateBlobsFromListWithAccessTier(a, cc, blobsToInclude, blockBlobDefaultData, to.Ptr(blob.AccessTierHot)) includeString := "*.pdf;*.jpeg;exactName" // add special blobs that we wish to exclude // note that the excluded files also match the include string blobsToExclude := []string{"sorry.pdf", "exclude/notGood.jpeg", "exactName", "sub/exactName"} - scenarioHelper{}.generateBlobsFromListWithAccessTier(a, containerURL, blobsToExclude, blockBlobDefaultData, azblob.AccessTierHot) + scenarioHelper{}.generateBlobsFromListWithAccessTier(a, cc, blobsToExclude, blockBlobDefaultData, to.Ptr(blob.AccessTierHot)) excludeString := "so*;not*;exactName" // set up interceptor @@ -353,15 +359,15 @@ func TestSetPropertiesWithIncludeAndExcludeFlagForBlobTier(t *testing.T) { func TestSetPropertiesListOfBlobsAndVirtualDirsForBlobTier(t *testing.T) { a := assert.New(t) t.Skip("Enable after setting Account to non-HNS") - bsu := getBSU() + bsc := getBlobServiceClient() vdirName := "megadir" // set up the container with numerous blobs and a vdir - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) - a.NotNil(containerURL) - blobListPart1 := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, "", azblob.AccessTierHot) - blobListPart2 := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, vdirName+"/", azblob.AccessTierHot) + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) + a.NotNil(cc) + blobListPart1 := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, cc, "", to.Ptr(blob.AccessTierHot)) + blobListPart2 := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, cc, vdirName+"/", to.Ptr(blob.AccessTierHot)) blobList := append(blobListPart1, blobListPart2...) 
a.NotZero(len(blobList)) @@ -424,26 +430,26 @@ func TestSetPropertiesListOfBlobsAndVirtualDirsForBlobTier(t *testing.T) { func TestSetPropertiesListOfBlobsWithIncludeAndExcludeForBlobTier(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() vdirName := "megadir" // set up the container with numerous blobs and a vdir - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) - a.NotNil(containerURL) - blobListPart1 := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, "", azblob.AccessTierHot) - scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, vdirName+"/", azblob.AccessTierHot) + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) + a.NotNil(cc) + blobListPart1 := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, cc, "", to.Ptr(blob.AccessTierHot)) + scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, cc, vdirName+"/", to.Ptr(blob.AccessTierHot)) // add special blobs that we wish to include blobsToInclude := []string{"important.pdf", "includeSub/amazing.jpeg"} - scenarioHelper{}.generateBlobsFromListWithAccessTier(a, containerURL, blobsToInclude, blockBlobDefaultData, azblob.AccessTierHot) + scenarioHelper{}.generateBlobsFromListWithAccessTier(a, cc, blobsToInclude, blockBlobDefaultData, to.Ptr(blob.AccessTierHot)) includeString := "*.pdf;*.jpeg;exactName" // add special blobs that we wish to exclude // note that the excluded files also match the include string blobsToExclude := []string{"sorry.pdf", "exclude/notGood.jpeg", "exactName", "sub/exactName"} - scenarioHelper{}.generateBlobsFromListWithAccessTier(a, containerURL, blobsToExclude, blockBlobDefaultData, azblob.AccessTierHot) + scenarioHelper{}.generateBlobsFromListWithAccessTier(a, cc, blobsToExclude, blockBlobDefaultData, to.Ptr(blob.AccessTierHot)) excludeString := "so*;not*;exactName" // set up interceptor @@ -491,15 +497,15 @@ func TestSetPropertiesListOfBlobsWithIncludeAndExcludeForBlobTier(t *testing.T) func TestSetPropertiesSingleBlobWithFromToForBlobTier(t *testing.T) { a := assert.New(t) - bsu := getBSU() - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) + bsc := getBlobServiceClient() + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) for _, blobName := range []string{"top/mid/low/singleblobisbest", "打麻将.txt", "%4509%4254$85140&"} { // set up the container with a single blob blobList := []string{blobName} - scenarioHelper{}.generateBlobsFromListWithAccessTier(a, containerURL, blobList, blockBlobDefaultData, azblob.AccessTierHot) - a.NotNil(containerURL) + scenarioHelper{}.generateBlobsFromListWithAccessTier(a, cc, blobList, blockBlobDefaultData, to.Ptr(blob.AccessTierHot)) + a.NotNil(cc) // set up interceptor mockedRPC := interceptor{} @@ -529,14 +535,14 @@ func TestSetPropertiesSingleBlobWithFromToForBlobTier(t *testing.T) { func TestSetPropertiesBlobsUnderContainerWithFromToForBlobTier(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, "", azblob.AccessTierHot) + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) + blobList := 
scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, cc, "", to.Ptr(blob.AccessTierHot)) - a.NotNil(containerURL) + a.NotNil(cc) a.NotZero(len(blobList)) // set up interceptor @@ -585,15 +591,15 @@ func TestSetPropertiesBlobsUnderContainerWithFromToForBlobTier(t *testing.T) { func TestSetPropertiesBlobsUnderVirtualDirWithFromToForBlobTier(t *testing.T) { a := assert.New(t) t.Skip("Enable after setting Account to non-HNS") - bsu := getBSU() + bsc := getBlobServiceClient() vdirName := "vdir1/vdir2/vdir3/" // set up the container with numerous blobs - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, vdirName, azblob.AccessTierHot) + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, cc, vdirName, to.Ptr(blob.AccessTierHot)) - a.NotNil(containerURL) + a.NotNil(cc) a.NotZero(len(blobList)) // set up interceptor @@ -643,17 +649,17 @@ func TestSetPropertiesBlobsUnderVirtualDirWithFromToForBlobTier(t *testing.T) { func TestSetPropertiesSingleBlobForMetadata(t *testing.T) { a := assert.New(t) - bsu := getBSU() - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) + bsc := getBlobServiceClient() + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) for _, blobName := range []string{"top/mid/low/singleblobisbest", "打麻将.txt", "%4509%4254$85140&"} { // set up the container with a single blob blobList := []string{blobName} // upload the data with given accessTier - scenarioHelper{}.generateBlobsFromListWithAccessTier(a, containerURL, blobList, blockBlobDefaultData, azblob.AccessTierHot) - a.NotNil(containerURL) + scenarioHelper{}.generateBlobsFromListWithAccessTier(a, cc, blobList, blockBlobDefaultData, to.Ptr(blob.AccessTierHot)) + a.NotNil(cc) // set up interceptor mockedRPC := interceptor{} @@ -681,17 +687,17 @@ func TestSetPropertiesSingleBlobForMetadata(t *testing.T) { func TestSetPropertiesSingleBlobForEmptyMetadata(t *testing.T) { a := assert.New(t) - bsu := getBSU() - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) + bsc := getBlobServiceClient() + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) for _, blobName := range []string{"top/mid/low/singleblobisbest", "打麻将.txt", "%4509%4254$85140&"} { // set up the container with a single blob blobList := []string{blobName} // upload the data with given accessTier - scenarioHelper{}.generateBlobsFromListWithAccessTier(a, containerURL, blobList, blockBlobDefaultData, azblob.AccessTierHot) - a.NotNil(containerURL) + scenarioHelper{}.generateBlobsFromListWithAccessTier(a, cc, blobList, blockBlobDefaultData, to.Ptr(blob.AccessTierHot)) + a.NotNil(cc) // set up interceptor mockedRPC := interceptor{} @@ -719,13 +725,13 @@ func TestSetPropertiesSingleBlobForEmptyMetadata(t *testing.T) { func TestSetPropertiesBlobsUnderContainerForMetadata(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, "", azblob.AccessTierHot) - a.NotNil(containerURL) + cc, containerName := 
createNewContainer(a, bsc) + defer deleteContainer(a, cc) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, cc, "", to.Ptr(blob.AccessTierHot)) + a.NotNil(cc) a.NotZero(len(blobList)) // set up interceptor @@ -771,18 +777,18 @@ func TestSetPropertiesBlobsUnderContainerForMetadata(t *testing.T) { func TestSetPropertiesWithIncludeFlagForMetadata(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(a, bsu) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, "", azblob.AccessTierHot) - defer deleteContainer(a, containerURL) - a.NotNil(containerURL) + cc, containerName := createNewContainer(a, bsc) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, cc, "", to.Ptr(blob.AccessTierHot)) + defer deleteContainer(a, cc) + a.NotNil(cc) a.NotZero(len(blobList)) // add special blobs that we wish to include blobsToInclude := []string{"important.pdf", "includeSub/amazing.jpeg", "exactName"} - scenarioHelper{}.generateBlobsFromListWithAccessTier(a, containerURL, blobsToInclude, blockBlobDefaultData, azblob.AccessTierHot) + scenarioHelper{}.generateBlobsFromListWithAccessTier(a, cc, blobsToInclude, blockBlobDefaultData, to.Ptr(blob.AccessTierHot)) includeString := "*.pdf;*.jpeg;exactName" // set up interceptor @@ -811,18 +817,18 @@ func TestSetPropertiesWithIncludeFlagForMetadata(t *testing.T) { func TestSetPropertiesWithExcludeFlagForMetadata(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(a, bsu) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, "", azblob.AccessTierHot) - defer deleteContainer(a, containerURL) - a.NotNil(containerURL) + cc, containerName := createNewContainer(a, bsc) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, cc, "", to.Ptr(blob.AccessTierHot)) + defer deleteContainer(a, cc) + a.NotNil(cc) a.NotZero(len(blobList)) // add special blobs that we wish to exclude blobsToExclude := []string{"notGood.pdf", "excludeSub/lame.jpeg", "exactName"} - scenarioHelper{}.generateBlobsFromListWithAccessTier(a, containerURL, blobsToExclude, blockBlobDefaultData, azblob.AccessTierHot) + scenarioHelper{}.generateBlobsFromListWithAccessTier(a, cc, blobsToExclude, blockBlobDefaultData, to.Ptr(blob.AccessTierHot)) excludeString := "*.pdf;*.jpeg;exactName" // set up interceptor @@ -853,24 +859,24 @@ func TestSetPropertiesWithExcludeFlagForMetadata(t *testing.T) { func TestSetPropertiesWithIncludeAndExcludeFlagForMetadata(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(a, bsu) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, "", azblob.AccessTierHot) - defer deleteContainer(a, containerURL) - a.NotNil(containerURL) + cc, containerName := createNewContainer(a, bsc) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, cc, "", to.Ptr(blob.AccessTierHot)) + defer deleteContainer(a, cc) + a.NotNil(cc) a.NotZero(len(blobList)) // add special blobs that we wish to include blobsToInclude := []string{"important.pdf", "includeSub/amazing.jpeg"} - 
scenarioHelper{}.generateBlobsFromListWithAccessTier(a, containerURL, blobsToInclude, blockBlobDefaultData, azblob.AccessTierHot) + scenarioHelper{}.generateBlobsFromListWithAccessTier(a, cc, blobsToInclude, blockBlobDefaultData, to.Ptr(blob.AccessTierHot)) includeString := "*.pdf;*.jpeg;exactName" // add special blobs that we wish to exclude // note that the excluded files also match the include string blobsToExclude := []string{"sorry.pdf", "exclude/notGood.jpeg", "exactName", "sub/exactName"} - scenarioHelper{}.generateBlobsFromListWithAccessTier(a, containerURL, blobsToExclude, blockBlobDefaultData, azblob.AccessTierHot) + scenarioHelper{}.generateBlobsFromListWithAccessTier(a, cc, blobsToExclude, blockBlobDefaultData, to.Ptr(blob.AccessTierHot)) excludeString := "so*;not*;exactName" // set up interceptor @@ -903,15 +909,15 @@ func TestSetPropertiesWithIncludeAndExcludeFlagForMetadata(t *testing.T) { func TestSetPropertiesListOfBlobsAndVirtualDirsForMetadata(t *testing.T) { a := assert.New(t) t.Skip("Enable after setting Account to non-HNS") - bsu := getBSU() + bsc := getBlobServiceClient() vdirName := "megadir" // set up the container with numerous blobs and a vdir - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) - a.NotNil(containerURL) - blobListPart1 := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, "", azblob.AccessTierHot) - blobListPart2 := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, vdirName+"/", azblob.AccessTierHot) + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) + a.NotNil(cc) + blobListPart1 := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, cc, "", to.Ptr(blob.AccessTierHot)) + blobListPart2 := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, cc, vdirName+"/", to.Ptr(blob.AccessTierHot)) blobList := append(blobListPart1, blobListPart2...) 
a.NotZero(len(blobList)) @@ -974,26 +980,26 @@ func TestSetPropertiesListOfBlobsAndVirtualDirsForMetadata(t *testing.T) { func TestSetPropertiesListOfBlobsWithIncludeAndExcludeForMetadata(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() vdirName := "megadir" // set up the container with numerous blobs and a vdir - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) - a.NotNil(containerURL) - blobListPart1 := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, "", azblob.AccessTierHot) - scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, vdirName+"/", azblob.AccessTierHot) + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) + a.NotNil(cc) + blobListPart1 := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, cc, "", to.Ptr(blob.AccessTierHot)) + scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, cc, vdirName+"/", to.Ptr(blob.AccessTierHot)) // add special blobs that we wish to include blobsToInclude := []string{"important.pdf", "includeSub/amazing.jpeg"} - scenarioHelper{}.generateBlobsFromListWithAccessTier(a, containerURL, blobsToInclude, blockBlobDefaultData, azblob.AccessTierHot) + scenarioHelper{}.generateBlobsFromListWithAccessTier(a, cc, blobsToInclude, blockBlobDefaultData, to.Ptr(blob.AccessTierHot)) includeString := "*.pdf;*.jpeg;exactName" // add special blobs that we wish to exclude // note that the excluded files also match the include string blobsToExclude := []string{"sorry.pdf", "exclude/notGood.jpeg", "exactName", "sub/exactName"} - scenarioHelper{}.generateBlobsFromListWithAccessTier(a, containerURL, blobsToExclude, blockBlobDefaultData, azblob.AccessTierHot) + scenarioHelper{}.generateBlobsFromListWithAccessTier(a, cc, blobsToExclude, blockBlobDefaultData, to.Ptr(blob.AccessTierHot)) excludeString := "so*;not*;exactName" // set up interceptor @@ -1041,15 +1047,15 @@ func TestSetPropertiesListOfBlobsWithIncludeAndExcludeForMetadata(t *testing.T) func TestSetPropertiesSingleBlobWithFromToForMetadata(t *testing.T) { a := assert.New(t) - bsu := getBSU() - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) + bsc := getBlobServiceClient() + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) for _, blobName := range []string{"top/mid/low/singleblobisbest", "打麻将.txt", "%4509%4254$85140&"} { // set up the container with a single blob blobList := []string{blobName} - scenarioHelper{}.generateBlobsFromListWithAccessTier(a, containerURL, blobList, blockBlobDefaultData, azblob.AccessTierHot) - a.NotNil(containerURL) + scenarioHelper{}.generateBlobsFromListWithAccessTier(a, cc, blobList, blockBlobDefaultData, to.Ptr(blob.AccessTierHot)) + a.NotNil(cc) // set up interceptor mockedRPC := interceptor{} @@ -1079,14 +1085,14 @@ func TestSetPropertiesSingleBlobWithFromToForMetadata(t *testing.T) { func TestSetPropertiesBlobsUnderContainerWithFromToForMetadata(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, "", azblob.AccessTierHot) + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) + blobList := 
scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, cc, "", to.Ptr(blob.AccessTierHot)) - a.NotNil(containerURL) + a.NotNil(cc) a.NotZero(len(blobList)) // set up interceptor @@ -1135,15 +1141,15 @@ func TestSetPropertiesBlobsUnderContainerWithFromToForMetadata(t *testing.T) { func TestSetPropertiesBlobsUnderVirtualDirWithFromToForMetadata(t *testing.T) { a := assert.New(t) t.Skip("Enable after setting Account to non-HNS") - bsu := getBSU() + bsc := getBlobServiceClient() vdirName := "vdir1/vdir2/vdir3/" // set up the container with numerous blobs - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, vdirName, azblob.AccessTierHot) + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, cc, vdirName, to.Ptr(blob.AccessTierHot)) - a.NotNil(containerURL) + a.NotNil(cc) a.NotZero(len(blobList)) // set up interceptor @@ -1193,17 +1199,17 @@ func TestSetPropertiesBlobsUnderVirtualDirWithFromToForMetadata(t *testing.T) { func TestSetPropertiesSingleBlobForBlobTags(t *testing.T) { a := assert.New(t) - bsu := getBSU() - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) + bsc := getBlobServiceClient() + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) for _, blobName := range []string{"top/mid/low/singleblobisbest", "打麻将.txt", "%4509%4254$85140&"} { // set up the container with a single blob blobList := []string{blobName} // upload the data with given accessTier - scenarioHelper{}.generateBlobsFromListWithAccessTier(a, containerURL, blobList, blockBlobDefaultData, azblob.AccessTierHot) - a.NotNil(containerURL) + scenarioHelper{}.generateBlobsFromListWithAccessTier(a, cc, blobList, blockBlobDefaultData, to.Ptr(blob.AccessTierHot)) + a.NotNil(cc) // set up interceptor mockedRPC := interceptor{} @@ -1231,17 +1237,17 @@ func TestSetPropertiesSingleBlobForBlobTags(t *testing.T) { func TestSetPropertiesSingleBlobForEmptyBlobTags(t *testing.T) { a := assert.New(t) - bsu := getBSU() - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) + bsc := getBlobServiceClient() + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) for _, blobName := range []string{"top/mid/low/singleblobisbest", "打麻将.txt", "%4509%4254$85140&"} { // set up the container with a single blob blobList := []string{blobName} // upload the data with given accessTier - scenarioHelper{}.generateBlobsFromListWithAccessTier(a, containerURL, blobList, blockBlobDefaultData, azblob.AccessTierHot) - a.NotNil(containerURL) + scenarioHelper{}.generateBlobsFromListWithAccessTier(a, cc, blobList, blockBlobDefaultData, to.Ptr(blob.AccessTierHot)) + a.NotNil(cc) // set up interceptor mockedRPC := interceptor{} @@ -1269,13 +1275,13 @@ func TestSetPropertiesSingleBlobForEmptyBlobTags(t *testing.T) { func TestSetPropertiesBlobsUnderContainerForBlobTags(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, "", azblob.AccessTierHot) - a.NotNil(containerURL) + cc, 
containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, cc, "", to.Ptr(blob.AccessTierHot)) + a.NotNil(cc) a.NotZero(len(blobList)) // set up interceptor @@ -1321,18 +1327,18 @@ func TestSetPropertiesBlobsUnderContainerForBlobTags(t *testing.T) { func TestSetPropertiesWithIncludeFlagForBlobTags(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(a, bsu) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, "", azblob.AccessTierHot) - defer deleteContainer(a, containerURL) - a.NotNil(containerURL) + cc, containerName := createNewContainer(a, bsc) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, cc, "", to.Ptr(blob.AccessTierHot)) + defer deleteContainer(a, cc) + a.NotNil(cc) a.NotZero(len(blobList)) // add special blobs that we wish to include blobsToInclude := []string{"important.pdf", "includeSub/amazing.jpeg", "exactName"} - scenarioHelper{}.generateBlobsFromListWithAccessTier(a, containerURL, blobsToInclude, blockBlobDefaultData, azblob.AccessTierHot) + scenarioHelper{}.generateBlobsFromListWithAccessTier(a, cc, blobsToInclude, blockBlobDefaultData, to.Ptr(blob.AccessTierHot)) includeString := "*.pdf;*.jpeg;exactName" // set up interceptor @@ -1361,18 +1367,18 @@ func TestSetPropertiesWithIncludeFlagForBlobTags(t *testing.T) { func TestSetPropertiesWithExcludeFlagForBlobTags(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(a, bsu) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, "", azblob.AccessTierHot) - defer deleteContainer(a, containerURL) - a.NotNil(containerURL) + cc, containerName := createNewContainer(a, bsc) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, cc, "", to.Ptr(blob.AccessTierHot)) + defer deleteContainer(a, cc) + a.NotNil(cc) a.NotZero(len(blobList)) // add special blobs that we wish to exclude blobsToExclude := []string{"notGood.pdf", "excludeSub/lame.jpeg", "exactName"} - scenarioHelper{}.generateBlobsFromListWithAccessTier(a, containerURL, blobsToExclude, blockBlobDefaultData, azblob.AccessTierHot) + scenarioHelper{}.generateBlobsFromListWithAccessTier(a, cc, blobsToExclude, blockBlobDefaultData, to.Ptr(blob.AccessTierHot)) excludeString := "*.pdf;*.jpeg;exactName" // set up interceptor @@ -1403,24 +1409,24 @@ func TestSetPropertiesWithExcludeFlagForBlobTags(t *testing.T) { func TestSetPropertiesWithIncludeAndExcludeFlagForBlobTags(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(a, bsu) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, "", azblob.AccessTierHot) - defer deleteContainer(a, containerURL) - a.NotNil(containerURL) + cc, containerName := createNewContainer(a, bsc) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, cc, "", to.Ptr(blob.AccessTierHot)) + defer deleteContainer(a, cc) + a.NotNil(cc) a.NotZero(len(blobList)) // add special blobs that we wish to include blobsToInclude := []string{"important.pdf", 
"includeSub/amazing.jpeg"} - scenarioHelper{}.generateBlobsFromListWithAccessTier(a, containerURL, blobsToInclude, blockBlobDefaultData, azblob.AccessTierHot) + scenarioHelper{}.generateBlobsFromListWithAccessTier(a, cc, blobsToInclude, blockBlobDefaultData, to.Ptr(blob.AccessTierHot)) includeString := "*.pdf;*.jpeg;exactName" // add special blobs that we wish to exclude // note that the excluded files also match the include string blobsToExclude := []string{"sorry.pdf", "exclude/notGood.jpeg", "exactName", "sub/exactName"} - scenarioHelper{}.generateBlobsFromListWithAccessTier(a, containerURL, blobsToExclude, blockBlobDefaultData, azblob.AccessTierHot) + scenarioHelper{}.generateBlobsFromListWithAccessTier(a, cc, blobsToExclude, blockBlobDefaultData, to.Ptr(blob.AccessTierHot)) excludeString := "so*;not*;exactName" // set up interceptor @@ -1453,15 +1459,15 @@ func TestSetPropertiesWithIncludeAndExcludeFlagForBlobTags(t *testing.T) { func TestSetPropertiesListOfBlobsAndVirtualDirsForBlobTags(t *testing.T) { a := assert.New(t) t.Skip("Enable after setting Account to non-HNS") - bsu := getBSU() + bsc := getBlobServiceClient() vdirName := "megadir" // set up the container with numerous blobs and a vdir - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) - a.NotNil(containerURL) - blobListPart1 := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, "", azblob.AccessTierHot) - blobListPart2 := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, vdirName+"/", azblob.AccessTierHot) + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) + a.NotNil(cc) + blobListPart1 := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, cc, "", to.Ptr(blob.AccessTierHot)) + blobListPart2 := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, cc, vdirName+"/", to.Ptr(blob.AccessTierHot)) blobList := append(blobListPart1, blobListPart2...) 
a.NotZero(len(blobList)) @@ -1524,26 +1530,26 @@ func TestSetPropertiesListOfBlobsAndVirtualDirsForBlobTags(t *testing.T) { func TestSetPropertiesListOfBlobsWithIncludeAndExcludeForBlobTags(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() vdirName := "megadir" // set up the container with numerous blobs and a vdir - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) - a.NotNil(containerURL) - blobListPart1 := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, "", azblob.AccessTierHot) - scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, vdirName+"/", azblob.AccessTierHot) + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) + a.NotNil(cc) + blobListPart1 := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, cc, "", to.Ptr(blob.AccessTierHot)) + scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, cc, vdirName+"/", to.Ptr(blob.AccessTierHot)) // add special blobs that we wish to include blobsToInclude := []string{"important.pdf", "includeSub/amazing.jpeg"} - scenarioHelper{}.generateBlobsFromListWithAccessTier(a, containerURL, blobsToInclude, blockBlobDefaultData, azblob.AccessTierHot) + scenarioHelper{}.generateBlobsFromListWithAccessTier(a, cc, blobsToInclude, blockBlobDefaultData, to.Ptr(blob.AccessTierHot)) includeString := "*.pdf;*.jpeg;exactName" // add special blobs that we wish to exclude // note that the excluded files also match the include string blobsToExclude := []string{"sorry.pdf", "exclude/notGood.jpeg", "exactName", "sub/exactName"} - scenarioHelper{}.generateBlobsFromListWithAccessTier(a, containerURL, blobsToExclude, blockBlobDefaultData, azblob.AccessTierHot) + scenarioHelper{}.generateBlobsFromListWithAccessTier(a, cc, blobsToExclude, blockBlobDefaultData, to.Ptr(blob.AccessTierHot)) excludeString := "so*;not*;exactName" // set up interceptor @@ -1591,15 +1597,15 @@ func TestSetPropertiesListOfBlobsWithIncludeAndExcludeForBlobTags(t *testing.T) func TestSetPropertiesSingleBlobWithFromToForBlobTags(t *testing.T) { a := assert.New(t) - bsu := getBSU() - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) + bsc := getBlobServiceClient() + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) for _, blobName := range []string{"top/mid/low/singleblobisbest", "打麻将.txt", "%4509%4254$85140&"} { // set up the container with a single blob blobList := []string{blobName} - scenarioHelper{}.generateBlobsFromListWithAccessTier(a, containerURL, blobList, blockBlobDefaultData, azblob.AccessTierHot) - a.NotNil(containerURL) + scenarioHelper{}.generateBlobsFromListWithAccessTier(a, cc, blobList, blockBlobDefaultData, to.Ptr(blob.AccessTierHot)) + a.NotNil(cc) // set up interceptor mockedRPC := interceptor{} @@ -1629,14 +1635,14 @@ func TestSetPropertiesSingleBlobWithFromToForBlobTags(t *testing.T) { func TestSetPropertiesBlobsUnderContainerWithFromToForBlobTags(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, "", azblob.AccessTierHot) + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) + blobList := 
scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, cc, "", to.Ptr(blob.AccessTierHot)) - a.NotNil(containerURL) + a.NotNil(cc) a.NotZero(len(blobList)) // set up interceptor @@ -1685,15 +1691,15 @@ func TestSetPropertiesBlobsUnderContainerWithFromToForBlobTags(t *testing.T) { func TestSetPropertiesBlobsUnderVirtualDirWithFromToForBlobTags(t *testing.T) { a := assert.New(t) t.Skip("Enable after setting Account to non-HNS") - bsu := getBSU() + bsc := getBlobServiceClient() vdirName := "vdir1/vdir2/vdir3/" // set up the container with numerous blobs - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, vdirName, azblob.AccessTierHot) + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, cc, vdirName, to.Ptr(blob.AccessTierHot)) - a.NotNil(containerURL) + a.NotNil(cc) a.NotZero(len(blobList)) // set up interceptor diff --git a/cmd/zt_sync_blob_blob_test.go b/cmd/zt_sync_blob_blob_test.go index 66e82812a..7a438077a 100644 --- a/cmd/zt_sync_blob_blob_test.go +++ b/cmd/zt_sync_blob_blob_test.go @@ -24,31 +24,33 @@ import ( "bytes" "context" "encoding/json" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob" "github.com/stretchr/testify/assert" "sort" "strings" "testing" "github.com/Azure/azure-storage-azcopy/v10/common" - "github.com/Azure/azure-storage-blob-go/azblob" ) // regular blob->file sync func TestSyncS2SWithSingleBlob(t *testing.T) { a := assert.New(t) - bsu := getBSU() - srcContainerURL, srcContainerName := createNewContainer(a, bsu) - dstContainerURL, dstContainerName := createNewContainer(a, bsu) - defer deleteContainer(a, srcContainerURL) - defer deleteContainer(a, dstContainerURL) + bsc := getBlobServiceClient() + srcContainerClient, srcContainerName := createNewContainer(a, bsc) + dstContainerClient, dstContainerName := createNewContainer(a, bsc) + defer deleteContainer(a, srcContainerClient) + defer deleteContainer(a, dstContainerClient) for _, blobName := range []string{"singleblobisbest", "打麻将.txt", "%4509%4254$85140&"} { // set up the source container with a single blob blobList := []string{blobName} - scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, blobList, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, srcContainerClient, blobList, blockBlobDefaultData) // set up the destination container with the same single blob - scenarioHelper{}.generateBlobsFromList(a, dstContainerURL, blobList, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, dstContainerClient, blobList, blockBlobDefaultData) // set up interceptor mockedRPC := interceptor{} @@ -69,7 +71,7 @@ func TestSyncS2SWithSingleBlob(t *testing.T) { }) // recreate the source blob to have a later last modified time - scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, blobList, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, srcContainerClient, blobList, blockBlobDefaultData) mockedRPC.reset() runSyncAndVerify(a, raw, func(err error) { @@ -82,14 +84,14 @@ func TestSyncS2SWithSingleBlob(t *testing.T) { // regular container->container sync but destination is empty, so everything has to be transferred func TestSyncS2SWithEmptyDestination(t *testing.T) { a := 
assert.New(t) - bsu := getBSU() - srcContainerURL, srcContainerName := createNewContainer(a, bsu) - dstContainerURL, dstContainerName := createNewContainer(a, bsu) - defer deleteContainer(a, srcContainerURL) - defer deleteContainer(a, dstContainerURL) + bsc := getBlobServiceClient() + srcContainerClient, srcContainerName := createNewContainer(a, bsc) + dstContainerClient, dstContainerName := createNewContainer(a, bsc) + defer deleteContainer(a, srcContainerClient) + defer deleteContainer(a, dstContainerClient) // set up the source container with numerous blobs - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerURL, "") + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerClient, "") a.NotZero(len(blobList)) // set up interceptor @@ -129,18 +131,18 @@ func TestSyncS2SWithEmptyDestination(t *testing.T) { // regular container->container sync but destination is identical to the source, transfers are scheduled based on lmt func TestSyncS2SWithIdenticalDestination(t *testing.T) { a := assert.New(t) - bsu := getBSU() - srcContainerURL, srcContainerName := createNewContainer(a, bsu) - dstContainerURL, dstContainerName := createNewContainer(a, bsu) - defer deleteContainer(a, srcContainerURL) - defer deleteContainer(a, dstContainerURL) + bsc := getBlobServiceClient() + srcContainerClient, srcContainerName := createNewContainer(a, bsc) + dstContainerClient, dstContainerName := createNewContainer(a, bsc) + defer deleteContainer(a, srcContainerClient) + defer deleteContainer(a, dstContainerClient) // set up the source container with numerous blobs - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerURL, "") + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerClient, "") a.NotZero(len(blobList)) // set up the destination with the exact same files - scenarioHelper{}.generateBlobsFromList(a, dstContainerURL, blobList, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, dstContainerClient, blobList, blockBlobDefaultData) // set up interceptor mockedRPC := interceptor{} @@ -161,7 +163,7 @@ func TestSyncS2SWithIdenticalDestination(t *testing.T) { }) // refresh the source blobs' last modified time so that they get synced - scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, blobList, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, srcContainerClient, blobList, blockBlobDefaultData) mockedRPC.reset() runSyncAndVerify(a, raw, func(err error) { a.Nil(err) @@ -173,22 +175,22 @@ func TestSyncS2SWithIdenticalDestination(t *testing.T) { func TestSyncS2SWithMismatchedDestination(t *testing.T) { a := assert.New(t) t.Skip("Enable after setting Account to non-HNS") - bsu := getBSU() - srcContainerURL, srcContainerName := createNewContainer(a, bsu) - dstContainerURL, dstContainerName := createNewContainer(a, bsu) - defer deleteContainer(a, srcContainerURL) - defer deleteContainer(a, dstContainerURL) + bsc := getBlobServiceClient() + srcContainerClient, srcContainerName := createNewContainer(a, bsc) + dstContainerClient, dstContainerName := createNewContainer(a, bsc) + defer deleteContainer(a, srcContainerClient) + defer deleteContainer(a, dstContainerClient) // set up the container with numerous blobs - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerURL, "") + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerClient, "") a.NotZero(len(blobList)) // set up the destination with half of the blobs from 
source - scenarioHelper{}.generateBlobsFromList(a, dstContainerURL, blobList[0:len(blobList)/2], blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, dstContainerClient, blobList[0:len(blobList)/2], blockBlobDefaultData) expectedOutput := blobList[len(blobList)/2:] // the missing half of source blobs should be transferred // add some extra blobs that shouldn't be included - scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, dstContainerURL, "extra") + scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, dstContainerClient, "extra") // set up interceptor mockedRPC := interceptor{} @@ -206,14 +208,14 @@ func TestSyncS2SWithMismatchedDestination(t *testing.T) { // make sure the extra blobs were deleted extraFilesFound := false - for marker := (azblob.Marker{}); marker.NotDone(); { - listResponse, err := dstContainerURL.ListBlobsFlatSegment(ctx, marker, azblob.ListBlobsSegmentOptions{}) + pager := dstContainerClient.NewListBlobsFlatPager(nil) + for pager.More() { + listResponse, err := pager.NextPage(ctx) a.Nil(err) - marker = listResponse.NextMarker // if ever the extra blobs are found, note it down for _, blob := range listResponse.Segment.BlobItems { - if strings.Contains(blob.Name, "extra") { + if strings.Contains(*blob.Name, "extra") { extraFilesFound = true } } @@ -226,19 +228,19 @@ func TestSyncS2SWithMismatchedDestination(t *testing.T) { // include flag limits the scope of source/destination comparison func TestSyncS2SWithIncludePatternFlag(t *testing.T) { a := assert.New(t) - bsu := getBSU() - srcContainerURL, srcContainerName := createNewContainer(a, bsu) - dstContainerURL, dstContainerName := createNewContainer(a, bsu) - defer deleteContainer(a, srcContainerURL) - defer deleteContainer(a, dstContainerURL) + bsc := getBlobServiceClient() + srcContainerClient, srcContainerName := createNewContainer(a, bsc) + dstContainerClient, dstContainerName := createNewContainer(a, bsc) + defer deleteContainer(a, srcContainerClient) + defer deleteContainer(a, dstContainerClient) // set up the source container with numerous blobs - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerURL, "") + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerClient, "") a.NotZero(len(blobList)) // add special blobs that we wish to include blobsToInclude := []string{"important.pdf", "includeSub/amazing.jpeg", "exactName"} - scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, blobsToInclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, srcContainerClient, blobsToInclude, blockBlobDefaultData) includeString := "*.pdf;*.jpeg;exactName" // set up interceptor @@ -262,19 +264,19 @@ func TestSyncS2SWithIncludePatternFlag(t *testing.T) { // exclude flag limits the scope of source/destination comparison func TestSyncS2SWithExcludePatternFlag(t *testing.T) { a := assert.New(t) - bsu := getBSU() - srcContainerURL, srcContainerName := createNewContainer(a, bsu) - dstContainerURL, dstContainerName := createNewContainer(a, bsu) - defer deleteContainer(a, srcContainerURL) - defer deleteContainer(a, dstContainerURL) + bsc := getBlobServiceClient() + srcContainerClient, srcContainerName := createNewContainer(a, bsc) + dstContainerClient, dstContainerName := createNewContainer(a, bsc) + defer deleteContainer(a, srcContainerClient) + defer deleteContainer(a, dstContainerClient) // set up the source container with numerous blobs - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerURL, "") + blobList 
:= scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerClient, "") a.NotZero(len(blobList)) // add special blobs that we wish to exclude blobsToExclude := []string{"notGood.pdf", "excludeSub/lame.jpeg", "exactName"} - scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, blobsToExclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, srcContainerClient, blobsToExclude, blockBlobDefaultData) excludeString := "*.pdf;*.jpeg;exactName" // set up interceptor @@ -298,25 +300,25 @@ func TestSyncS2SWithExcludePatternFlag(t *testing.T) { // include and exclude flag can work together to limit the scope of source/destination comparison func TestSyncS2SWithIncludeAndExcludePatternFlag(t *testing.T) { a := assert.New(t) - bsu := getBSU() - srcContainerURL, srcContainerName := createNewContainer(a, bsu) - dstContainerURL, dstContainerName := createNewContainer(a, bsu) - defer deleteContainer(a, srcContainerURL) - defer deleteContainer(a, dstContainerURL) + bsc := getBlobServiceClient() + srcContainerClient, srcContainerName := createNewContainer(a, bsc) + dstContainerClient, dstContainerName := createNewContainer(a, bsc) + defer deleteContainer(a, srcContainerClient) + defer deleteContainer(a, dstContainerClient) // set up the source container with numerous blobs - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerURL, "") + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerClient, "") a.NotZero(len(blobList)) // add special blobs that we wish to include blobsToInclude := []string{"important.pdf", "includeSub/amazing.jpeg"} - scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, blobsToInclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, srcContainerClient, blobsToInclude, blockBlobDefaultData) includeString := "*.pdf;*.jpeg;exactName" // add special blobs that we wish to exclude // note that the excluded files also match the include string blobsToExclude := []string{"sorry.pdf", "exclude/notGood.jpeg", "exactName", "sub/exactName"} - scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, blobsToExclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, srcContainerClient, blobsToExclude, blockBlobDefaultData) excludeString := "so*;not*;exactName" // set up interceptor @@ -341,19 +343,19 @@ func TestSyncS2SWithIncludeAndExcludePatternFlag(t *testing.T) { // a specific path is avoided in the comparison func TestSyncS2SWithExcludePathFlag(t *testing.T) { a := assert.New(t) - bsu := getBSU() - srcContainerURL, srcContainerName := createNewContainer(a, bsu) - dstContainerURL, dstContainerName := createNewContainer(a, bsu) - defer deleteContainer(a, srcContainerURL) - defer deleteContainer(a, dstContainerURL) + bsc := getBlobServiceClient() + srcContainerClient, srcContainerName := createNewContainer(a, bsc) + dstContainerClient, dstContainerName := createNewContainer(a, bsc) + defer deleteContainer(a, srcContainerClient) + defer deleteContainer(a, dstContainerClient) // set up the source container with numerous blobs - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerURL, "") + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerClient, "") a.NotZero(len(blobList)) // add special blobs that we wish to exclude blobsToExclude := []string{"excludeSub/notGood.pdf", "excludeSub/lame.jpeg", "exactName"} - scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, blobsToExclude, blockBlobDefaultData) + 
scenarioHelper{}.generateBlobsFromList(a, srcContainerClient, blobsToExclude, blockBlobDefaultData) excludeString := "excludeSub;exactName" // set up interceptor @@ -374,10 +376,10 @@ func TestSyncS2SWithExcludePathFlag(t *testing.T) { }) // now set up the destination with the blobs to be excluded, and make sure they are not touched - scenarioHelper{}.generateBlobsFromList(a, dstContainerURL, blobsToExclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, dstContainerClient, blobsToExclude, blockBlobDefaultData) // re-create the ones at the source so that their lmts are newer - scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, blobsToExclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, srcContainerClient, blobsToExclude, blockBlobDefaultData) mockedRPC.reset() runSyncAndVerify(a, raw, func(err error) { @@ -386,7 +388,7 @@ func TestSyncS2SWithExcludePathFlag(t *testing.T) { // make sure the extra blobs were not touched for _, blobName := range blobsToExclude { - exists := scenarioHelper{}.blobExists(dstContainerURL.NewBlobURL(blobName)) + exists := scenarioHelper{}.blobExists(dstContainerClient.NewBlobClient(blobName)) a.True(exists) } }) @@ -395,16 +397,16 @@ func TestSyncS2SWithExcludePathFlag(t *testing.T) { // validate the bug fix for this scenario func TestSyncS2SWithMissingDestination(t *testing.T) { a := assert.New(t) - bsu := getBSU() - srcContainerURL, srcContainerName := createNewContainer(a, bsu) - dstContainerURL, dstContainerName := createNewContainer(a, bsu) - defer deleteContainer(a, srcContainerURL) + bsc := getBlobServiceClient() + srcContainerClient, srcContainerName := createNewContainer(a, bsc) + dstContainerClient, dstContainerName := createNewContainer(a, bsc) + defer deleteContainer(a, srcContainerClient) // delete the destination container to simulate non-existing destination, or recently removed destination - deleteContainer(a, dstContainerURL) + deleteContainer(a, dstContainerClient) // set up the container with numerous blobs - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerURL, "") + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerClient, "") a.NotZero(len(blobList)) // set up interceptor @@ -430,19 +432,19 @@ func TestSyncS2SWithMissingDestination(t *testing.T) { // there is a type mismatch between the source and destination func TestSyncS2SMismatchContainerAndBlob(t *testing.T) { a := assert.New(t) - bsu := getBSU() - srcContainerURL, srcContainerName := createNewContainer(a, bsu) - dstContainerURL, dstContainerName := createNewContainer(a, bsu) - defer deleteContainer(a, srcContainerURL) - defer deleteContainer(a, dstContainerURL) + bsc := getBlobServiceClient() + srcContainerClient, srcContainerName := createNewContainer(a, bsc) + dstContainerClient, dstContainerName := createNewContainer(a, bsc) + defer deleteContainer(a, srcContainerClient) + defer deleteContainer(a, dstContainerClient) // set up the source container with numerous blobs - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerURL, "") + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerClient, "") a.NotZero(len(blobList)) // set up the destination container with a single blob singleBlobName := "single" - scenarioHelper{}.generateBlobsFromList(a, dstContainerURL, []string{singleBlobName}, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, dstContainerClient, []string{singleBlobName}, blockBlobDefaultData) // 
set up interceptor mockedRPC := interceptor{} @@ -477,14 +479,14 @@ func TestSyncS2SMismatchContainerAndBlob(t *testing.T) { // container <-> virtual dir sync func TestSyncS2SContainerAndEmptyVirtualDir(t *testing.T) { a := assert.New(t) - bsu := getBSU() - srcContainerURL, srcContainerName := createNewContainer(a, bsu) - dstContainerURL, dstContainerName := createNewContainer(a, bsu) - defer deleteContainer(a, srcContainerURL) - defer deleteContainer(a, dstContainerURL) + bsc := getBlobServiceClient() + srcContainerClient, srcContainerName := createNewContainer(a, bsc) + dstContainerClient, dstContainerName := createNewContainer(a, bsc) + defer deleteContainer(a, srcContainerClient) + defer deleteContainer(a, dstContainerClient) // set up the source container with numerous blobs - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerURL, "") + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerClient, "") a.NotZero(len(blobList)) // set up interceptor @@ -525,19 +527,19 @@ func TestSyncS2SContainerAndEmptyVirtualDir(t *testing.T) { // regular vdir -> vdir sync func TestSyncS2SBetweenVirtualDirs(t *testing.T) { a := assert.New(t) - bsu := getBSU() - srcContainerURL, srcContainerName := createNewContainer(a, bsu) - dstContainerURL, dstContainerName := createNewContainer(a, bsu) - defer deleteContainer(a, srcContainerURL) - defer deleteContainer(a, dstContainerURL) + bsc := getBlobServiceClient() + srcContainerClient, srcContainerName := createNewContainer(a, bsc) + dstContainerClient, dstContainerName := createNewContainer(a, bsc) + defer deleteContainer(a, srcContainerClient) + defer deleteContainer(a, dstContainerClient) // set up the source container with numerous blobs vdirName := "vdir" - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerURL, vdirName+common.AZCOPY_PATH_SEPARATOR_STRING) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerClient, vdirName+common.AZCOPY_PATH_SEPARATOR_STRING) a.NotZero(len(blobList)) // set up the destination with the exact same files - scenarioHelper{}.generateBlobsFromList(a, dstContainerURL, blobList, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, dstContainerClient, blobList, blockBlobDefaultData) // set up interceptor mockedRPC := interceptor{} @@ -560,7 +562,7 @@ func TestSyncS2SBetweenVirtualDirs(t *testing.T) { }) // refresh the blobs' last modified time so that they are newer - scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, blobList, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, srcContainerClient, blobList, blockBlobDefaultData) mockedRPC.reset() expectedList := scenarioHelper{}.shaveOffPrefix(blobList, vdirName+common.AZCOPY_PATH_SEPARATOR_STRING) runSyncAndVerify(a, raw, func(err error) { @@ -574,23 +576,22 @@ func TestSyncS2SBetweenVirtualDirs(t *testing.T) { func TestSyncS2SBetweenVirtualDirsWithConflictingBlob(t *testing.T) { a := assert.New(t) t.Skip("Enable after setting Account to non-HNS") - bsu := getBSU() - srcContainerURL, srcContainerName := createNewContainer(a, bsu) - dstContainerURL, dstContainerName := createNewContainer(a, bsu) - defer deleteContainer(a, srcContainerURL) - defer deleteContainer(a, dstContainerURL) + bsc := getBlobServiceClient() + srcContainerClient, srcContainerName := createNewContainer(a, bsc) + dstContainerClient, dstContainerName := createNewContainer(a, bsc) + defer deleteContainer(a, srcContainerClient) + defer deleteContainer(a, 
dstContainerClient) // set up the source container with numerous blobs vdirName := "vdir" - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerURL, - vdirName+common.AZCOPY_PATH_SEPARATOR_STRING) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerClient, vdirName+common.AZCOPY_PATH_SEPARATOR_STRING) a.NotZero(len(blobList)) // set up the destination with the exact same files - scenarioHelper{}.generateBlobsFromList(a, dstContainerURL, blobList, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, dstContainerClient, blobList, blockBlobDefaultData) // create a blob at the destination with the exact same name as the vdir - scenarioHelper{}.generateBlobsFromList(a, dstContainerURL, []string{vdirName}, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, dstContainerClient, []string{vdirName}, blockBlobDefaultData) // set up interceptor mockedRPC := interceptor{} @@ -622,7 +623,7 @@ func TestSyncS2SBetweenVirtualDirsWithConflictingBlob(t *testing.T) { // case 3: blob -> blob: if source is also a blob, then single blob to blob sync happens // create a blob at the source with the exact same name as the vdir - scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, []string{vdirName}, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, srcContainerClient, []string{vdirName}, blockBlobDefaultData) raw = getDefaultSyncRawInput(srcContainerURLWithSAS.String(), dstContainerURLWithSAS.String()) runSyncAndVerify(a, raw, func(err error) { a.Nil(err) @@ -630,7 +631,7 @@ func TestSyncS2SBetweenVirtualDirsWithConflictingBlob(t *testing.T) { }) // refresh the dst blobs' last modified time so that they are newer - scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, blobList, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, srcContainerClient, blobList, blockBlobDefaultData) mockedRPC.reset() // case 4: vdir -> vdir: adding a trailing slash helps to clarify it should be treated as virtual dir @@ -648,24 +649,25 @@ func TestSyncS2SBetweenVirtualDirsWithConflictingBlob(t *testing.T) { // we should recognize this and sync with the virtual directory instead func TestSyncS2SADLSDirectory(t *testing.T) { a := assert.New(t) - bsu := getBSU() - srcContainerURL, srcContainerName := createNewContainer(a, bsu) - dstContainerURL, dstContainerName := createNewContainer(a, bsu) - defer deleteContainer(a, srcContainerURL) - defer deleteContainer(a, dstContainerURL) + bsc := getBlobServiceClient() + srcContainerClient, srcContainerName := createNewContainer(a, bsc) + dstContainerClient, dstContainerName := createNewContainer(a, bsc) + defer deleteContainer(a, srcContainerClient) + defer deleteContainer(a, dstContainerClient) // set up the source container with numerous blobs vdirName := "vdir" - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerURL, - vdirName+common.AZCOPY_PATH_SEPARATOR_STRING) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerClient, vdirName+common.AZCOPY_PATH_SEPARATOR_STRING) a.NotZero(len(blobList)) // set up the destination with the exact same files - scenarioHelper{}.generateBlobsFromList(a, dstContainerURL, blobList, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, dstContainerClient, blobList, blockBlobDefaultData) // create an ADLS Gen2 directory at the source with the exact same name as the vdir - _, err := srcContainerURL.NewBlockBlobURL(vdirName).Upload(context.Background(), 
bytes.NewReader(nil), - azblob.BlobHTTPHeaders{}, azblob.Metadata{"hdi_isfolder": "true"}, azblob.BlobAccessConditions{}, azblob.DefaultAccessTier, nil, azblob.ClientProvidedKeyOptions{}, azblob.ImmutabilityPolicyOptions{}) + _, err := srcContainerClient.NewBlockBlobClient(vdirName).Upload(context.Background(), streaming.NopCloser(bytes.NewReader(nil)), + &blockblob.UploadOptions{ + Metadata: map[string]*string{"hdi_isfolder": to.Ptr("true")}, + }) a.Nil(err) // set up interceptor @@ -689,7 +691,7 @@ func TestSyncS2SADLSDirectory(t *testing.T) { }) // refresh the sources blobs' last modified time so that they are newer - scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, blobList, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, srcContainerClient, blobList, blockBlobDefaultData) mockedRPC.reset() expectedTransfers := scenarioHelper{}.shaveOffPrefix(blobList, vdirName+common.AZCOPY_PATH_SEPARATOR_STRING) @@ -702,19 +704,19 @@ func TestSyncS2SADLSDirectory(t *testing.T) { // testing multiple include regular expression func TestSyncS2SWithIncludeRegexFlag(t *testing.T) { a := assert.New(t) - bsu := getBSU() - srcContainerURL, srcContainerName := createNewContainer(a, bsu) - dstContainerURL, dstContainerName := createNewContainer(a, bsu) - defer deleteContainer(a, srcContainerURL) - defer deleteContainer(a, dstContainerURL) + bsc := getBlobServiceClient() + srcContainerClient, srcContainerName := createNewContainer(a, bsc) + dstContainerClient, dstContainerName := createNewContainer(a, bsc) + defer deleteContainer(a, srcContainerClient) + defer deleteContainer(a, dstContainerClient) // set up the source container with numerous blobs - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerURL, "") + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerClient, "") a.NotZero(len(blobList)) // add special blobs that we wish to include blobsToInclude := []string{"tessssssssssssst.txt", "zxcfile.txt", "subOne/tetingessssss.jpeg", "subOne/subTwo/tessssst.pdf"} - scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, blobsToInclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, srcContainerClient, blobsToInclude, blockBlobDefaultData) includeString := "es{4,};^zxc" // set up interceptor @@ -749,19 +751,19 @@ func TestSyncS2SWithIncludeRegexFlag(t *testing.T) { // testing multiple exclude regular expressions func TestSyncS2SWithExcludeRegexFlag(t *testing.T) { a := assert.New(t) - bsu := getBSU() - srcContainerURL, srcContainerName := createNewContainer(a, bsu) - dstContainerURL, dstContainerName := createNewContainer(a, bsu) - defer deleteContainer(a, srcContainerURL) - defer deleteContainer(a, dstContainerURL) + bsc := getBlobServiceClient() + srcContainerClient, srcContainerName := createNewContainer(a, bsc) + dstContainerClient, dstContainerName := createNewContainer(a, bsc) + defer deleteContainer(a, srcContainerClient) + defer deleteContainer(a, dstContainerClient) // set up the source container with blobs - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerURL, "") + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerClient, "") a.NotZero(len(blobList)) // add special blobs that we wish to exclude blobsToExclude := []string{"tessssssssssssst.txt", "subOne/dogs.jpeg", "subOne/subTwo/tessssst.pdf"} - scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, blobsToExclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, 
srcContainerClient, blobsToExclude, blockBlobDefaultData) excludeString := "es{4,};o(g)" // set up interceptor @@ -788,24 +790,24 @@ func TestSyncS2SWithExcludeRegexFlag(t *testing.T) { // testing with both include and exclude regular expression flags func TestSyncS2SWithIncludeAndExcludeRegexFlag(t *testing.T) { a := assert.New(t) - bsu := getBSU() - srcContainerURL, srcContainerName := createNewContainer(a, bsu) - dstContainerURL, dstContainerName := createNewContainer(a, bsu) - defer deleteContainer(a, srcContainerURL) - defer deleteContainer(a, dstContainerURL) + bsc := getBlobServiceClient() + srcContainerClient, srcContainerName := createNewContainer(a, bsc) + dstContainerClient, dstContainerName := createNewContainer(a, bsc) + defer deleteContainer(a, srcContainerClient) + defer deleteContainer(a, dstContainerClient) // set up the source container with numerous blobs - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerURL, "") + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerClient, "") a.NotZero(len(blobList)) // add special blobs that we wish to include blobsToInclude := []string{"tessssssssssssst.txt", "zxcfile.txt", "subOne/tetingessssss.jpeg"} - scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, blobsToInclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, srcContainerClient, blobsToInclude, blockBlobDefaultData) includeString := "es{4,};^zxc" // add special blobs that we wish to exclude blobsToExclude := []string{"zxca.txt", "subOne/dogs.jpeg", "subOne/subTwo/zxcat.pdf"} - scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, blobsToExclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, srcContainerClient, blobsToExclude, blockBlobDefaultData) excludeString := "^zxca;o(g)" // set up interceptor @@ -840,19 +842,19 @@ func TestSyncS2SWithIncludeAndExcludeRegexFlag(t *testing.T) { func TestDryrunSyncBlobtoBlob(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up src container - srcContainerURL, srcContainerName := createNewContainer(a, bsu) - defer deleteContainer(a, srcContainerURL) + srcContainerClient, srcContainerName := createNewContainer(a, bsc) + defer deleteContainer(a, srcContainerClient) blobsToInclude := []string{"AzURE2.jpeg", "sub1/aTestOne.txt", "sub1/sub2/testTwo.pdf"} - scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, blobsToInclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, srcContainerClient, blobsToInclude, blockBlobDefaultData) // set up dst container - dstContainerURL, dstContainerName := createNewContainer(a, bsu) - defer deleteContainer(a, dstContainerURL) + dstContainerClient, dstContainerName := createNewContainer(a, bsc) + defer deleteContainer(a, dstContainerClient) blobsToDelete := []string{"testThree.jpeg"} - scenarioHelper{}.generateBlobsFromList(a, dstContainerURL, blobsToDelete, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, dstContainerClient, blobsToDelete, blockBlobDefaultData) // set up interceptor mockedRPC := interceptor{} @@ -876,11 +878,11 @@ func TestDryrunSyncBlobtoBlob(t *testing.T) { sort.Strings(msg) for i := 0; i < len(msg); i++ { if strings.Contains(msg[i], "DRYRUN: remove") { - a.True(strings.Contains(msg[i], dstContainerURL.String())) + a.True(strings.Contains(msg[i], dstContainerClient.URL())) } else { a.True(strings.Contains(msg[i], "DRYRUN: copy")) - a.True(strings.Contains(msg[i], srcContainerURL.String())) - 
a.True(strings.Contains(msg[i], dstContainerURL.String())) + a.True(strings.Contains(msg[i], srcContainerClient.URL())) + a.True(strings.Contains(msg[i], dstContainerClient.URL())) } } @@ -891,17 +893,17 @@ func TestDryrunSyncBlobtoBlob(t *testing.T) { func TestDryrunSyncBlobtoBlobJson(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up src container - srcContainerURL, srcContainerName := createNewContainer(a, bsu) - defer deleteContainer(a, srcContainerURL) + srcContainerClient, srcContainerName := createNewContainer(a, bsc) + defer deleteContainer(a, srcContainerClient) // set up dst container - dstContainerURL, dstContainerName := createNewContainer(a, bsu) - defer deleteContainer(a, dstContainerURL) + dstContainerClient, dstContainerName := createNewContainer(a, bsc) + defer deleteContainer(a, dstContainerClient) blobsToDelete := []string{"testThree.jpeg"} - scenarioHelper{}.generateBlobsFromList(a, dstContainerURL, blobsToDelete, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, dstContainerClient, blobsToDelete, blockBlobDefaultData) // set up interceptor mockedRPC := interceptor{} diff --git a/cmd/zt_sync_blob_local_test.go b/cmd/zt_sync_blob_local_test.go index 3ff73d261..70c52cb08 100644 --- a/cmd/zt_sync_blob_local_test.go +++ b/cmd/zt_sync_blob_local_test.go @@ -23,6 +23,9 @@ package cmd import ( "bytes" "context" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob" "github.com/stretchr/testify/assert" "os" "path/filepath" @@ -31,7 +34,6 @@ import ( "time" "github.com/Azure/azure-storage-azcopy/v10/common" - "github.com/Azure/azure-storage-blob-go/azblob" ) const ( @@ -41,15 +43,15 @@ const ( // regular blob->file sync func TestSyncDownloadWithSingleFile(t *testing.T) { a := assert.New(t) - bsu := getBSU() - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) + bsc := getBlobServiceClient() + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) for _, blobName := range []string{"singleblobisbest", "打麻将.txt", "%4509%4254$85140&"} { // set up the container with a single blob blobList := []string{blobName} - scenarioHelper{}.generateBlobsFromList(a, containerURL, blobList, blockBlobDefaultData) - a.NotNil(containerURL) + scenarioHelper{}.generateBlobsFromList(a, cc, blobList, blockBlobDefaultData) + a.NotNil(cc) // set up the destination as a single file time.Sleep(time.Second) @@ -79,7 +81,7 @@ func TestSyncDownloadWithSingleFile(t *testing.T) { time.Sleep(5 * time.Second) // recreate the blob to have a later last modified time time.Sleep(time.Second) - scenarioHelper{}.generateBlobsFromList(a, containerURL, blobList, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, cc, blobList, blockBlobDefaultData) mockedRPC.reset() runSyncAndVerify(a, raw, func(err error) { @@ -93,13 +95,13 @@ func TestSyncDownloadWithSingleFile(t *testing.T) { // regular container->directory sync but destination is empty, so everything has to be transferred func TestSyncDownloadWithEmptyDestination(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(a, bsu) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, "") - defer deleteContainer(a, containerURL) - a.NotNil(containerURL) + cc, 
containerName := createNewContainer(a, bsc) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, cc, "") + defer deleteContainer(a, cc) + a.NotNil(cc) a.NotZero(len(blobList)) // set up the destination with an empty folder @@ -142,13 +144,13 @@ func TestSyncDownloadWithEmptyDestination(t *testing.T) { // regular container->directory sync but destination is identical to the source, transfers are scheduled based on lmt func TestSyncDownloadWithIdenticalDestination(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(a, bsu) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, "") - defer deleteContainer(a, containerURL) - a.NotNil(containerURL) + cc, containerName := createNewContainer(a, bsc) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, cc, "") + defer deleteContainer(a, cc) + a.NotNil(cc) a.NotZero(len(blobList)) // set up the destination with a folder that have the exact same files @@ -173,7 +175,7 @@ func TestSyncDownloadWithIdenticalDestination(t *testing.T) { }) // refresh the blobs' last modified time so that they are newer - scenarioHelper{}.generateBlobsFromList(a, containerURL, blobList, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, cc, blobList, blockBlobDefaultData) mockedRPC.reset() runSyncAndVerify(a, raw, func(err error) { @@ -185,13 +187,13 @@ func TestSyncDownloadWithIdenticalDestination(t *testing.T) { // regular container->directory sync where destination is missing some files from source, and also has some extra files func TestSyncDownloadWithMismatchedDestination(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(a, bsu) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, "") - defer deleteContainer(a, containerURL) - a.NotNil(containerURL) + cc, containerName := createNewContainer(a, bsc) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, cc, "") + defer deleteContainer(a, cc) + a.NotNil(cc) a.NotZero(len(blobList)) // set up the destination with a folder that have half of the files from source @@ -230,18 +232,19 @@ func TestSyncDownloadWithMismatchedDestination(t *testing.T) { // include flag limits the scope of source/destination comparison func TestSyncDownloadWithIncludePatternFlag(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(a, bsu) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, "") - defer deleteContainer(a, containerURL) - a.NotNil(containerURL) + cc, containerName := createNewContainer(a, bsc) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, cc, "") + defer deleteContainer(a, cc) + a.NotNil(cc) a.NotZero(len(blobList)) // add special blobs that we wish to include blobsToInclude := []string{"important.pdf", "includeSub/amazing.jpeg", "exactName"} - scenarioHelper{}.generateBlobsFromList(a, containerURL, blobsToInclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, cc, blobsToInclude, blockBlobDefaultData) + includeString := "*.pdf;*.jpeg;exactName" // set up the destination with an empty folder @@ -267,18 +270,18 @@ func 
TestSyncDownloadWithIncludePatternFlag(t *testing.T) { // exclude flag limits the scope of source/destination comparison func TestSyncDownloadWithExcludePatternFlag(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(a, bsu) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, "") - defer deleteContainer(a, containerURL) - a.NotNil(containerURL) + cc, containerName := createNewContainer(a, bsc) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, cc, "") + defer deleteContainer(a, cc) + a.NotNil(cc) a.NotZero(len(blobList)) // add special blobs that we wish to exclude blobsToExclude := []string{"notGood.pdf", "excludeSub/lame.jpeg", "exactName"} - scenarioHelper{}.generateBlobsFromList(a, containerURL, blobsToExclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, cc, blobsToExclude, blockBlobDefaultData) excludeString := "*.pdf;*.jpeg;exactName" // set up the destination with an empty folder @@ -304,24 +307,24 @@ func TestSyncDownloadWithExcludePatternFlag(t *testing.T) { // include and exclude flag can work together to limit the scope of source/destination comparison func TestSyncDownloadWithIncludeAndExcludePatternFlag(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(a, bsu) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, "") - defer deleteContainer(a, containerURL) - a.NotNil(containerURL) + cc, containerName := createNewContainer(a, bsc) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, cc, "") + defer deleteContainer(a, cc) + a.NotNil(cc) a.NotZero(len(blobList)) // add special blobs that we wish to include blobsToInclude := []string{"important.pdf", "includeSub/amazing.jpeg"} - scenarioHelper{}.generateBlobsFromList(a, containerURL, blobsToInclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, cc, blobsToInclude, blockBlobDefaultData) includeString := "*.pdf;*.jpeg;exactName" // add special blobs that we wish to exclude // note that the excluded files also match the include string blobsToExclude := []string{"sorry.pdf", "exclude/notGood.jpeg", "exactName", "sub/exactName"} - scenarioHelper{}.generateBlobsFromList(a, containerURL, blobsToExclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, cc, blobsToExclude, blockBlobDefaultData) excludeString := "so*;not*;exactName" // set up the destination with an empty folder @@ -348,18 +351,19 @@ func TestSyncDownloadWithIncludeAndExcludePatternFlag(t *testing.T) { // a specific path is avoided in the comparison func TestSyncDownloadWithExcludePathFlag(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(a, bsu) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, "") - defer deleteContainer(a, containerURL) - a.NotNil(containerURL) + cc, containerName := createNewContainer(a, bsc) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, cc, "") + defer deleteContainer(a, cc) + a.NotNil(cc) a.NotZero(len(blobList)) // add special blobs that we wish to exclude blobsToExclude := []string{"excludeSub/notGood.pdf", "excludeSub/lame.jpeg", "exactName"} - 
scenarioHelper{}.generateBlobsFromList(a, containerURL, blobsToExclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, cc, blobsToExclude, blockBlobDefaultData) + excludeString := "excludeSub;exactName" // set up the destination with an empty folder @@ -385,7 +389,7 @@ func TestSyncDownloadWithExcludePathFlag(t *testing.T) { scenarioHelper{}.generateLocalFilesFromList(a, dstDirName, blobsToExclude) // re-create the ones at the source so that their lmts are newer - scenarioHelper{}.generateBlobsFromList(a, containerURL, blobsToExclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, cc, blobsToExclude, blockBlobDefaultData) mockedRPC.reset() runSyncAndVerify(a, raw, func(err error) { @@ -403,13 +407,13 @@ func TestSyncDownloadWithExcludePathFlag(t *testing.T) { // validate the bug fix for this scenario func TestSyncDownloadWithMissingDestination(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(a, bsu) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, "") - defer deleteContainer(a, containerURL) - a.NotNil(containerURL) + cc, containerName := createNewContainer(a, bsc) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, cc, "") + defer deleteContainer(a, cc) + a.NotNil(cc) a.NotZero(len(blobList)) // set up the destination as a missing folder @@ -438,13 +442,13 @@ func TestSyncDownloadWithMissingDestination(t *testing.T) { // there is a type mismatch between the source and destination func TestSyncMismatchContainerAndFile(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(a, bsu) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, "") - defer deleteContainer(a, containerURL) - a.NotNil(containerURL) + cc, containerName := createNewContainer(a, bsc) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, cc, "") + defer deleteContainer(a, cc) + a.NotNil(cc) a.NotZero(len(blobList)) // set up the destination as a single file @@ -485,15 +489,15 @@ func TestSyncMismatchContainerAndFile(t *testing.T) { // there is a type mismatch between the source and destination func TestSyncMismatchBlobAndDirectory(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up the container with a single blob blobName := "singleblobisbest" blobList := []string{blobName} - containerURL, containerName := createNewContainer(a, bsu) - scenarioHelper{}.generateBlobsFromList(a, containerURL, blobList, blockBlobDefaultData) - defer deleteContainer(a, containerURL) - a.NotNil(containerURL) + cc, containerName := createNewContainer(a, bsc) + scenarioHelper{}.generateBlobsFromList(a, cc, blobList, blockBlobDefaultData) + defer deleteContainer(a, cc) + a.NotNil(cc) // set up the destination as a directory dstDirName := scenarioHelper{}.generateLocalDirectory(a) @@ -532,7 +536,7 @@ func TestSyncMismatchBlobAndDirectory(t *testing.T) { // we should recognize that there is a type mismatch func TestSyncDownloadADLSDirectoryTypeMismatch(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() blobName := "adlsdir" // set up the destination as a single file @@ -542,13 +546,15 @@ func TestSyncDownloadADLSDirectoryTypeMismatch(t *testing.T) { 
scenarioHelper{}.generateLocalFilesFromList(a, dstDirName, []string{blobName}) // set up the container - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) - a.NotNil(containerURL) + containerClient, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, containerClient) + a.NotNil(containerClient) // create a single blob that represents an ADLS directory - _, err := containerURL.NewBlockBlobURL(blobName).Upload(context.Background(), bytes.NewReader(nil), - azblob.BlobHTTPHeaders{}, azblob.Metadata{"hdi_isfolder": "true"}, azblob.BlobAccessConditions{}, azblob.DefaultAccessTier, nil, azblob.ClientProvidedKeyOptions{}, azblob.ImmutabilityPolicyOptions{}) + _, err := containerClient.NewBlockBlobClient(blobName).Upload(context.Background(), streaming.NopCloser(bytes.NewReader(nil)), + &blockblob.UploadOptions{ + Metadata: map[string]*string{"hdi_isfolder": to.Ptr("true")}, + }) a.Nil(err) // set up interceptor @@ -573,25 +579,28 @@ func TestSyncDownloadADLSDirectoryTypeMismatch(t *testing.T) { // we should download every blob except the blob representing the directory func TestSyncDownloadWithADLSDirectory(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() adlsDirName := "adlsdir" // set up the container with numerous blobs - containerURL, containerName := createNewContainer(a, bsu) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, adlsDirName+"/") - defer deleteContainer(a, containerURL) - a.NotNil(containerURL) + containerClient, containerName := createNewContainer(a, bsc) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerClient, adlsDirName+"/") + defer deleteContainer(a, containerClient) + a.NotNil(containerClient) a.NotZero(len(blobList)) // create a single blob that represents the ADLS directory - dirBlob := containerURL.NewBlockBlobURL(adlsDirName) - _, err := dirBlob.Upload(context.Background(), bytes.NewReader(nil), - azblob.BlobHTTPHeaders{}, azblob.Metadata{"hdi_isfolder": "true"}, azblob.BlobAccessConditions{}, azblob.DefaultAccessTier, nil, azblob.ClientProvidedKeyOptions{}, azblob.ImmutabilityPolicyOptions{}) + _, err := containerClient.NewBlockBlobClient(adlsDirName).Upload(context.Background(), streaming.NopCloser(bytes.NewReader(nil)), + &blockblob.UploadOptions{ + Metadata: map[string]*string{"hdi_isfolder": to.Ptr("true")}, + }) a.Nil(err) // create an extra blob that represents an empty ADLS directory, which should never be picked up - _, err = containerURL.NewBlockBlobURL(adlsDirName+"/neverpickup").Upload(context.Background(), bytes.NewReader(nil), - azblob.BlobHTTPHeaders{}, azblob.Metadata{"hdi_isfolder": "true"}, azblob.BlobAccessConditions{}, azblob.DefaultAccessTier, nil, azblob.ClientProvidedKeyOptions{}, azblob.ImmutabilityPolicyOptions{}) + _, err = containerClient.NewBlockBlobClient(adlsDirName+"/neverpickup").Upload(context.Background(), streaming.NopCloser(bytes.NewReader(nil)), + &blockblob.UploadOptions{ + Metadata: map[string]*string{"hdi_isfolder": to.Ptr("true")}, + }) a.Nil(err) // set up the destination with an empty folder diff --git a/cmd/zt_sync_comparator_test.go b/cmd/zt_sync_comparator_test.go index 1eb7867ec..013ea3ab4 100644 --- a/cmd/zt_sync_comparator_test.go +++ b/cmd/zt_sync_comparator_test.go @@ -23,7 +23,6 @@ package cmd import ( "context" "github.com/Azure/azure-storage-azcopy/v10/common" - "github.com/Azure/azure-storage-file-go/azfile" "github.com/stretchr/testify/assert" "os" 
"sort" @@ -35,19 +34,19 @@ import ( // regular file->file sync func TestFileSyncS2SWithSingleFile(t *testing.T) { a := assert.New(t) - fsu := getFSU() - srcShareURL, srcShareName := createNewAzureShare(a, fsu) - dstShareURL, dstShareName := createNewAzureShare(a, fsu) - defer deleteShare(a, srcShareURL) - defer deleteShare(a, dstShareURL) + fsc := getFileServiceClient() + srcShareClient, srcShareName := createNewShare(a, fsc) + dstShareClient, dstShareName := createNewShare(a, fsc) + defer deleteShare(a, srcShareClient) + defer deleteShare(a, dstShareClient) for _, fileName := range []string{"singlefileisbest", "打麻将.txt", "%4509%4254$85140&"} { // set up the source share with a single file fileList := []string{fileName} - scenarioHelper{}.generateAzureFilesFromList(a, srcShareURL, fileList) + scenarioHelper{}.generateShareFilesFromList(a, srcShareClient, fsc, fileList) // set up the destination share with the same single file - scenarioHelper{}.generateAzureFilesFromList(a, dstShareURL, fileList) + scenarioHelper{}.generateShareFilesFromList(a, dstShareClient, fsc, fileList) // set up interceptor mockedRPC := interceptor{} @@ -68,7 +67,7 @@ func TestFileSyncS2SWithSingleFile(t *testing.T) { }) // recreate the source file to have a later last modified time - scenarioHelper{}.generateAzureFilesFromList(a, srcShareURL, fileList) + scenarioHelper{}.generateShareFilesFromList(a, srcShareClient, fsc, fileList) mockedRPC.reset() runSyncAndVerify(a, raw, func(err error) { @@ -81,14 +80,14 @@ func TestFileSyncS2SWithSingleFile(t *testing.T) { // regular share->share sync but destination is empty, so everything has to be transferred func TestFileSyncS2SWithEmptyDestination(t *testing.T) { a := assert.New(t) - fsu := getFSU() - srcShareURL, srcShareName := createNewAzureShare(a, fsu) - dstShareURL, dstShareName := createNewAzureShare(a, fsu) - defer deleteShare(a, srcShareURL) - defer deleteShare(a, dstShareURL) + fsc := getFileServiceClient() + srcShareClient, srcShareName := createNewShare(a, fsc) + dstShareClient, dstShareName := createNewShare(a, fsc) + defer deleteShare(a, srcShareClient) + defer deleteShare(a, dstShareClient) // set up the source share with numerous files - fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, srcShareURL, "") + fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, srcShareClient, fsc, "") a.NotZero(len(fileList)) // set up interceptor @@ -129,18 +128,18 @@ func TestFileSyncS2SWithEmptyDestination(t *testing.T) { // regular share->share sync but destination is identical to the source, transfers are scheduled based on lmt func TestFileSyncS2SWithIdenticalDestination(t *testing.T) { a := assert.New(t) - fsu := getFSU() - srcShareURL, srcShareName := createNewAzureShare(a, fsu) - dstShareURL, dstShareName := createNewAzureShare(a, fsu) - defer deleteShare(a, srcShareURL) - defer deleteShare(a, dstShareURL) + fsc := getFileServiceClient() + srcShareClient, srcShareName := createNewShare(a, fsc) + dstShareClient, dstShareName := createNewShare(a, fsc) + defer deleteShare(a, srcShareClient) + defer deleteShare(a, dstShareClient) // set up the source share with numerous files - fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, srcShareURL, "") + fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, srcShareClient, fsc,"") a.NotZero(len(fileList)) // set up the destination with the exact same files - scenarioHelper{}.generateAzureFilesFromList(a, dstShareURL, fileList) + 
scenarioHelper{}.generateShareFilesFromList(a, dstShareClient, fsc, fileList) // set up interceptor mockedRPC := interceptor{} @@ -161,7 +160,7 @@ func TestFileSyncS2SWithIdenticalDestination(t *testing.T) { }) // refresh the source files' last modified time so that they get synced - scenarioHelper{}.generateAzureFilesFromList(a, srcShareURL, fileList) + scenarioHelper{}.generateShareFilesFromList(a, srcShareClient, fsc, fileList) mockedRPC.reset() runSyncAndVerify(a, raw, func(err error) { a.Nil(err) @@ -172,23 +171,23 @@ func TestFileSyncS2SWithIdenticalDestination(t *testing.T) { // regular share->share sync where destination is missing some files from source, and also has some extra files func TestFileSyncS2SWithMismatchedDestination(t *testing.T) { a := assert.New(t) - fsu := getFSU() - srcShareURL, srcShareName := createNewAzureShare(a, fsu) - dstShareURL, dstShareName := createNewAzureShare(a, fsu) - defer deleteShare(a, srcShareURL) - defer deleteShare(a, dstShareURL) + fsc := getFileServiceClient() + srcShareClient, srcShareName := createNewShare(a, fsc) + dstShareClient, dstShareName := createNewShare(a, fsc) + defer deleteShare(a, srcShareClient) + defer deleteShare(a, dstShareClient) // set up the share with numerous files - fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, srcShareURL, "") + fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, srcShareClient, fsc, "") a.NotZero(len(fileList)) // set up the destination with half of the files from source filesAlreadyAtDestination := fileList[0 : len(fileList)/2] - scenarioHelper{}.generateAzureFilesFromList(a, dstShareURL, filesAlreadyAtDestination) + scenarioHelper{}.generateShareFilesFromList(a, dstShareClient, fsc, filesAlreadyAtDestination) expectedOutput := fileList[len(fileList)/2:] // the missing half of source files should be transferred // add some extra files that shouldn't be included - scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, dstShareURL, "extra") + scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, dstShareClient, fsc, "extra") // set up interceptor mockedRPC := interceptor{} @@ -215,14 +214,14 @@ func TestFileSyncS2SWithMismatchedDestination(t *testing.T) { // make sure the extra files were deleted extraFilesFound := false - for marker := (azfile.Marker{}); marker.NotDone(); { - listResponse, err := dstShareURL.NewRootDirectoryURL().ListFilesAndDirectoriesSegment(ctx, marker, azfile.ListFilesAndDirectoriesOptions{}) + pager := dstShareClient.NewRootDirectoryClient().NewListFilesAndDirectoriesPager(nil) + for pager.More() { + listResponse, err := pager.NextPage(ctx) a.Nil(err) - marker = listResponse.NextMarker // if ever the extra files are found, note it down - for _, file := range listResponse.FileItems { - if strings.Contains(file.Name, "extra") { + for _, file := range listResponse.Segment.Files { + if strings.Contains(*file.Name, "extra") { extraFilesFound = true } } @@ -235,19 +234,19 @@ func TestFileSyncS2SWithMismatchedDestination(t *testing.T) { // include flag limits the scope of source/destination comparison func TestFileSyncS2SWithIncludeFlag(t *testing.T) { a := assert.New(t) - fsu := getFSU() - srcShareURL, srcShareName := createNewAzureShare(a, fsu) - dstShareURL, dstShareName := createNewAzureShare(a, fsu) - defer deleteShare(a, srcShareURL) - defer deleteShare(a, dstShareURL) + fsc := getFileServiceClient() + srcShareClient, srcShareName := createNewShare(a, fsc) + dstShareClient, dstShareName := createNewShare(a, fsc) + defer 
deleteShare(a, srcShareClient) + defer deleteShare(a, dstShareClient) // set up the source share with numerous files - fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, srcShareURL, "") + fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, srcShareClient, fsc, "") a.NotZero(len(fileList)) // add special files that we wish to include filesToInclude := []string{"important.pdf", "includeSub/amazing.jpeg", "exactName"} - scenarioHelper{}.generateAzureFilesFromList(a, srcShareURL, filesToInclude) + scenarioHelper{}.generateShareFilesFromList(a, srcShareClient, fsc, filesToInclude) includeString := "*.pdf;*.jpeg;exactName" // set up interceptor @@ -271,19 +270,19 @@ func TestFileSyncS2SWithIncludeFlag(t *testing.T) { // exclude flag limits the scope of source/destination comparison func TestFileSyncS2SWithExcludeFlag(t *testing.T) { a := assert.New(t) - fsu := getFSU() - srcShareURL, srcShareName := createNewAzureShare(a, fsu) - dstShareURL, dstShareName := createNewAzureShare(a, fsu) - defer deleteShare(a, srcShareURL) - defer deleteShare(a, dstShareURL) + fsc := getFileServiceClient() + srcShareClient, srcShareName := createNewShare(a, fsc) + dstShareClient, dstShareName := createNewShare(a, fsc) + defer deleteShare(a, srcShareClient) + defer deleteShare(a, dstShareClient) // set up the source share with numerous files - fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, srcShareURL, "") + fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, srcShareClient, fsc, "") a.NotZero(len(fileList)) // add special files that we wish to exclude filesToExclude := []string{"notGood.pdf", "excludeSub/lame.jpeg", "exactName"} - scenarioHelper{}.generateAzureFilesFromList(a, srcShareURL, filesToExclude) + scenarioHelper{}.generateShareFilesFromList(a, srcShareClient, fsc, filesToExclude) excludeString := "*.pdf;*.jpeg;exactName" // set up interceptor @@ -307,25 +306,25 @@ func TestFileSyncS2SWithExcludeFlag(t *testing.T) { // include and exclude flag can work together to limit the scope of source/destination comparison func TestFileSyncS2SWithIncludeAndExcludeFlag(t *testing.T) { a := assert.New(t) - fsu := getFSU() - srcShareURL, srcShareName := createNewAzureShare(a, fsu) - dstShareURL, dstShareName := createNewAzureShare(a, fsu) - defer deleteShare(a, srcShareURL) - defer deleteShare(a, dstShareURL) + fsc := getFileServiceClient() + srcShareClient, srcShareName := createNewShare(a, fsc) + dstShareClient, dstShareName := createNewShare(a, fsc) + defer deleteShare(a, srcShareClient) + defer deleteShare(a, dstShareClient) // set up the source share with numerous files - fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, srcShareURL, "") + fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, srcShareClient, fsc, "") a.NotZero(len(fileList)) // add special files that we wish to include filesToInclude := []string{"important.pdf", "includeSub/amazing.jpeg"} - scenarioHelper{}.generateAzureFilesFromList(a, srcShareURL, filesToInclude) + scenarioHelper{}.generateShareFilesFromList(a, srcShareClient, fsc, filesToInclude) includeString := "*.pdf;*.jpeg;exactName" // add special files that we wish to exclude // note that the excluded files also match the include string filesToExclude := []string{"sorry.pdf", "exclude/notGood.jpeg", "exactName", "sub/exactName"} - scenarioHelper{}.generateAzureFilesFromList(a, srcShareURL, filesToExclude) + scenarioHelper{}.generateShareFilesFromList(a, 
srcShareClient, fsc, filesToExclude) excludeString := "so*;not*;exactName" // set up interceptor @@ -351,13 +350,13 @@ func TestFileSyncS2SWithIncludeAndExcludeFlag(t *testing.T) { // // validate the bug fix for this scenario // func TestFileSyncS2SWithMissingDestination(t *testing.T) { // a := assert.New(t) -// fsu := getFSU() +// fsc := getFileServiceClient() // srcShareURL, srcShareName := createNewAzureShare(a, fsu) // dstShareURL, dstShareName := createNewAzureShare(a, fsu) -// defer deleteShare(a, srcShareURL) +// defer deleteShareV1(a, srcShareURL) // // // delete the destination share to simulate non-existing destination, or recently removed destination -// deleteShare(a, dstShareURL) +// deleteShareV1(a, dstShareURL) // // // set up the share with numerous files // fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, srcShareURL, "") @@ -386,19 +385,19 @@ func TestFileSyncS2SWithIncludeAndExcludeFlag(t *testing.T) { // there is a type mismatch between the source and destination func TestFileSyncS2SMismatchShareAndFile(t *testing.T) { a := assert.New(t) - fsu := getFSU() - srcShareURL, srcShareName := createNewAzureShare(a, fsu) - dstShareURL, dstShareName := createNewAzureShare(a, fsu) - defer deleteShare(a, srcShareURL) - defer deleteShare(a, dstShareURL) + fsc := getFileServiceClient() + srcShareClient, srcShareName := createNewShare(a, fsc) + dstShareClient, dstShareName := createNewShare(a, fsc) + defer deleteShare(a, srcShareClient) + defer deleteShare(a, dstShareClient) // set up the source share with numerous files - fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, srcShareURL, "") + fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, srcShareClient, fsc, "") a.NotZero(len(fileList)) // set up the destination share with a single file singleFileName := "single" - scenarioHelper{}.generateAzureFilesFromList(a, dstShareURL, []string{singleFileName}) + scenarioHelper{}.generateShareFilesFromList(a, dstShareClient, fsc, []string{singleFileName}) // set up interceptor mockedRPC := interceptor{} @@ -433,14 +432,14 @@ func TestFileSyncS2SMismatchShareAndFile(t *testing.T) { // share <-> dir sync func TestFileSyncS2SShareAndEmptyDir(t *testing.T) { a := assert.New(t) - fsu := getFSU() - srcShareURL, srcShareName := createNewAzureShare(a, fsu) - dstShareURL, dstShareName := createNewAzureShare(a, fsu) - defer deleteShare(a, srcShareURL) - defer deleteShare(a, dstShareURL) + fsc := getFileServiceClient() + srcShareClient, srcShareName := createNewShare(a, fsc) + dstShareClient, dstShareName := createNewShare(a, fsc) + defer deleteShare(a, srcShareClient) + defer deleteShare(a, dstShareClient) // set up the source share with numerous files - fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, srcShareURL, "") + fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, srcShareClient, fsc, "") a.NotZero(len(fileList)) // set up interceptor @@ -451,7 +450,7 @@ func TestFileSyncS2SShareAndEmptyDir(t *testing.T) { // construct the raw input to simulate user input srcShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, srcShareName) dirName := "emptydir" - _, err := dstShareURL.NewDirectoryURL(dirName).Create(context.Background(), azfile.Metadata{}, azfile.SMBProperties{}) + _, err := dstShareClient.NewDirectoryClient(dirName).Create(context.Background(), nil) a.Nil(err) dstDirURLWithSAS := scenarioHelper{}.getRawFileURLWithSAS(a, dstShareName, dirName) raw := 
getDefaultSyncRawInput(srcShareURLWithSAS.String(), dstDirURLWithSAS.String()) @@ -485,19 +484,19 @@ func TestFileSyncS2SShareAndEmptyDir(t *testing.T) { // regular dir -> dir sync func TestFileSyncS2SBetweenDirs(t *testing.T) { a := assert.New(t) - fsu := getFSU() - srcShareURL, srcShareName := createNewAzureShare(a, fsu) - dstShareURL, dstShareName := createNewAzureShare(a, fsu) - defer deleteShare(a, srcShareURL) - defer deleteShare(a, dstShareURL) + fsc := getFileServiceClient() + srcShareClient, srcShareName := createNewShare(a, fsc) + dstShareClient, dstShareName := createNewShare(a, fsc) + defer deleteShare(a, srcShareClient) + defer deleteShare(a, dstShareClient) // set up the source share with numerous files dirName := "dir" - fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, srcShareURL, dirName+common.AZCOPY_PATH_SEPARATOR_STRING) + fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, srcShareClient, fsc, dirName+common.AZCOPY_PATH_SEPARATOR_STRING) a.NotZero(len(fileList)) // set up the destination with the exact same files - scenarioHelper{}.generateAzureFilesFromList(a, dstShareURL, fileList) + scenarioHelper{}.generateShareFilesFromList(a, dstShareClient, fsc, fileList) // set up interceptor mockedRPC := interceptor{} @@ -520,7 +519,7 @@ func TestFileSyncS2SBetweenDirs(t *testing.T) { }) // refresh the files' last modified time so that they are newer - scenarioHelper{}.generateAzureFilesFromList(a, srcShareURL, fileList) + scenarioHelper{}.generateShareFilesFromList(a, srcShareClient, fsc, fileList) mockedRPC.reset() expectedList := scenarioHelper{}.shaveOffPrefix(fileList, dirName+common.AZCOPY_PATH_SEPARATOR_STRING) runSyncAndVerify(a, raw, func(err error) { @@ -531,19 +530,19 @@ func TestFileSyncS2SBetweenDirs(t *testing.T) { func TestDryrunSyncFiletoFile(t *testing.T) { a := assert.New(t) - fsu := getFSU() + fsc := getFileServiceClient() //set up src share filesToInclude := []string{"AzURE2.jpeg", "TestOne.txt"} - srcShareURL, srcShareName := createNewAzureShare(a, fsu) - defer deleteShare(a, srcShareURL) - scenarioHelper{}.generateAzureFilesFromList(a, srcShareURL, filesToInclude) + srcShareClient, srcShareName := createNewShare(a, fsc) + defer deleteShare(a, srcShareClient) + scenarioHelper{}.generateShareFilesFromList(a, srcShareClient, fsc, filesToInclude) //set up dst share - dstShareURL, dstShareName := createNewAzureShare(a, fsu) - defer deleteShare(a, dstShareURL) + dstShareClient, dstShareName := createNewShare(a, fsc) + defer deleteShare(a, dstShareClient) fileToDelete := []string{"testThree.jpeg"} - scenarioHelper{}.generateAzureFilesFromList(a, dstShareURL, fileToDelete) + scenarioHelper{}.generateShareFilesFromList(a, dstShareClient, fsc, fileToDelete) // set up interceptor mockedRPC := interceptor{} @@ -567,11 +566,11 @@ func TestDryrunSyncFiletoFile(t *testing.T) { sort.Strings(msg) for i := 0; i < len(msg); i++ { if strings.Contains(msg[i], "DRYRUN: remove") { - a.True(strings.Contains(msg[i], dstShareURL.String())) + a.True(strings.Contains(msg[i], dstShareClient.URL())) } else { a.True(strings.Contains(msg[i], "DRYRUN: copy")) a.True(strings.Contains(msg[i], srcShareName)) - a.True(strings.Contains(msg[i], dstShareURL.String())) + a.True(strings.Contains(msg[i], dstShareClient.URL())) } } @@ -582,7 +581,7 @@ func TestDryrunSyncFiletoFile(t *testing.T) { func TestDryrunSyncLocaltoFile(t *testing.T) { a := assert.New(t) - fsu := getFSU() + fsc := getFileServiceClient() //set up local src blobsToInclude := 
[]string{"AzURE2.jpeg"} @@ -591,10 +590,10 @@ func TestDryrunSyncLocaltoFile(t *testing.T) { scenarioHelper{}.generateLocalFilesFromList(a, srcDirName, blobsToInclude) //set up dst share - dstShareURL, dstShareName := createNewAzureShare(a, fsu) - defer deleteShare(a, dstShareURL) + dstShareClient, dstShareName := createNewShare(a, fsc) + defer deleteShare(a, dstShareClient) fileToDelete := []string{"testThree.jpeg"} - scenarioHelper{}.generateAzureFilesFromList(a, dstShareURL, fileToDelete) + scenarioHelper{}.generateShareFilesFromList(a, dstShareClient, fsc, fileToDelete) // set up interceptor mockedRPC := interceptor{} @@ -617,11 +616,11 @@ func TestDryrunSyncLocaltoFile(t *testing.T) { sort.Strings(msg) for i := 0; i < len(msg); i++ { if strings.Contains(msg[i], "DRYRUN: remove") { - a.True(strings.Contains(msg[i], dstShareURL.String())) + a.True(strings.Contains(msg[i], dstShareClient.URL())) } else { a.True(strings.Contains(msg[i], "DRYRUN: copy")) a.True(strings.Contains(msg[i], srcDirName)) - a.True(strings.Contains(msg[i], dstShareURL.String())) + a.True(strings.Contains(msg[i], dstShareClient.URL())) } } @@ -633,18 +632,18 @@ func TestDryrunSyncLocaltoFile(t *testing.T) { // regular share->share sync but destination is identical to the source, transfers are scheduled based on lmt func TestFileSyncS2SWithIdenticalDestinationTemp(t *testing.T) { a := assert.New(t) - fsu := getFSU() - srcShareURL, srcShareName := createNewAzureShare(a, fsu) - dstShareURL, dstShareName := createNewAzureShare(a, fsu) - defer deleteShare(a, srcShareURL) - defer deleteShare(a, dstShareURL) + fsc := getFileServiceClient() + srcShareClient, srcShareName := createNewShare(a, fsc) + dstShareClient, dstShareName := createNewShare(a, fsc) + defer deleteShare(a, srcShareClient) + defer deleteShare(a, dstShareClient) // set up the source share with numerous files - fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, srcShareURL, "") + fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, srcShareClient, fsc, "") a.NotZero(len(fileList)) // set up the destination with the exact same files - scenarioHelper{}.generateAzureFilesFromList(a, dstShareURL, fileList) + scenarioHelper{}.generateShareFilesFromList(a, dstShareClient, fsc, fileList) // set up interceptor mockedRPC := interceptor{} @@ -666,10 +665,10 @@ func TestFileSyncS2SWithIdenticalDestinationTemp(t *testing.T) { }) // refresh the source files' last modified time so that they get synced - scenarioHelper{}.generateAzureFilesFromList(a, srcShareURL, fileList) + scenarioHelper{}.generateShareFilesFromList(a, srcShareClient, fsc, fileList) mockedRPC.reset() - currentTime := time.Now() - newTime := currentTime.Add(-time.Hour) // give extra hour + currentTime := time.Now().UTC() + newTime := currentTime.Add(-time.Hour).UTC() // give extra hour runSyncAndVerify(a, raw, func(err error) { a.Nil(err) validateS2SSyncTransfersAreScheduled(a, "", "", fileList, mockedRPC) diff --git a/cmd/zt_sync_local_blob_test.go b/cmd/zt_sync_local_blob_test.go index 235c3a5fc..9e5225c4f 100644 --- a/cmd/zt_sync_local_blob_test.go +++ b/cmd/zt_sync_local_blob_test.go @@ -31,15 +31,14 @@ import ( "time" "github.com/Azure/azure-storage-azcopy/v10/common" - "github.com/Azure/azure-storage-blob-go/azblob" ) // regular file->blob sync func TestSyncUploadWithSingleFile(t *testing.T) { a := assert.New(t) - bsu := getBSU() - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) + bsc := 
getBlobServiceClient() + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) for _, srcFileName := range []string{"singlefileisbest", "打麻将.txt", "%4509%4254$85140&"} { // set up the source as a single file @@ -51,8 +50,8 @@ func TestSyncUploadWithSingleFile(t *testing.T) { // set up the destination container with a single blob time.Sleep(time.Second) // later LMT dstBlobName := srcFileName - scenarioHelper{}.generateBlobsFromList(a, containerURL, []string{dstBlobName}, blockBlobDefaultData) - a.NotNil(containerURL) + scenarioHelper{}.generateBlobsFromList(a, cc, []string{dstBlobName}, blockBlobDefaultData) + a.NotNil(cc) // set up interceptor mockedRPC := interceptor{} @@ -90,7 +89,7 @@ func TestSyncUploadWithSingleFile(t *testing.T) { // this test seems to flake out. func TestSyncUploadWithEmptyDestination(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up the source with numerous files srcDirName := scenarioHelper{}.generateLocalDirectory(a) @@ -99,8 +98,8 @@ func TestSyncUploadWithEmptyDestination(t *testing.T) { time.Sleep(time.Second) // set up an empty container - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) // set up interceptor mockedRPC := interceptor{} @@ -138,7 +137,7 @@ func TestSyncUploadWithEmptyDestination(t *testing.T) { // regular directory->container sync but destination is identical to the source, transfers are scheduled based on lmt func TestSyncUploadWithIdenticalDestination(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up the source with numerous files srcDirName := scenarioHelper{}.generateLocalDirectory(a) @@ -146,12 +145,12 @@ func TestSyncUploadWithIdenticalDestination(t *testing.T) { fileList := scenarioHelper{}.generateCommonRemoteScenarioForLocal(a, srcDirName, "") // set up an the container with the exact same files, but later lmts - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) // wait for 1 second so that the last modified times of the blobs are guaranteed to be newer time.Sleep(time.Second) - scenarioHelper{}.generateBlobsFromList(a, containerURL, fileList, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, cc, fileList, blockBlobDefaultData) // set up interceptor mockedRPC := interceptor{} @@ -182,7 +181,7 @@ func TestSyncUploadWithIdenticalDestination(t *testing.T) { // regular container->directory sync where destination is missing some files from source, and also has some extra files func TestSyncUploadWithMismatchedDestination(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up the source with numerous files srcDirName := scenarioHelper{}.generateLocalDirectory(a) @@ -192,10 +191,10 @@ func TestSyncUploadWithMismatchedDestination(t *testing.T) { // set up an the container with half of the files, but later lmts // also add some extra blobs that are not present at the source extraBlobs := []string{"extraFile1.pdf, extraFile2.txt"} - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) - scenarioHelper{}.generateBlobsFromList(a, containerURL, fileList[0:len(fileList)/2], blockBlobDefaultData) - scenarioHelper{}.generateBlobsFromList(a, containerURL, extraBlobs, 
blockBlobDefaultData) + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) + scenarioHelper{}.generateBlobsFromList(a, cc, fileList[0:len(fileList)/2], blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, cc, extraBlobs, blockBlobDefaultData) expectedOutput := fileList[len(fileList)/2:] // set up interceptor @@ -213,7 +212,7 @@ func TestSyncUploadWithMismatchedDestination(t *testing.T) { // make sure the extra blobs were deleted for _, blobName := range extraBlobs { - exists := scenarioHelper{}.blobExists(containerURL.NewBlobURL(blobName)) + exists := scenarioHelper{}.blobExists(cc.NewBlobClient(blobName)) a.False(exists) } }) @@ -222,7 +221,7 @@ func TestSyncUploadWithMismatchedDestination(t *testing.T) { // include flag limits the scope of source/destination comparison func TestSyncUploadWithIncludePatternFlag(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up the source with numerous files srcDirName := scenarioHelper{}.generateLocalDirectory(a) @@ -235,8 +234,8 @@ func TestSyncUploadWithIncludePatternFlag(t *testing.T) { includeString := "*.pdf;*.jpeg;exactName" // set up the destination as an empty container - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) // set up interceptor mockedRPC := interceptor{} @@ -257,7 +256,7 @@ func TestSyncUploadWithIncludePatternFlag(t *testing.T) { // exclude flag limits the scope of source/destination comparison func TestSyncUploadWithExcludePatternFlag(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up the source with numerous files srcDirName := scenarioHelper{}.generateLocalDirectory(a) @@ -270,8 +269,8 @@ func TestSyncUploadWithExcludePatternFlag(t *testing.T) { excludeString := "*.pdf;*.jpeg;exactName" // set up the destination as an empty container - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) // set up interceptor mockedRPC := interceptor{} @@ -292,7 +291,7 @@ func TestSyncUploadWithExcludePatternFlag(t *testing.T) { // include and exclude flag can work together to limit the scope of source/destination comparison func TestSyncUploadWithIncludeAndExcludePatternFlag(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up the source with numerous files srcDirName := scenarioHelper{}.generateLocalDirectory(a) @@ -311,8 +310,8 @@ func TestSyncUploadWithIncludeAndExcludePatternFlag(t *testing.T) { excludeString := "so*;not*;exactName" // set up the destination as an empty container - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) // set up interceptor mockedRPC := interceptor{} @@ -334,7 +333,7 @@ func TestSyncUploadWithIncludeAndExcludePatternFlag(t *testing.T) { // a specific path is avoided in the comparison func TestSyncUploadWithExcludePathFlag(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up the source with numerous files srcDirName := scenarioHelper{}.generateLocalDirectory(a) @@ -347,8 +346,8 @@ func TestSyncUploadWithExcludePathFlag(t *testing.T) { excludeString := "excludeSub;exactName" // set up the destination as an empty container - 
containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) // set up interceptor mockedRPC := interceptor{} @@ -366,7 +365,7 @@ func TestSyncUploadWithExcludePathFlag(t *testing.T) { }) // now set up the destination with the blobs to be excluded, and make sure they are not touched - scenarioHelper{}.generateBlobsFromList(a, containerURL, filesToExclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, cc, filesToExclude, blockBlobDefaultData) // re-create the ones at the source so that their lmts are newer scenarioHelper{}.generateLocalFilesFromList(a, srcDirName, filesToExclude) @@ -378,7 +377,7 @@ func TestSyncUploadWithExcludePathFlag(t *testing.T) { // make sure the extra blobs were not touched for _, blobName := range filesToExclude { - exists := scenarioHelper{}.blobExists(containerURL.NewBlobURL(blobName)) + exists := scenarioHelper{}.blobExists(cc.NewBlobClient(blobName)) a.True(exists) } }) @@ -387,7 +386,7 @@ func TestSyncUploadWithExcludePathFlag(t *testing.T) { // validate the bug fix for this scenario func TestSyncUploadWithMissingDestination(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // set up the source with numerous files srcDirName := scenarioHelper{}.generateLocalDirectory(a) @@ -395,10 +394,10 @@ func TestSyncUploadWithMissingDestination(t *testing.T) { scenarioHelper{}.generateCommonRemoteScenarioForLocal(a, srcDirName, "") // set up the destination as an non-existent container - containerURL, containerName := getContainerURL(a, bsu) + cc, containerName := getContainerClient(a, bsc) // validate that the container does not exist - _, err := containerURL.GetProperties(context.Background(), azblob.LeaseAccessConditions{}) + _, err := cc.GetProperties(context.Background(), nil) a.NotNil(err) // set up interceptor @@ -421,7 +420,7 @@ func TestSyncUploadWithMissingDestination(t *testing.T) { func TestDryrunSyncLocaltoBlob(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() //set up local src blobsToInclude := []string{"AzURE2.jpeg", "sub1/aTestOne.txt", "sub1/sub2/testTwo.pdf"} @@ -430,10 +429,10 @@ func TestDryrunSyncLocaltoBlob(t *testing.T) { scenarioHelper{}.generateLocalFilesFromList(a, srcDirName, blobsToInclude) //set up dst container - dstContainerURL, dstContainerName := createNewContainer(a, bsu) - defer deleteContainer(a, dstContainerURL) + dstContainerClient, dstContainerName := createNewContainer(a, bsc) + defer deleteContainer(a, dstContainerClient) blobsToDelete := []string{"testThree.jpeg"} - scenarioHelper{}.generateBlobsFromList(a, dstContainerURL, blobsToDelete, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, dstContainerClient, blobsToDelete, blockBlobDefaultData) // set up interceptor mockedRPC := interceptor{} @@ -456,11 +455,11 @@ func TestDryrunSyncLocaltoBlob(t *testing.T) { sort.Strings(msg) for i := 0; i < len(msg); i++ { if strings.Contains(msg[i], "DRYRUN: remove") { - a.True(strings.Contains(msg[i], dstContainerURL.String())) + a.True(strings.Contains(msg[i], dstContainerClient.URL())) } else { a.True(strings.Contains(msg[i], "DRYRUN: copy")) a.True(strings.Contains(msg[i], srcDirName)) - a.True(strings.Contains(msg[i], dstContainerURL.String())) + a.True(strings.Contains(msg[i], dstContainerClient.URL())) } } diff --git a/cmd/zt_sync_local_blob_windows_test.go b/cmd/zt_sync_local_blob_windows_test.go index 
19614f266..63cafc44a 100644 --- a/cmd/zt_sync_local_blob_windows_test.go +++ b/cmd/zt_sync_local_blob_windows_test.go @@ -31,7 +31,7 @@ import ( func TestSyncUploadWithExcludeAttrFlag(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() srcDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(srcDirName) @@ -45,8 +45,8 @@ func TestSyncUploadWithExcludeAttrFlag(t *testing.T) { scenarioHelper{}.setAttributesForLocalFiles(a, srcDirName, filesToExclude, attrList) // set up the destination as an empty container - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) // set up interceptor mockedRPC := interceptor{} @@ -65,7 +65,7 @@ func TestSyncUploadWithExcludeAttrFlag(t *testing.T) { func TestSyncUploadWithIncludeAttrFlag(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() srcDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(srcDirName) @@ -79,8 +79,8 @@ func TestSyncUploadWithIncludeAttrFlag(t *testing.T) { scenarioHelper{}.setAttributesForLocalFiles(a, srcDirName, filesToInclude, attrList) // set up the destination as an empty container - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) // set up interceptor mockedRPC := interceptor{} @@ -104,7 +104,7 @@ func TestSyncUploadWithIncludeAttrFlag(t *testing.T) { // Only the last file should be transferred func TestSyncUploadWithIncludeAndIncludeAttrFlags(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() srcDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(srcDirName) @@ -117,8 +117,8 @@ func TestSyncUploadWithIncludeAndIncludeAttrFlags(t *testing.T) { attrList := []string{"H", "I", "C"} scenarioHelper{}.setAttributesForLocalFiles(a, srcDirName, fileList[1:], attrList) - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) mockedRPC := interceptor{} Rpc = mockedRPC.intercept @@ -142,7 +142,7 @@ func TestSyncUploadWithIncludeAndIncludeAttrFlags(t *testing.T) { // None of them should be transferred func TestSyncUploadWithExcludeAndExcludeAttrFlags(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() srcDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(srcDirName) @@ -155,8 +155,8 @@ func TestSyncUploadWithExcludeAndExcludeAttrFlags(t *testing.T) { attrList := []string{"H", "I", "C"} scenarioHelper{}.setAttributesForLocalFiles(a, srcDirName, fileList[1:], attrList) - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) mockedRPC := interceptor{} Rpc = mockedRPC.intercept @@ -176,16 +176,16 @@ func TestSyncUploadWithExcludeAndExcludeAttrFlags(t *testing.T) { // mouthfull of a test name, but this ensures that case insensitivity doesn't cause the unintended deletion of files func TestSyncDownloadWithDeleteDestinationOnCaseInsensitiveFS(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() dstDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(dstDirName) fileList := 
[]string{"FileWithCaps", "FiLeTwO", "FoOBaRBaZ"} - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) - scenarioHelper{}.generateBlobsFromList(a, containerURL, fileList, "Hello, World!") + scenarioHelper{}.generateBlobsFromList(a, cc, fileList, "Hello, World!") // let the local files be in the future; we don't want to do _anything_ to them; not delete nor download. time.Sleep(time.Second * 5) diff --git a/cmd/zt_sync_processor_test.go b/cmd/zt_sync_processor_test.go index 28968171b..a3014011e 100644 --- a/cmd/zt_sync_processor_test.go +++ b/cmd/zt_sync_processor_test.go @@ -28,7 +28,6 @@ import ( "testing" "github.com/Azure/azure-storage-azcopy/v10/common" - "github.com/Azure/azure-storage-blob-go/azblob" ) func TestLocalDeleter(t *testing.T) { @@ -63,17 +62,17 @@ func TestLocalDeleter(t *testing.T) { func TestBlobDeleter(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() blobName := "extraBlob.pdf" // set up the blob to delete - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) - scenarioHelper{}.generateBlobsFromList(a, containerURL, []string{blobName}, blockBlobDefaultData) + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) + scenarioHelper{}.generateBlobsFromList(a, cc, []string{blobName}, blockBlobDefaultData) // validate that the blob exists - blobURL := containerURL.NewBlobURL(blobName) - _, err := blobURL.GetProperties(context.Background(), azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{}) + bc := cc.NewBlobClient(blobName) + _, err := bc.GetProperties(context.Background(), nil) a.Nil(err) // construct the cooked input to simulate user input @@ -94,23 +93,23 @@ func TestBlobDeleter(t *testing.T) { a.Nil(err) // validate that the blob was deleted - _, err = blobURL.GetProperties(context.Background(), azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{}) + _, err = bc.GetProperties(context.Background(),nil) a.NotNil(err) } func TestFileDeleter(t *testing.T) { a := assert.New(t) - fsu := getFSU() + fsc := getFileServiceClient() fileName := "extraFile.pdf" // set up the file to delete - shareURL, shareName := createNewAzureShare(a, fsu) - defer deleteShare(a, shareURL) - scenarioHelper{}.generateAzureFilesFromList(a, shareURL, []string{fileName}) + shareClient, shareName := createNewShare(a, fsc) + defer deleteShare(a, shareClient) + scenarioHelper{}.generateShareFilesFromList(a, shareClient, fsc, []string{fileName}) // validate that the file exists - fileURL := shareURL.NewRootDirectoryURL().NewFileURL(fileName) - _, err := fileURL.GetProperties(context.Background()) + fileClient := shareClient.NewRootDirectoryClient().NewFileClient(fileName) + _, err := fileClient.GetProperties(context.Background(), nil) a.Nil(err) // construct the cooked input to simulate user input @@ -131,6 +130,6 @@ func TestFileDeleter(t *testing.T) { a.Nil(err) // validate that the file was deleted - _, err = fileURL.GetProperties(context.Background()) + _, err = fileClient.GetProperties(context.Background(), nil) a.NotNil(err) } \ No newline at end of file diff --git a/cmd/zt_test.go b/cmd/zt_test.go index 531ef2cd9..383a4b753 100644 --- a/cmd/zt_test.go +++ b/cmd/zt_test.go @@ -25,6 +25,22 @@ import ( "context" "errors" "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + 
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob" + blobsas "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" + blobservice "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service" + sharefile "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/fileerror" + filesas "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/sas" + fileservice "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/service" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/share" "github.com/stretchr/testify/assert" "io" "math/rand" @@ -42,9 +58,6 @@ import ( "github.com/Azure/azure-storage-azcopy/v10/azbfs" "github.com/Azure/azure-storage-azcopy/v10/ste" "github.com/minio/minio-go" - - "github.com/Azure/azure-storage-blob-go/azblob" - "github.com/Azure/azure-storage-file-go/azfile" ) var ctx = context.Background() @@ -177,9 +190,9 @@ func generateFilesystemName() string { return generateName(blobfsPrefix, 63) } -func getShareURL(a *assert.Assertions, fsu azfile.ServiceURL) (share azfile.ShareURL, name string) { +func getShareClient(a *assert.Assertions, fsc *fileservice.Client) (share *share.Client, name string) { name = generateShareName() - share = fsu.NewShareURL(name) + share = fsc.NewShareClient(name) return share, name } @@ -192,11 +205,10 @@ func generateBfsFileName() string { return generateName(blobfsPrefix, 0) } -func getContainerURL(a *assert.Assertions, bsu azblob.ServiceURL) (container azblob.ContainerURL, name string) { +func getContainerClient(a *assert.Assertions, bsc *blobservice.Client) (container *container.Client, name string) { name = generateContainerName() - container = bsu.NewContainerURL(name) - - return container, name + container = bsc.NewContainerClient(name) + return } func getFilesystemURL(a *assert.Assertions, bfssu azbfs.ServiceURL) (filesystem azbfs.FileSystemURL, name string) { @@ -206,11 +218,10 @@ func getFilesystemURL(a *assert.Assertions, bfssu azbfs.ServiceURL) (filesystem return } -func getBlockBlobURL(a *assert.Assertions, container azblob.ContainerURL, prefix string) (blob azblob.BlockBlobURL, name string) { +func getBlockBlobClient(a *assert.Assertions, cc *container.Client, prefix string) (bbc *blockblob.Client, name string) { name = prefix + generateBlobName() - blob = container.NewBlockBlobURL(name) - - return blob, name + bbc = cc.NewBlockBlobClient(name) + return } func getBfsFileURL(a *assert.Assertions, filesystemURL azbfs.FileSystemURL, prefix string) (file azbfs.FileURL, name string) { @@ -220,24 +231,21 @@ func getBfsFileURL(a *assert.Assertions, filesystemURL azbfs.FileSystemURL, pref return } -func getAppendBlobURL(a *assert.Assertions, container azblob.ContainerURL, prefix string) (blob azblob.AppendBlobURL, name string) { +func getAppendBlobClient(a *assert.Assertions, cc *container.Client, prefix string) (abc *appendblob.Client, name string) { name = generateBlobName() - blob = container.NewAppendBlobURL(prefix + name) - - return blob, name + abc = cc.NewAppendBlobClient(prefix + name) + return } -func getPageBlobURL(a *assert.Assertions, container azblob.ContainerURL, prefix string) 
(blob azblob.PageBlobURL, name string) { +func getPageBlobClient(a *assert.Assertions, cc *container.Client, prefix string) (pbc *pageblob.Client, name string) { name = generateBlobName() - blob = container.NewPageBlobURL(prefix + name) - + pbc = cc.NewPageBlobClient(prefix + name) return } -func getAzureFileURL(a *assert.Assertions, shareURL azfile.ShareURL, prefix string) (fileURL azfile.FileURL, name string) { +func getAzureFileClient(a *assert.Assertions, sc *share.Client, prefix string) (fc *sharefile.Client, name string) { name = prefix + generateAzureFileName() - fileURL = shareURL.NewRootDirectoryURL().NewFileURL(name) - + fc = sc.NewRootDirectoryClient().NewFileClient(name) return } @@ -262,29 +270,36 @@ func getAccountAndKey() (string, string) { return name, key } -// get blob account service URL -func getBSU() azblob.ServiceURL { +// get blob account service client +func getBlobServiceClient() *blobservice.Client { accountName, accountKey := getAccountAndKey() - u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/", accountName)) + u := fmt.Sprintf("https://%s.blob.core.windows.net/", accountName) - credential, err := azblob.NewSharedKeyCredential(accountName, accountKey) + credential, err := blob.NewSharedKeyCredential(accountName, accountKey) if err != nil { panic(err) } - pipeline := azblob.NewPipeline(credential, azblob.PipelineOptions{}) - return azblob.NewServiceURL(*u, pipeline) + client, err := blobservice.NewClientWithSharedKeyCredential(u, credential, nil) + if err != nil { + panic(err) + } + return client } -func getFSU() azfile.ServiceURL { +// get file account service client +func getFileServiceClient() *fileservice.Client { accountName, accountKey := getAccountAndKey() - u, _ := url.Parse(fmt.Sprintf("https://%s.file.core.windows.net/", accountName)) + u := fmt.Sprintf("https://%s.file.core.windows.net/", accountName) - credential, err := azfile.NewSharedKeyCredential(accountName, accountKey) + credential, err := sharefile.NewSharedKeyCredential(accountName, accountKey) + if err != nil { + panic(err) + } + client, err := fileservice.NewClientWithSharedKeyCredential(u, credential, nil) if err != nil { panic(err) } - pipeline := azfile.NewPipeline(credential, azfile.PipelineOptions{}) - return azfile.NewServiceURL(*u, pipeline) + return client } func GetBFSSU() azbfs.ServiceURL { @@ -296,15 +311,15 @@ func GetBFSSU() azbfs.ServiceURL { return azbfs.NewServiceURL(*u, pipeline) } -func createNewContainer(a *assert.Assertions, bsu azblob.ServiceURL) (container azblob.ContainerURL, name string) { - container, name = getContainerURL(a, bsu) +func createNewContainer(a *assert.Assertions, bsc *blobservice.Client) (cc *container.Client, name string) { + cc, name = getContainerClient(a, bsc) // ignore any errors here, since it doesn't matter if this fails (if it does, it's probably because the container didn't exist) - _, _ = container.Delete(ctx, azblob.ContainerAccessConditions{}) + _, _ = cc.Delete(ctx, nil) - _, err := container.Create(ctx, nil, azblob.PublicAccessNone) + _, err := cc.Create(ctx, nil) a.Nil(err) - return container, name + return cc, name } func createNewFilesystem(a *assert.Assertions, bfssu azbfs.ServiceURL) (filesystem azbfs.FileSystemURL, name string) { @@ -333,76 +348,75 @@ func createNewBfsFile(a *assert.Assertions, filesystem azbfs.FileSystemURL, pref return } -func createNewBlockBlob(a *assert.Assertions, container azblob.ContainerURL, prefix string) (blob azblob.BlockBlobURL, name string) { - blob, name = getBlockBlobURL(a, container, 
prefix) +func createNewBlockBlob(a *assert.Assertions, cc *container.Client, prefix string) (bbc *blockblob.Client, name string) { + bbc, name = getBlockBlobClient(a, cc, prefix) - _, err := blob.Upload(ctx, strings.NewReader(blockBlobDefaultData), azblob.BlobHTTPHeaders{}, - nil, azblob.BlobAccessConditions{}, azblob.DefaultAccessTier, nil, azblob.ClientProvidedKeyOptions{}, azblob.ImmutabilityPolicyOptions{}) + _, err := bbc.Upload(ctx, streaming.NopCloser(strings.NewReader(blockBlobDefaultData)), nil) a.Nil(err) return } // create metadata indicating that this is a dir -func createNewDirectoryStub(a *assert.Assertions, container azblob.ContainerURL, dirPath string) { - dir := container.NewBlockBlobURL(dirPath) - - _, err := dir.Upload(ctx, bytes.NewReader(nil), azblob.BlobHTTPHeaders{}, - azblob.Metadata{"hdi_isfolder": "true"}, azblob.BlobAccessConditions{}, azblob.DefaultAccessTier, nil, azblob.ClientProvidedKeyOptions{}, azblob.ImmutabilityPolicyOptions{}) +func createNewDirectoryStub(a *assert.Assertions, cc *container.Client, dirPath string) { + dirClient := cc.NewBlockBlobClient(dirPath) + _, err := dirClient.Upload(ctx, streaming.NopCloser(bytes.NewReader(nil)), + &blockblob.UploadOptions{ + Metadata: map[string]*string{"hdi_isfolder": to.Ptr("true")}, + }) a.Nil(err) return } -func createNewAzureShare(a *assert.Assertions, fsu azfile.ServiceURL) (share azfile.ShareURL, name string) { - share, name = getShareURL(a, fsu) +func createNewShareFile(a *assert.Assertions, sc *share.Client, fsc *fileservice.Client, prefix string) (fc *sharefile.Client, name string) { + fc, name = getAzureFileClient(a, sc, prefix) + + // generate parents first + generateParentsForShareFile(a, fc, fsc) - _, err := share.Create(ctx, nil, 0) + _, err := fc.Create(ctx, defaultAzureFileSizeInBytes, nil) a.Nil(err) - return share, name + return } -func createNewAzureFile(a *assert.Assertions, share azfile.ShareURL, prefix string) (file azfile.FileURL, name string) { - file, name = getAzureFileURL(a, share, prefix) - - // generate parents first - generateParentsForAzureFile(a, file) +func createNewShare(a *assert.Assertions, fsc *fileservice.Client) (sc *share.Client, name string) { + sc, name = getShareClient(a, fsc) - _, err := file.Create(ctx, defaultAzureFileSizeInBytes, azfile.FileHTTPHeaders{}, azfile.Metadata{}) + _, err := sc.Create(ctx, nil) a.Nil(err) - return + return sc, name } -func generateParentsForAzureFile(a *assert.Assertions, fileURL azfile.FileURL) { - accountName, accountKey := getAccountAndKey() - credential, _ := azfile.NewSharedKeyCredential(accountName, accountKey) +func generateParentsForShareFile(a *assert.Assertions, fileClient *sharefile.Client, serviceClient *fileservice.Client) { t := ste.NewFolderCreationTracker(common.EFolderPropertiesOption.NoFolders(), nil) - err := ste.AzureFileParentDirCreator{}.CreateParentDirToRoot(ctx, fileURL, azfile.NewPipeline(credential, azfile.PipelineOptions{}), t) + err := ste.AzureFileParentDirCreator{}.CreateParentDirToRoot(ctx, fileClient, serviceClient, t) a.Nil(err) } -func createNewAppendBlob(a *assert.Assertions, container azblob.ContainerURL, prefix string) (blob azblob.AppendBlobURL, name string) { - blob, name = getAppendBlobURL(a, container, prefix) - - _, err := blob.Create(ctx, azblob.BlobHTTPHeaders{}, nil, azblob.BlobAccessConditions{}, nil, azblob.ClientProvidedKeyOptions{}, azblob.ImmutabilityPolicyOptions{}) +func createNewAppendBlob(a *assert.Assertions, cc *container.Client, prefix string) (abc *appendblob.Client, name string) { 
+ abc, name = getAppendBlobClient(a, cc, prefix) + _, err := abc.Create(ctx, nil) a.Nil(err) + return } -func createNewPageBlob(a *assert.Assertions, container azblob.ContainerURL, prefix string) (blob azblob.PageBlobURL, name string) { - blob, name = getPageBlobURL(a, container, prefix) +func createNewPageBlob(a *assert.Assertions, cc *container.Client, prefix string) (pbc *pageblob.Client, name string) { + pbc, name = getPageBlobClient(a, cc, prefix) - _, err := blob.Create(ctx, azblob.PageBlobPageBytes*10, 0, azblob.BlobHTTPHeaders{}, nil, azblob.BlobAccessConditions{}, azblob.DefaultPremiumBlobAccessTier, nil, azblob.ClientProvidedKeyOptions{}, azblob.ImmutabilityPolicyOptions{}) + _, err := pbc.Create(ctx, pageblob.PageBytes*10, nil) a.Nil(err) + return } -func deleteContainer(a *assert.Assertions, container azblob.ContainerURL) { - _, err := container.Delete(ctx, azblob.ContainerAccessConditions{}) +func deleteContainer(a *assert.Assertions, cc *container.Client) { + _, err := cc.Delete(ctx, nil) a.Nil(err) } @@ -411,9 +425,8 @@ func deleteFilesystem(a *assert.Assertions, filesystem azbfs.FileSystemURL) { a.Nil(err) } -func validateStorageError(a *assert.Assertions, err error, code azblob.ServiceCodeType) { - serr, _ := err.(azblob.StorageError) - a.Equal(code, serr.ServiceCode()) +func validateStorageError(a *assert.Assertions, err error, code bloberror.Code) { + a.True(bloberror.HasCode(err, code)) } func getRelativeTimeGMT(amount time.Duration) time.Time { @@ -632,18 +645,19 @@ func cleanGCPAccount(client *gcpUtils.Client) { } } -func cleanBlobAccount(a *assert.Assertions, serviceURL azblob.ServiceURL) { - marker := azblob.Marker{} - for marker.NotDone() { - resp, err := serviceURL.ListContainersSegment(ctx, marker, azblob.ListContainersSegmentOptions{}) +func cleanBlobAccount(a *assert.Assertions, serviceClient *blobservice.Client) { + pager := serviceClient.NewListContainersPager(nil) + for pager.More() { + resp, err := pager.NextPage(ctx) a.Nil(err) for _, v := range resp.ContainerItems { - _, err = serviceURL.NewContainerURL(v.Name).Delete(ctx, azblob.ContainerAccessConditions{}) + _, err = serviceClient.NewContainerClient(*v.Name).Delete(ctx, nil) if err != nil { - if stgErr, ok := err.(azblob.StorageError); ok { - if stgErr.ServiceCode() == azblob.ServiceCodeContainerNotFound { + var respErr *azcore.ResponseError + if errors.As(err, &respErr) { + if respErr.ErrorCode == string(bloberror.ContainerNotFound) { continue } } @@ -651,23 +665,22 @@ func cleanBlobAccount(a *assert.Assertions, serviceURL azblob.ServiceURL) { a.Nil(err) } } - - marker = resp.NextMarker } } -func cleanFileAccount(a *assert.Assertions, serviceURL azfile.ServiceURL) { - marker := azfile.Marker{} - for marker.NotDone() { - resp, err := serviceURL.ListSharesSegment(ctx, marker, azfile.ListSharesOptions{}) +func cleanFileAccount(a *assert.Assertions, serviceClient *fileservice.Client) { + pager := serviceClient.NewListSharesPager(nil) + for pager.More() { + resp, err := pager.NextPage(ctx) a.Nil(err) - for _, v := range resp.ShareItems { - _, err = serviceURL.NewShareURL(v.Name).Delete(ctx, azfile.DeleteSnapshotsOptionNone) + for _, v := range resp.Shares { + _, err = serviceClient.NewShareClient(*v.Name).Delete(ctx, nil) if err != nil { - if stgErr, ok := err.(azfile.StorageError); ok { - if stgErr.ServiceCode() == azfile.ServiceCodeShareNotFound { + var respErr *azcore.ResponseError + if errors.As(err, &respErr) { + if respErr.ErrorCode == string(fileerror.ShareNotFound) { continue } } @@ -675,41 +688,13 
@@ func cleanFileAccount(a *assert.Assertions, serviceURL azfile.ServiceURL) { a.Nil(err) } } - - marker = resp.NextMarker } time.Sleep(time.Minute) } -func getGenericCredentialForFile(accountType string) (*azfile.SharedKeyCredential, error) { - accountNameEnvVar := accountType + "ACCOUNT_NAME" - accountKeyEnvVar := accountType + "ACCOUNT_KEY" - accountName, accountKey := os.Getenv(accountNameEnvVar), os.Getenv(accountKeyEnvVar) - if accountName == "" || accountKey == "" { - return nil, errors.New(accountNameEnvVar + " and/or " + accountKeyEnvVar + " environment variables not specified.") - } - return azfile.NewSharedKeyCredential(accountName, accountKey) -} - -func getAlternateFSU() (azfile.ServiceURL, error) { - secondaryAccountName, secondaryAccountKey := os.Getenv("SECONDARY_ACCOUNT_NAME"), os.Getenv("SECONDARY_ACCOUNT_KEY") - if secondaryAccountName == "" || secondaryAccountKey == "" { - return azfile.ServiceURL{}, errors.New("SECONDARY_ACCOUNT_NAME and/or SECONDARY_ACCOUNT_KEY environment variables not specified.") - } - fsURL, _ := url.Parse("https://" + secondaryAccountName + ".file.core.windows.net/") - - credential, err := azfile.NewSharedKeyCredential(secondaryAccountName, secondaryAccountKey) - if err != nil { - return azfile.ServiceURL{}, err - } - pipeline := azfile.NewPipeline(credential, azfile.PipelineOptions{ /*Log: pipeline.NewLogWrapper(pipeline.LogInfo, log.New(os.Stderr, "", log.LstdFlags))*/ }) - - return azfile.NewServiceURL(*fsURL, pipeline), nil -} - -func deleteShare(a *assert.Assertions, share azfile.ShareURL) { - _, err := share.Delete(ctx, azfile.DeleteSnapshotsOptionInclude) +func deleteShare(a *assert.Assertions, sc *share.Client) { + _, err := sc.Delete(ctx, &share.DeleteOptions{DeleteSnapshots: to.Ptr(share.DeleteSnapshotsOptionTypeInclude)}) a.Nil(err) } @@ -718,131 +703,119 @@ func deleteShare(a *assert.Assertions, share azfile.ShareURL) { // those changes not being reflected yet, we will wait 30 seconds and try the test again. If it fails this time for any reason, // we fail the test. It is the responsibility of the the testImplFunc to determine which error string indicates the test should be retried. // There can only be one such string. All errors that cannot be due to this detail should be asserted and not returned as an error string. -func runTestRequiringServiceProperties(a *assert.Assertions, bsu azblob.ServiceURL, code string, - enableServicePropertyFunc func(*assert.Assertions, azblob.ServiceURL), - testImplFunc func(*assert.Assertions, azblob.ServiceURL) error, - disableServicePropertyFunc func(*assert.Assertions, azblob.ServiceURL)) { - enableServicePropertyFunc(a, bsu) - defer disableServicePropertyFunc(a, bsu) - err := testImplFunc(a, bsu) +func runTestRequiringServiceProperties(a *assert.Assertions, bsc *blobservice.Client, code string, + enableServicePropertyFunc func(*assert.Assertions, *blobservice.Client), + testImplFunc func(*assert.Assertions, *blobservice.Client) error, + disableServicePropertyFunc func(*assert.Assertions, *blobservice.Client)) { + enableServicePropertyFunc(a, bsc) + defer disableServicePropertyFunc(a, bsc) + err := testImplFunc(a, bsc) // We cannot assume that the error indicative of slow update will necessarily be a StorageError. As in ListBlobs. 
if err != nil && err.Error() == code { time.Sleep(time.Second * 30) - err = testImplFunc(a, bsu) + err = testImplFunc(a, bsc) a.Nil(err) } } -func enableSoftDelete(a *assert.Assertions, bsu azblob.ServiceURL) { - days := int32(1) - _, err := bsu.SetProperties(ctx, azblob.StorageServiceProperties{DeleteRetentionPolicy: &azblob.RetentionPolicy{Enabled: true, Days: &days}}) +func enableSoftDelete(a *assert.Assertions, bsc *blobservice.Client) { + _, err := bsc.SetProperties(ctx, &blobservice.SetPropertiesOptions{ + DeleteRetentionPolicy: &blobservice.RetentionPolicy{Enabled: to.Ptr(true), Days: to.Ptr(int32(1))}, + }) a.Nil(err) } -func disableSoftDelete(a *assert.Assertions, bsu azblob.ServiceURL) { - _, err := bsu.SetProperties(ctx, azblob.StorageServiceProperties{DeleteRetentionPolicy: &azblob.RetentionPolicy{Enabled: false}}) +func disableSoftDelete(a *assert.Assertions, bsc *blobservice.Client) { + _, err := bsc.SetProperties(ctx, &blobservice.SetPropertiesOptions{ + DeleteRetentionPolicy: &blobservice.RetentionPolicy{Enabled: to.Ptr(false)}, + }) a.Nil(err) } -func validateUpload(a *assert.Assertions, blobURL azblob.BlockBlobURL) { - resp, err := blobURL.Download(ctx, 0, 0, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{}) +func validateUpload(a *assert.Assertions, bbc *blockblob.Client) { + resp, err := bbc.DownloadStream(ctx, nil) a.Nil(err) - data, _ := io.ReadAll(resp.Response().Body) + data, _ := io.ReadAll(resp.Body) a.Len(data, 0) } -func getContainerURLWithSAS(a *assert.Assertions, credential azblob.SharedKeyCredential, containerName string) azblob.ContainerURL { - sasQueryParams, err := azblob.BlobSASSignatureValues{ - Protocol: azblob.SASProtocolHTTPS, - ExpiryTime: time.Now().UTC().Add(48 * time.Hour), - ContainerName: containerName, - Permissions: azblob.ContainerSASPermissions{Read: true, Add: true, Write: true, Create: true, Delete: true, DeletePreviousVersion: true, List: true, Tag: true}.String(), - }.NewSASQueryParameters(&credential) - a.Nil(err) +func getContainerClientWithSAS(a *assert.Assertions, credential *blob.SharedKeyCredential, containerName string) *container.Client { + rawURL := fmt.Sprintf("https://%s.blob.core.windows.net/%s", + credential.AccountName(), containerName) + client, err := container.NewClientWithSharedKeyCredential(rawURL, credential, nil) - // construct the url from scratch - qp := sasQueryParams.Encode() - rawURL := fmt.Sprintf("https://%s.blob.core.windows.net/%s?%s", - credential.AccountName(), containerName, qp) + sasURL, err := client.GetSASURL( + blobsas.ContainerPermissions{Read: true, Add: true, Write: true, Create: true, Delete: true, DeletePreviousVersion: true, List: true, Tag: true}, + time.Now().Add(48*time.Hour), + nil) + a.Nil(err) - // convert the raw url and validate it was parsed successfully - fullURL, err := url.Parse(rawURL) + client, err = container.NewClientWithNoCredential(sasURL, nil) a.Nil(err) - // TODO perhaps we need a global default pipeline - return azblob.NewContainerURL(*fullURL, azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{})) + return client } -func getBlobServiceURLWithSAS(a *assert.Assertions, credential azblob.SharedKeyCredential) azblob.ServiceURL { - sasQueryParams, err := azblob.AccountSASSignatureValues{ - Protocol: azblob.SASProtocolHTTPS, - ExpiryTime: time.Now().Add(48 * time.Hour), - Permissions: azblob.AccountSASPermissions{Read: true, List: true, Write: true, Delete: true, DeletePreviousVersion: true, Add: true, Create: true, Update: true, 
Process: true, Tag: true}.String(), - Services: azblob.AccountSASServices{File: true, Blob: true, Queue: true}.String(), - ResourceTypes: azblob.AccountSASResourceTypes{Service: true, Container: true, Object: true}.String(), - }.NewSASQueryParameters(&credential) - a.Nil(err) +func getBlobServiceClientWithSAS(a *assert.Assertions, credential *blob.SharedKeyCredential) *blobservice.Client { + rawURL := fmt.Sprintf("https://%s.blob.core.windows.net/", + credential.AccountName()) + client, err := blobservice.NewClientWithSharedKeyCredential(rawURL, credential, nil) - // construct the url from scratch - qp := sasQueryParams.Encode() - rawURL := fmt.Sprintf("https://%s.blob.core.windows.net/?%s", - credential.AccountName(), qp) + sasURL, err := client.GetSASURL( + blobsas.AccountResourceTypes{Service: true, Container: true, Object: true}, + blobsas.AccountPermissions{Read: true, List: true, Write: true, Delete: true, DeletePreviousVersion: true, Add: true, Create: true, Update: true, Process: true, Tag: true}, + time.Now().Add(48*time.Hour), + nil) + a.Nil(err) - // convert the raw url and validate it was parsed successfully - fullURL, err := url.Parse(rawURL) + client, err = blobservice.NewClientWithNoCredential(sasURL, nil) a.Nil(err) - return azblob.NewServiceURL(*fullURL, azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{})) + return client } -func getFileServiceURLWithSAS(a *assert.Assertions, credential azfile.SharedKeyCredential) azfile.ServiceURL { - sasQueryParams, err := azfile.AccountSASSignatureValues{ - Protocol: azfile.SASProtocolHTTPS, - ExpiryTime: time.Now().Add(48 * time.Hour), - Permissions: azfile.AccountSASPermissions{Read: true, List: true, Write: true, Delete: true, Add: true, Create: true, Update: true, Process: true}.String(), - Services: azfile.AccountSASServices{File: true, Blob: true, Queue: true}.String(), - ResourceTypes: azfile.AccountSASResourceTypes{Service: true, Container: true, Object: true}.String(), - }.NewSASQueryParameters(&credential) - a.Nil(err) +func getFileServiceClientWithSAS(a *assert.Assertions, credential *sharefile.SharedKeyCredential) *fileservice.Client { + rawURL := fmt.Sprintf("https://%s.file.core.windows.net/", + credential.AccountName()) + client, err := fileservice.NewClientWithSharedKeyCredential(rawURL, credential, nil) - qp := sasQueryParams.Encode() - rawURL := fmt.Sprintf("https://%s.file.core.windows.net/?%s", credential.AccountName(), qp) + sasURL, err := client.GetSASURL( + filesas.AccountResourceTypes{Service: true, Container: true, Object: true}, + filesas.AccountPermissions{Read: true, List: true, Write: true, Delete: true, Create: true}, + time.Now().Add(48*time.Hour), + nil) + a.Nil(err) - fullURL, err := url.Parse(rawURL) + client, err = fileservice.NewClientWithNoCredential(sasURL, nil) a.Nil(err) - return azfile.NewServiceURL(*fullURL, azfile.NewPipeline(azfile.NewAnonymousCredential(), azfile.PipelineOptions{})) + return client } -func getShareURLWithSAS(a *assert.Assertions, credential azfile.SharedKeyCredential, shareName string) azfile.ShareURL { - sasQueryParams, err := azfile.FileSASSignatureValues{ - Protocol: azfile.SASProtocolHTTPS, - ExpiryTime: time.Now().UTC().Add(48 * time.Hour), - ShareName: shareName, - Permissions: azfile.ShareSASPermissions{Read: true, Write: true, Create: true, Delete: true, List: true}.String(), - }.NewSASQueryParameters(&credential) - a.Nil(err) +func getShareClientWithSAS(a *assert.Assertions, credential *sharefile.SharedKeyCredential, shareName string) 
*share.Client { + rawURL := fmt.Sprintf("https://%s.file.core.windows.net/%s", + credential.AccountName(), shareName) + client, err := share.NewClientWithSharedKeyCredential(rawURL, credential, nil) - // construct the url from scratch - qp := sasQueryParams.Encode() - rawURL := fmt.Sprintf("https://%s.file.core.windows.net/%s?%s", - credential.AccountName(), shareName, qp) + sasURL, err := client.GetSASURL( + filesas.SharePermissions{Read: true, Write: true, Create: true, Delete: true, List: true}, + time.Now().Add(48*time.Hour), + nil) + a.Nil(err) - // convert the raw url and validate it was parsed successfully - fullURL, err := url.Parse(rawURL) + client, err = share.NewClientWithNoCredential(sasURL, nil) a.Nil(err) - // TODO perhaps we need a global default pipeline - return azfile.NewShareURL(*fullURL, azfile.NewPipeline(azfile.NewAnonymousCredential(), azfile.PipelineOptions{})) + return client } func getAdlsServiceURLWithSAS(a *assert.Assertions, credential azbfs.SharedKeyCredential) azbfs.ServiceURL { sasQueryParams, err := azbfs.AccountSASSignatureValues{ Protocol: azbfs.SASProtocolHTTPS, ExpiryTime: time.Now().Add(48 * time.Hour), - Permissions: azfile.AccountSASPermissions{Read: true, List: true, Write: true, Delete: true, Add: true, Create: true, Update: true, Process: true}.String(), - Services: azfile.AccountSASServices{File: true, Blob: true, Queue: true}.String(), - ResourceTypes: azfile.AccountSASResourceTypes{Service: true, Container: true, Object: true}.String(), + Permissions: azbfs.AccountSASPermissions{Read: true, List: true, Write: true, Delete: true, Add: true, Create: true, Update: true, Process: true}.String(), + Services: azbfs.AccountSASServices{File: true, Blob: true, Queue: true}.String(), + ResourceTypes: azbfs.AccountSASResourceTypes{Service: true, Container: true, Object: true}.String(), }.NewSASQueryParameters(&credential) a.Nil(err) diff --git a/cmd/zt_traverser_blob_test.go b/cmd/zt_traverser_blob_test.go index 18dd65736..656736406 100644 --- a/cmd/zt_traverser_blob_test.go +++ b/cmd/zt_traverser_blob_test.go @@ -24,29 +24,29 @@ import ( "context" "github.com/Azure/azure-storage-azcopy/v10/common" "github.com/Azure/azure-storage-azcopy/v10/ste" - "github.com/Azure/azure-storage-blob-go/azblob" "github.com/stretchr/testify/assert" "testing" ) func TestIsSourceDirWithStub(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // Generate source container and blobs - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) - a.NotNil(containerURL) + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) + a.NotNil(cc) dirName := "source_dir" - createNewDirectoryStub(a, containerURL, dirName) + createNewDirectoryStub(a, cc, dirName) + // set up to create blob traverser ctx := context.WithValue(context.TODO(), ste.ServiceAPIVersionOverride, ste.DefaultServiceApiVersion) - p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{}) // List - rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, dirName) - blobTraverser := newBlobTraverser(&rawBlobURLWithSAS, p, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None(), false) + rawBlobURLWithSAS := scenarioHelper{}.getBlobClientWithSAS(a, containerName, dirName).URL() + serviceClientWithSAS := scenarioHelper{}.getBlobServiceClientWithSASFromURL(a, rawBlobURLWithSAS) + blobTraverser := 
newBlobTraverser(rawBlobURLWithSAS, serviceClientWithSAS, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None(), false) isDir, err := blobTraverser.IsDirectory(true) a.True(isDir) @@ -55,20 +55,20 @@ func TestIsSourceDirWithStub(t *testing.T) { func TestIsSourceDirWithNoStub(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // Generate source container and blobs - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) - a.NotNil(containerURL) + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) + a.NotNil(cc) dirName := "source_dir/" ctx := context.WithValue(context.TODO(), ste.ServiceAPIVersionOverride, ste.DefaultServiceApiVersion) - p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{}) // List - rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, dirName) - blobTraverser := newBlobTraverser(&rawBlobURLWithSAS, p, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None(), false) + rawBlobURLWithSAS := scenarioHelper{}.getBlobClientWithSAS(a, containerName, dirName).URL() + serviceClientWithSAS := scenarioHelper{}.getBlobServiceClientWithSASFromURL(a, rawBlobURLWithSAS) + blobTraverser := newBlobTraverser(rawBlobURLWithSAS, serviceClientWithSAS, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None(), false) isDir, err := blobTraverser.IsDirectory(true) a.True(isDir) @@ -77,20 +77,20 @@ func TestIsSourceDirWithNoStub(t *testing.T) { func TestIsDestDirWithBlobEP(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // Generate source container and blobs - containerURL, containerName := createNewContainer(a, bsu) + containerURL, containerName := createNewContainer(a, bsc) defer deleteContainer(a, containerURL) a.NotNil(containerURL) dirName := "dest_dir/" ctx := context.WithValue(context.TODO(), ste.ServiceAPIVersionOverride, ste.DefaultServiceApiVersion) - p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{}) // List - rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, dirName) - blobTraverser := newBlobTraverser(&rawBlobURLWithSAS, p, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None(), false) + rawBlobURLWithSAS := scenarioHelper{}.getBlobClientWithSAS(a, containerName, dirName).URL() + serviceClientWithSAS := scenarioHelper{}.getBlobServiceClientWithSASFromURL(a, rawBlobURLWithSAS) + blobTraverser := newBlobTraverser(rawBlobURLWithSAS, serviceClientWithSAS, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None(), false) isDir, err := blobTraverser.IsDirectory(false) a.True(isDir) @@ -99,8 +99,8 @@ func TestIsDestDirWithBlobEP(t *testing.T) { //=========================================================== dirName = "dest_file" // List - rawBlobURLWithSAS = scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, dirName) - blobTraverser = newBlobTraverser(&rawBlobURLWithSAS, p, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None(), false) + rawBlobURLWithSAS = 
scenarioHelper{}.getBlobClientWithSAS(a, containerName, dirName).URL() + blobTraverser = newBlobTraverser(rawBlobURLWithSAS, serviceClientWithSAS, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None(), false) isDir, err = blobTraverser.IsDirectory(false) a.False(isDir) @@ -122,11 +122,11 @@ func TestIsDestDirWithDFSEP(t *testing.T) { a.Nil(err) ctx := context.WithValue(context.TODO(), ste.ServiceAPIVersionOverride, ste.DefaultServiceApiVersion) - p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{}) // List - rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, fileSystemName, parentDirName) - blobTraverser := newBlobTraverser(&rawBlobURLWithSAS, p, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None(), true) + rawBlobURLWithSAS := scenarioHelper{}.getBlobClientWithSAS(a, fileSystemName, parentDirName).URL() + serviceClientWithSAS := scenarioHelper{}.getBlobServiceClientWithSASFromURL(a, rawBlobURLWithSAS) + blobTraverser := newBlobTraverser(rawBlobURLWithSAS, serviceClientWithSAS, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None(), true) // a directory with name parentDirName exists on target. So irrespective of // isSource, IsDirectory() should return true. @@ -142,8 +142,8 @@ func TestIsDestDirWithDFSEP(t *testing.T) { // With a directory that does not exist, without path separator. parentDirName = "dirDoesNotExist" - rawBlobURLWithSAS = scenarioHelper{}.getRawBlobURLWithSAS(a, fileSystemName, parentDirName) - blobTraverser = newBlobTraverser(&rawBlobURLWithSAS, p, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None(), true) + rawBlobURLWithSAS = scenarioHelper{}.getBlobClientWithSAS(a, fileSystemName, parentDirName).URL() + blobTraverser = newBlobTraverser(rawBlobURLWithSAS, serviceClientWithSAS, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None(), true) // The directory does not exist, so IsDirectory() // should return false, in all cases @@ -159,8 +159,8 @@ func TestIsDestDirWithDFSEP(t *testing.T) { // With a directory that does not exist, with path separator parentDirNameWithSeparator := "dirDoesNotExist" + common.OS_PATH_SEPARATOR - rawBlobURLWithSAS = scenarioHelper{}.getRawBlobURLWithSAS(a, fileSystemName, parentDirNameWithSeparator) - blobTraverser = newBlobTraverser(&rawBlobURLWithSAS, p, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None(), true) + rawBlobURLWithSAS = scenarioHelper{}.getBlobClientWithSAS(a, fileSystemName, parentDirNameWithSeparator).URL() + blobTraverser = newBlobTraverser(rawBlobURLWithSAS, serviceClientWithSAS, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None(), true) // The directory does not exist, but with a path separator // we should identify it as a directory. 
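[Editor's note, not part of the patch] The hunks above replace the old anonymous-credential pipeline (azblob.NewPipeline) with a Track 2 service client built from a SAS URL; the new blob traverser then receives that service client alongside the raw blob URL and derives container/blob sub-clients from it. A minimal, hypothetical sketch of that pattern follows; the account name, container name, and SAS placeholder are illustrative only and not taken from the patch.

// Illustrative sketch only; not part of the patch. Assumes a SAS-bearing service URL.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"
)

func main() {
	// Track 2: one service client carries the SAS; no azblob.NewPipeline is needed.
	sc, err := service.NewClientWithNoCredential("https://<account>.blob.core.windows.net/?<sas>", nil)
	if err != nil {
		log.Fatal(err)
	}

	// Sub-clients are derived from the service client, mirroring how the new
	// blob traverser is handed a service client plus a raw blob URL.
	cc := sc.NewContainerClient("mycontainer")
	pager := cc.NewListBlobsFlatPager(nil)
	for pager.More() {
		page, err := pager.NextPage(context.TODO())
		if err != nil {
			log.Fatal(err)
		}
		for _, b := range page.Segment.BlobItems {
			fmt.Println(*b.Name)
		}
	}
}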
@@ -176,22 +176,22 @@ func TestIsDestDirWithDFSEP(t *testing.T) { func TestIsSourceFileExists(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // Generate source container and blobs - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) - a.NotNil(containerURL) + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) + a.NotNil(cc) fileName := "source_file" - _, fileName = createNewBlockBlob(a, containerURL, fileName) + _, fileName = createNewBlockBlob(a, cc, fileName) ctx := context.WithValue(context.TODO(), ste.ServiceAPIVersionOverride, ste.DefaultServiceApiVersion) - p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{}) // List - rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, fileName) - blobTraverser := newBlobTraverser(&rawBlobURLWithSAS, p, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None(), false) + rawBlobURLWithSAS := scenarioHelper{}.getBlobClientWithSAS(a, containerName, fileName).URL() + serviceClientWithSAS := scenarioHelper{}.getBlobServiceClientWithSASFromURL(a, rawBlobURLWithSAS) + blobTraverser := newBlobTraverser(rawBlobURLWithSAS, serviceClientWithSAS, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None(), false) isDir, err := blobTraverser.IsDirectory(true) a.False(isDir) @@ -200,20 +200,20 @@ func TestIsSourceFileExists(t *testing.T) { func TestIsSourceFileDoesNotExist(t *testing.T) { a := assert.New(t) - bsu := getBSU() + bsc := getBlobServiceClient() // Generate source container and blobs - containerURL, containerName := createNewContainer(a, bsu) - defer deleteContainer(a, containerURL) - a.NotNil(containerURL) + cc, containerName := createNewContainer(a, bsc) + defer deleteContainer(a, cc) + a.NotNil(cc) fileName := "file_does_not_exist" ctx := context.WithValue(context.TODO(), ste.ServiceAPIVersionOverride, ste.DefaultServiceApiVersion) - p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{}) // List - rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, fileName) - blobTraverser := newBlobTraverser(&rawBlobURLWithSAS, p, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None(), false) + rawBlobURLWithSAS := scenarioHelper{}.getBlobClientWithSAS(a, containerName, fileName).URL() + serviceClientWithSAS := scenarioHelper{}.getBlobServiceClientWithSASFromURL(a, rawBlobURLWithSAS) + blobTraverser := newBlobTraverser(rawBlobURLWithSAS, serviceClientWithSAS, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None(), false) isDir, err := blobTraverser.IsDirectory(true) a.False(isDir) diff --git a/cmd/zt_unit_test.go b/cmd/zt_unit_test.go new file mode 100644 index 000000000..2b962dae3 --- /dev/null +++ b/cmd/zt_unit_test.go @@ -0,0 +1,70 @@ +package cmd + +import ( + "github.com/Azure/azure-storage-azcopy/v10/common" + "github.com/Azure/azure-storage-azcopy/v10/jobsAdmin" + "github.com/Azure/azure-storage-azcopy/v10/ste" + "github.com/Azure/azure-storage-azcopy/v10/testSuite/cmd" + "github.com/stretchr/testify/assert" + "log" + "os" + "path" + "testing" +) + +func TestUnit(t *testing.T) { + a := assert.New(t) + azcopyCurrentJobID = 
common.NewJobID() + azcopyLogPathFolder = common.GetLifecycleMgr().GetEnvironmentVariable(common.EEnvironmentVariable.LogLocation()) // user specified location for log files + azcopyJobPlanFolder := common.GetLifecycleMgr().GetEnvironmentVariable(common.EEnvironmentVariable.JobPlanLocation()) // user specified location for plan files + + // note: azcopyAppPathFolder is the default location for all AzCopy data (logs, job plans, oauth token on Windows) + // but all the above can be put elsewhere as they can become very large + azcopyAppPathFolder := cmd.GetAzCopyAppPath() + + // the user can optionally put the log files somewhere else + if azcopyLogPathFolder == "" { + azcopyLogPathFolder = azcopyAppPathFolder + } + if err := os.Mkdir(azcopyLogPathFolder, os.ModeDir|os.ModePerm); err != nil && !os.IsExist(err) { + log.Fatalf("Problem making .azcopy directory. Try setting AZCOPY_LOG_LOCATION env variable. %v", err) + } + + // the user can optionally put the plan files somewhere else + if azcopyJobPlanFolder == "" { + // make the app path folder ".azcopy" first so we can make a plans folder in it + if err := os.MkdirAll(azcopyAppPathFolder, os.ModeDir); err != nil && !os.IsExist(err) { + log.Fatalf("Problem making .azcopy directory. Try setting AZCOPY_JOB_PLAN_LOCATION env variable. %v", err) + } + azcopyJobPlanFolder = path.Join(azcopyAppPathFolder, "plans") + } + + if err := os.MkdirAll(azcopyJobPlanFolder, os.ModeDir|os.ModePerm); err != nil && !os.IsExist(err) { + log.Fatalf("Problem making .azcopy directory. Try setting AZCOPY_JOB_PLAN_LOCATION env variable. %v", err) + } + common.AzcopyCurrentJobLogger = common.NewJobLogger(azcopyCurrentJobID, azcopyLogVerbosity, azcopyLogPathFolder, "") + common.AzcopyCurrentJobLogger.OpenLog() + common.AzcopyJobPlanFolder = azcopyJobPlanFolder + concurrencySettings := ste.NewConcurrencySettings(azcopyMaxFileAndSocketHandles, false) + err := jobsAdmin.MainSTE(concurrencySettings, float64(cmdLineCapMegaBitsPerSecond), azcopyJobPlanFolder, azcopyLogPathFolder, false) + a.Nil(err) + rawCopy := rawCopyCmdArgs{ + src: "https://azcopyperftestsource.blob.core.windows.net/noaa-bathymetry-pds/?sp=racwdli&st=2023-07-13T18:21:20Z&se=2024-07-14T02:21:20Z&spr=https&sv=2022-11-02&sr=c&sig=r2qpNrnESM0of9uEZaduJezFtAiZLj3lykMK4%2BX7gDA%3D", + dst: "https://azcopyperftestdst.blob.core.windows.net/noaa-bathymetry-pds/?sv=2022-11-02&ss=bfqt&srt=sco&sp=rwdlacupiytfx&se=2024-07-14T02:25:51Z&st=2023-07-13T18:25:51Z&spr=https&sig=8%2FUXpjeOPp2fTqIWW5zEfm1UtDZJRffXFpxpwHKIpKM%3D", + recursive: true, + blockSizeMB: 128, + forceWrite: "true", + blobType: "blockblob", + blockBlobTier: "none", + pageBlobTier: "none", + md5ValidationOption: "FailIfDifferent", + preserveOwner: true, + s2sInvalidMetadataHandleOption: "ExcludeIfInvalid", + } + cooked, err := rawCopy.cook() + a.Nil(err) + + cooked.commandString = copyHandlerUtil{}.ConstructCommandStringFromArgs() + err = cooked.process() + a.Nil(err) +} \ No newline at end of file diff --git a/cmd/zt_user_input_test.go b/cmd/zt_user_input_test.go index 1d91f92eb..6de20bd34 100644 --- a/cmd/zt_user_input_test.go +++ b/cmd/zt_user_input_test.go @@ -50,8 +50,8 @@ func TestCPKEncryptionInputTest(t *testing.T) { a.Contains(err.Error(), "client provided keys (CPK) based encryption is only supported with blob endpoints (blob.core.windows.net)") }) - rawContainerURL := scenarioHelper{}.getContainerURL(a, "testcpkcontainer") - raw2 := getDefaultRawCopyInput(dirPath, rawContainerURL.String()) + rawContainerURL := 
scenarioHelper{}.getContainerClient(a, "testcpkcontainer") + raw2 := getDefaultRawCopyInput(dirPath, rawContainerURL.URL()) raw2.recursive = true raw2.cpkInfo = true diff --git a/common/access.go b/common/access.go index abac05af4..4fac704f2 100644 --- a/common/access.go +++ b/common/access.go @@ -2,38 +2,48 @@ package common import ( "context" - "net/url" - - "github.com/Azure/azure-storage-blob-go/azblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" ) func IsSourcePublicBlob(sourceURI string, ctx context.Context) bool { - uri, err := url.Parse(sourceURI) + blobParts, err := blob.ParseURL(sourceURI) if err != nil { // this should never, would never be hit. // a job plan file couldn't be created by AzCopy with an invalid URI. panic("Source URI was invalid.") } - blobParts := azblob.NewBlobURLParts(*uri) - // only containers can be public access if blobParts.ContainerName != "" { if blobParts.BlobName != "" { // first test that it's a blob - bURL := azblob.NewBlobURL(*uri, azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{})) - _, err := bURL.GetProperties(ctx, azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{}) + blobClient, err := blob.NewClientWithNoCredential(sourceURI, nil) + if err != nil { + // this should never, would never be hit. + // a job plan file couldn't be created by AzCopy with an invalid URI. + panic("Blob client was unable to be created.") + } + _, err = blobClient.GetProperties(ctx, nil) if err == nil { return true } // since that failed, maybe it doesn't exist and is public to list? blobParts.BlobName = "" + blobParts.Snapshot = "" + blobParts.VersionID = "" } - cURL := azblob.NewContainerURL(blobParts.URL(), azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{})) + containerClient, err := container.NewClientWithNoCredential(blobParts.String(), nil) + if err != nil { + // this should never, would never be hit. + // a job plan file couldn't be created by AzCopy with an invalid URI. + panic("Container client was unable to be created.") + } - _, err := cURL.ListBlobsFlatSegment(ctx, azblob.Marker{}, azblob.ListBlobsSegmentOptions{}) + pager := containerClient.NewListBlobsFlatPager(nil) + _, err = pager.NextPage(ctx) if err == nil { return true } diff --git a/common/clientFactory.go b/common/clientFactory.go new file mode 100644 index 000000000..4b82edb1d --- /dev/null +++ b/common/clientFactory.go @@ -0,0 +1,293 @@ +// Copyright © 2017 Microsoft +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package common + +import ( + "errors" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob" + blobservice "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service" + sharedirectory "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/directory" + sharefile "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" + fileservice "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/service" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/share" +) + +var glcm = GetLifecycleMgr() + +// newClientCallbacks is a Generic Type to allow client creation error handling to live in a single place (createClient) +// T = Client type +// U = SharedKeyCredential type +// Note : Could also make azcore.ClientOptions generic here if one day different storage service clients have additional options. This would also make the callback definitions easier. +type newClientCallbacks[T, U any] struct { + NewSharedKeyCredential func(string, string) (*U, error) + SharedKeyCredential func(string, *U, azcore.ClientOptions) (*T, error) + TokenCredential func(string, azcore.TokenCredential, azcore.ClientOptions) (*T, error) + NoCredential func(string, azcore.ClientOptions) (*T, error) +} + +// createClient is a generic method to allow client creation error handling to live in a single place +func createClient[T, U any](callbacks newClientCallbacks[T, U], u string, credInfo CredentialInfo, credOpOptions *CredentialOpOptions, options azcore.ClientOptions) (client *T) { + var err error + if credOpOptions == nil { + credOpOptions = &CredentialOpOptions{ + LogError: glcm.Info, + } + } + switch credInfo.CredentialType { + case ECredentialType.OAuthToken(): + if credInfo.OAuthTokenInfo.IsEmpty() { + err = errors.New("invalid state, cannot get valid OAuth token information") + break + } + var tc azcore.TokenCredential + tc, err = credInfo.OAuthTokenInfo.GetTokenCredential() + if err != nil { + err = fmt.Errorf("unable to get token credential due to reason (%s)", err.Error()) + break + } + client, err = callbacks.TokenCredential(u, tc, options) + case ECredentialType.Anonymous(): + client, err = callbacks.NoCredential(u, options) + case ECredentialType.SharedKey(): + name := lcm.GetEnvironmentVariable(EEnvironmentVariable.AccountName()) + key := lcm.GetEnvironmentVariable(EEnvironmentVariable.AccountKey()) + // If the ACCOUNT_NAME and ACCOUNT_KEY are not set in environment variables + if name == "" || key == "" { + err = fmt.Errorf("ACCOUNT_NAME and ACCOUNT_KEY environment variables must be set before creating the SharedKey credential") + break + } + var sharedKey *U + sharedKey, err = callbacks.NewSharedKeyCredential(name, key) + if err != nil { + err = fmt.Errorf("unable to get shared key credential due to reason (%s)", err.Error()) + break + } + client, err = callbacks.SharedKeyCredential(u, sharedKey, options) + default: + err = fmt.Errorf("invalid state, credential type %v is not supported", 
credInfo.CredentialType) + } + if err != nil { + credOpOptions.panicError(err) + } + return client +} + +///////////////////////////////////////////////// BLOB FUNCTIONS ///////////////////////////////////////////////// + +// CreateBlobServiceClient creates a blob service client with credentials specified by credInfo +func CreateBlobServiceClient(u string, credInfo CredentialInfo, credOpOptions *CredentialOpOptions, options azcore.ClientOptions) *blobservice.Client { + callbacks := newClientCallbacks[blobservice.Client, blob.SharedKeyCredential]{ + TokenCredential: func(u string, tc azcore.TokenCredential, options azcore.ClientOptions) (*blobservice.Client, error) { + return blobservice.NewClient(u, tc, &blobservice.ClientOptions{ClientOptions: options}) + }, + NoCredential: func(u string, options azcore.ClientOptions) (*blobservice.Client, error) { + return blobservice.NewClientWithNoCredential(u, &blobservice.ClientOptions{ClientOptions: options}) + }, + SharedKeyCredential: func(u string, sharedKey *blob.SharedKeyCredential, options azcore.ClientOptions) (*blobservice.Client, error) { + return blobservice.NewClientWithSharedKeyCredential(u, sharedKey, &blobservice.ClientOptions{ClientOptions: options}) + }, + NewSharedKeyCredential: func(accountName string, accountKey string) (*blob.SharedKeyCredential, error) { + return blob.NewSharedKeyCredential(accountName, accountKey) + }, + } + + return createClient(callbacks, u, credInfo, credOpOptions, options) +} + +func CreateContainerClient(u string, credInfo CredentialInfo, credOpOptions *CredentialOpOptions, options azcore.ClientOptions) *container.Client { + callbacks := newClientCallbacks[container.Client, blob.SharedKeyCredential]{ + TokenCredential: func(u string, tc azcore.TokenCredential, options azcore.ClientOptions) (*container.Client, error) { + return container.NewClient(u, tc, &container.ClientOptions{ClientOptions: options}) + }, + NoCredential: func(u string, options azcore.ClientOptions) (*container.Client, error) { + return container.NewClientWithNoCredential(u, &container.ClientOptions{ClientOptions: options}) + }, + SharedKeyCredential: func(u string, sharedKey *blob.SharedKeyCredential, options azcore.ClientOptions) (*container.Client, error) { + return container.NewClientWithSharedKeyCredential(u, sharedKey, &container.ClientOptions{ClientOptions: options}) + }, + NewSharedKeyCredential: func(accountName string, accountKey string) (*blob.SharedKeyCredential, error) { + return blob.NewSharedKeyCredential(accountName, accountKey) + }, + } + + return createClient(callbacks, u, credInfo, credOpOptions, options) +} + +func CreateBlobClient(u string, credInfo CredentialInfo, credOpOptions *CredentialOpOptions, options azcore.ClientOptions) *blob.Client { + callbacks := newClientCallbacks[blob.Client, blob.SharedKeyCredential]{ + TokenCredential: func(u string, tc azcore.TokenCredential, options azcore.ClientOptions) (*blob.Client, error) { + return blob.NewClient(u, tc, &blob.ClientOptions{ClientOptions: options}) + }, + NoCredential: func(u string, options azcore.ClientOptions) (*blob.Client, error) { + return blob.NewClientWithNoCredential(u, &blob.ClientOptions{ClientOptions: options}) + }, + SharedKeyCredential: func(u string, sharedKey *blob.SharedKeyCredential, options azcore.ClientOptions) (*blob.Client, error) { + return blob.NewClientWithSharedKeyCredential(u, sharedKey, &blob.ClientOptions{ClientOptions: options}) + }, + NewSharedKeyCredential: func(accountName string, accountKey string) (*blob.SharedKeyCredential, 
error) { + return blob.NewSharedKeyCredential(accountName, accountKey) + }, + } + + return createClient(callbacks, u, credInfo, credOpOptions, options) +} + +func CreateAppendBlobClient(u string, credInfo CredentialInfo, credOpOptions *CredentialOpOptions, options azcore.ClientOptions) *appendblob.Client { + callbacks := newClientCallbacks[appendblob.Client, blob.SharedKeyCredential]{ + TokenCredential: func(u string, tc azcore.TokenCredential, options azcore.ClientOptions) (*appendblob.Client, error) { + return appendblob.NewClient(u, tc, &appendblob.ClientOptions{ClientOptions: options}) + }, + NoCredential: func(u string, options azcore.ClientOptions) (*appendblob.Client, error) { + return appendblob.NewClientWithNoCredential(u, &appendblob.ClientOptions{ClientOptions: options}) + }, + SharedKeyCredential: func(u string, sharedKey *blob.SharedKeyCredential, options azcore.ClientOptions) (*appendblob.Client, error) { + return appendblob.NewClientWithSharedKeyCredential(u, sharedKey, &appendblob.ClientOptions{ClientOptions: options}) + }, + NewSharedKeyCredential: func(accountName string, accountKey string) (*blob.SharedKeyCredential, error) { + return blob.NewSharedKeyCredential(accountName, accountKey) + }, + } + + return createClient(callbacks, u, credInfo, credOpOptions, options) +} + +func CreateBlockBlobClient(u string, credInfo CredentialInfo, credOpOptions *CredentialOpOptions, options azcore.ClientOptions) *blockblob.Client { + callbacks := newClientCallbacks[blockblob.Client, blob.SharedKeyCredential]{ + TokenCredential: func(u string, tc azcore.TokenCredential, options azcore.ClientOptions) (*blockblob.Client, error) { + return blockblob.NewClient(u, tc, &blockblob.ClientOptions{ClientOptions: options}) + }, + NoCredential: func(u string, options azcore.ClientOptions) (*blockblob.Client, error) { + return blockblob.NewClientWithNoCredential(u, &blockblob.ClientOptions{ClientOptions: options}) + }, + SharedKeyCredential: func(u string, sharedKey *blob.SharedKeyCredential, options azcore.ClientOptions) (*blockblob.Client, error) { + return blockblob.NewClientWithSharedKeyCredential(u, sharedKey, &blockblob.ClientOptions{ClientOptions: options}) + }, + NewSharedKeyCredential: func(accountName string, accountKey string) (*blob.SharedKeyCredential, error) { + return blob.NewSharedKeyCredential(accountName, accountKey) + }, + } + + return createClient(callbacks, u, credInfo, credOpOptions, options) +} + +func CreatePageBlobClient(u string, credInfo CredentialInfo, credOpOptions *CredentialOpOptions, options azcore.ClientOptions) *pageblob.Client { + callbacks := newClientCallbacks[pageblob.Client, blob.SharedKeyCredential]{ + TokenCredential: func(u string, tc azcore.TokenCredential, options azcore.ClientOptions) (*pageblob.Client, error) { + return pageblob.NewClient(u, tc, &pageblob.ClientOptions{ClientOptions: options}) + }, + NoCredential: func(u string, options azcore.ClientOptions) (*pageblob.Client, error) { + return pageblob.NewClientWithNoCredential(u, &pageblob.ClientOptions{ClientOptions: options}) + }, + SharedKeyCredential: func(u string, sharedKey *blob.SharedKeyCredential, options azcore.ClientOptions) (*pageblob.Client, error) { + return pageblob.NewClientWithSharedKeyCredential(u, sharedKey, &pageblob.ClientOptions{ClientOptions: options}) + }, + NewSharedKeyCredential: func(accountName string, accountKey string) (*blob.SharedKeyCredential, error) { + return blob.NewSharedKeyCredential(accountName, accountKey) + }, + } + + return createClient(callbacks, u, credInfo, 
credOpOptions, options) +} + +///////////////////////////////////////////////// FILE FUNCTIONS ///////////////////////////////////////////////// + +// CreateFileServiceClient creates a blob service client with credentials specified by credInfo +func CreateFileServiceClient(u string, credInfo CredentialInfo, credOpOptions *CredentialOpOptions, options azcore.ClientOptions) *fileservice.Client { + callbacks := newClientCallbacks[fileservice.Client, sharefile.SharedKeyCredential]{ + TokenCredential: func(u string, tc azcore.TokenCredential, options azcore.ClientOptions) (*fileservice.Client, error) { + return nil, fmt.Errorf("invalid state, credential type %v is not supported", credInfo.CredentialType) + }, + NoCredential: func(u string, options azcore.ClientOptions) (*fileservice.Client, error) { + return fileservice.NewClientWithNoCredential(u, &fileservice.ClientOptions{ClientOptions: options}) + }, + SharedKeyCredential: func(u string, sharedKey *sharefile.SharedKeyCredential, options azcore.ClientOptions) (*fileservice.Client, error) { + return nil, fmt.Errorf("invalid state, credential type %v is not supported", credInfo.CredentialType) + }, + NewSharedKeyCredential: func(accountName string, accountKey string) (*sharefile.SharedKeyCredential, error) { + return nil, fmt.Errorf("invalid state, credential type %v is not supported", credInfo.CredentialType) + }, + } + + return createClient(callbacks, u, credInfo, credOpOptions, options) +} + +func CreateShareClient(u string, credInfo CredentialInfo, credOpOptions *CredentialOpOptions, options azcore.ClientOptions) *share.Client { + callbacks := newClientCallbacks[share.Client, sharefile.SharedKeyCredential]{ + TokenCredential: func(u string, tc azcore.TokenCredential, options azcore.ClientOptions) (*share.Client, error) { + return nil, fmt.Errorf("invalid state, credential type %v is not supported", credInfo.CredentialType) + }, + NoCredential: func(u string, options azcore.ClientOptions) (*share.Client, error) { + return share.NewClientWithNoCredential(u, &share.ClientOptions{ClientOptions: options}) + }, + SharedKeyCredential: func(u string, sharedKey *sharefile.SharedKeyCredential, options azcore.ClientOptions) (*share.Client, error) { + return nil, fmt.Errorf("invalid state, credential type %v is not supported", credInfo.CredentialType) + }, + NewSharedKeyCredential: func(accountName string, accountKey string) (*sharefile.SharedKeyCredential, error) { + return nil, fmt.Errorf("invalid state, credential type %v is not supported", credInfo.CredentialType) + }, + } + + return createClient(callbacks, u, credInfo, credOpOptions, options) +} + +func CreateShareFileClient(u string, credInfo CredentialInfo, credOpOptions *CredentialOpOptions, options azcore.ClientOptions) *sharefile.Client { + callbacks := newClientCallbacks[sharefile.Client, sharefile.SharedKeyCredential]{ + TokenCredential: func(u string, tc azcore.TokenCredential, options azcore.ClientOptions) (*sharefile.Client, error) { + return nil, fmt.Errorf("invalid state, credential type %v is not supported", credInfo.CredentialType) + }, + NoCredential: func(u string, options azcore.ClientOptions) (*sharefile.Client, error) { + return sharefile.NewClientWithNoCredential(u, &sharefile.ClientOptions{ClientOptions: options}) + }, + SharedKeyCredential: func(u string, sharedKey *sharefile.SharedKeyCredential, options azcore.ClientOptions) (*sharefile.Client, error) { + return nil, fmt.Errorf("invalid state, credential type %v is not supported", credInfo.CredentialType) + }, + 
NewSharedKeyCredential: func(accountName string, accountKey string) (*sharefile.SharedKeyCredential, error) { + return nil, fmt.Errorf("invalid state, credential type %v is not supported", credInfo.CredentialType) + }, + } + + return createClient(callbacks, u, credInfo, credOpOptions, options) +} + +func CreateShareDirectoryClient(u string, credInfo CredentialInfo, credOpOptions *CredentialOpOptions, options azcore.ClientOptions) *sharedirectory.Client { + callbacks := newClientCallbacks[sharedirectory.Client, sharefile.SharedKeyCredential]{ + TokenCredential: func(u string, tc azcore.TokenCredential, options azcore.ClientOptions) (*sharedirectory.Client, error) { + return nil, fmt.Errorf("invalid state, credential type %v is not supported", credInfo.CredentialType) + }, + NoCredential: func(u string, options azcore.ClientOptions) (*sharedirectory.Client, error) { + return sharedirectory.NewClientWithNoCredential(u, &sharedirectory.ClientOptions{ClientOptions: options}) + }, + SharedKeyCredential: func(u string, sharedKey *sharefile.SharedKeyCredential, options azcore.ClientOptions) (*sharedirectory.Client, error) { + return nil, fmt.Errorf("invalid state, credential type %v is not supported", credInfo.CredentialType) + }, + NewSharedKeyCredential: func(accountName string, accountKey string) (*sharefile.SharedKeyCredential, error) { + return nil, fmt.Errorf("invalid state, credential type %v is not supported", credInfo.CredentialType) + }, + } + + return createClient(callbacks, u, credInfo, credOpOptions, options) +} \ No newline at end of file diff --git a/common/credCacheGnomeKeyringShim_linux.go b/common/credCacheGnomeKeyringShim_linux.go index 7a79f630c..0c79448d3 100644 --- a/common/credCacheGnomeKeyringShim_linux.go +++ b/common/credCacheGnomeKeyringShim_linux.go @@ -1,4 +1,6 @@ +//go:build !se_integration // +build !se_integration + // For public version azcopy, gnome keyring is not necessary, and no need to // involve additional dependencies to libsecret-1 and glib-2.0 @@ -8,6 +10,7 @@ import "errors" type gnomeKeyring struct{} +//nolint:staticcheck func (p gnomeKeyring) Get(Service string, Account string) (string, error) { // By design, not useful for non integration scenario. return "", errors.New("Not implemented") diff --git a/common/credCacheInternal_linux.go b/common/credCacheInternal_linux.go index 8c358f3ca..efeb41bc6 100644 --- a/common/credCacheInternal_linux.go +++ b/common/credCacheInternal_linux.go @@ -92,7 +92,7 @@ func (c *CredCacheInternalIntegration) LoadToken() (*OAuthTokenInfo, error) { // hasCachedTokenInternal returns if there is cached token in token manager. func (c *CredCacheInternalIntegration) hasCachedTokenInternal() (bool, error) { - if _, err := c.keyring.Get(c.serviceName, c.accountName); err != nil { + if _, err := c.keyring.Get(c.serviceName, c.accountName); err != nil { //nolint:staticcheck return false, fmt.Errorf("failed to find token from gnome keyring, %v", err) } @@ -106,6 +106,7 @@ func (c *CredCacheInternalIntegration) removeCachedTokenInternal() error { } // loadTokenInternal restores a Token object from file cache. 
+//nolint:staticcheck func (c *CredCacheInternalIntegration) loadTokenInternal() (*OAuthTokenInfo, error) { data, err := c.keyring.Get(c.serviceName, c.accountName) if err != nil { diff --git a/common/credentialFactory.go b/common/credentialFactory.go index 7ce6905b7..135430720 100644 --- a/common/credentialFactory.go +++ b/common/credentialFactory.go @@ -25,6 +25,8 @@ import ( "errors" "fmt" "github.com/Azure/azure-pipeline-go/pipeline" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" "math" "strings" "sync" @@ -33,7 +35,6 @@ import ( gcpUtils "cloud.google.com/go/storage" "github.com/Azure/azure-storage-azcopy/v10/azbfs" - "github.com/Azure/azure-storage-blob-go/azblob" "github.com/Azure/go-autorest/autorest/adal" "github.com/minio/minio-go" "github.com/minio/minio-go/pkg/credentials" @@ -56,7 +57,7 @@ type CredentialOpOptions struct { // callerMessage formats caller message prefix. func (o CredentialOpOptions) callerMessage() string { - return IffString(o.CallerID == "", o.CallerID, o.CallerID+" ") + return Iff(o.CallerID == "", o.CallerID, o.CallerID+" ") } // logInfo logs info, if LogInfo is specified in CredentialOpOptions. @@ -91,32 +92,19 @@ func (o CredentialOpOptions) cancel() { } } -// CreateBlobCredential creates Blob credential according to credential info. -func CreateBlobCredential(ctx context.Context, credInfo CredentialInfo, options CredentialOpOptions) azblob.Credential { - credential := azblob.NewAnonymousCredential() - +// GetSourceBlobCredential gets the TokenCredential based on the cred info +func GetSourceBlobCredential(credInfo CredentialInfo, options CredentialOpOptions) (azcore.TokenCredential, error) { if credInfo.CredentialType.IsAzureOAuth() { if credInfo.OAuthTokenInfo.IsEmpty() { options.panicError(errors.New("invalid state, cannot get valid OAuth token information")) } - - if credInfo.CredentialType == ECredentialType.MDOAuthToken() { - credInfo.OAuthTokenInfo.Resource = MDResource // token will instantly refresh with this - } - - // Create TokenCredential with refresher. - if credInfo.SourceBlobToken != nil { - return credInfo.SourceBlobToken + if credInfo.S2SSourceTokenCredential != nil { + return credInfo.S2SSourceTokenCredential, nil } else { - return azblob.NewTokenCredential( - credInfo.OAuthTokenInfo.AccessToken, - func(credential azblob.TokenCredential) time.Duration { - return refreshBlobToken(ctx, credInfo.OAuthTokenInfo, credential, options) - }) + return credInfo.OAuthTokenInfo.GetTokenCredential() } } - - return credential + return nil, nil } // refreshPolicyHalfOfExpiryWithin is used for calculating next refresh time, @@ -145,27 +133,6 @@ func refreshPolicyHalfOfExpiryWithin(token *adal.Token, options CredentialOpOpti return waitDuration } -func refreshBlobToken(ctx context.Context, tokenInfo OAuthTokenInfo, tokenCredential azblob.TokenCredential, options CredentialOpOptions) time.Duration { - newToken, err := tokenInfo.Refresh(ctx) - if err != nil { - // Fail to get new token. - if _, ok := err.(adal.TokenRefreshError); ok && strings.Contains(err.Error(), "refresh token has expired") { - options.logError(fmt.Sprintf("failed to refresh token, OAuth refresh token has expired, please log in with azcopy login command again. (Error details: %v)", err)) - } else { - options.logError(fmt.Sprintf("failed to refresh token, please check error details and try to log in with azcopy login command again. (Error details: %v)", err)) - } - // Try to refresh again according to original token's info. 
- return refreshPolicyHalfOfExpiryWithin(&(tokenInfo.Token), options) - } - - // Token has been refreshed successfully. - tokenCredential.SetToken(newToken.AccessToken) - options.logInfo(fmt.Sprintf("%v token refreshed successfully", time.Now().UTC())) - - // Calculate wait duration, and schedule next refresh. - return refreshPolicyHalfOfExpiryWithin(newToken, options) -} - // CreateBlobFSCredential creates BlobFS credential according to credential info. func CreateBlobFSCredential(ctx context.Context, credInfo CredentialInfo, options CredentialOpOptions) azbfs.Credential { cred := azbfs.NewAnonymousCredential() @@ -343,37 +310,34 @@ func (f *GCPClientFactory) GetGCPClient(ctx context.Context, credInfo Credential } } -// Default Encryption Algorithm Supported -const EncryptionAlgorithmAES256 string = "AES256" - -func GetCpkInfo(cpkInfo bool) CpkInfo { +func GetCpkInfo(cpkInfo bool) *blob.CPKInfo { if !cpkInfo { - return CpkInfo{} + return nil } // fetch EncryptionKey and EncryptionKeySHA256 from the environment variables glcm := GetLifecycleMgr() encryptionKey := glcm.GetEnvironmentVariable(EEnvironmentVariable.CPKEncryptionKey()) encryptionKeySHA256 := glcm.GetEnvironmentVariable(EEnvironmentVariable.CPKEncryptionKeySHA256()) - encryptionAlgorithmAES256 := EncryptionAlgorithmAES256 + encryptionAlgorithmAES256 := blob.EncryptionAlgorithmTypeAES256 if encryptionKey == "" || encryptionKeySHA256 == "" { glcm.Error("fatal: failed to fetch cpk encryption key (" + EEnvironmentVariable.CPKEncryptionKey().Name + ") or hash (" + EEnvironmentVariable.CPKEncryptionKeySHA256().Name + ") from environment variables") } - return CpkInfo{ + return &blob.CPKInfo{ EncryptionKey: &encryptionKey, - EncryptionKeySha256: &encryptionKeySHA256, + EncryptionKeySHA256: &encryptionKeySHA256, EncryptionAlgorithm: &encryptionAlgorithmAES256, } } -func GetCpkScopeInfo(cpkScopeInfo string) CpkScopeInfo { +func GetCpkScopeInfo(cpkScopeInfo string) *blob.CPKScopeInfo { if cpkScopeInfo == "" { - return CpkScopeInfo{} + return nil } else { - return CpkScopeInfo{ + return &blob.CPKScopeInfo{ EncryptionScope: &cpkScopeInfo, } } diff --git a/common/environment.go b/common/environment.go index d1333f00e..f81333cf9 100644 --- a/common/environment.go +++ b/common/environment.go @@ -78,7 +78,7 @@ var EEnvironmentVariable = EnvironmentVariable{} func (EnvironmentVariable) UserDir() EnvironmentVariable { // Only used internally, not listed in the environment variables. return EnvironmentVariable{ - Name: IffString(runtime.GOOS == "windows", "USERPROFILE", "HOME"), + Name: Iff(runtime.GOOS == "windows", "USERPROFILE", "HOME"), } } @@ -145,7 +145,7 @@ func (EnvironmentVariable) ManagedIdentityClientID() EnvironmentVariable { func (EnvironmentVariable) ManagedIdentityObjectID() EnvironmentVariable { return EnvironmentVariable{ Name: "AZCOPY_MSI_OBJECT_ID", - Description: "Object ID for user-assigned identity. This variable is only used for auto login, please use the command line flag instead when invoking the login command.", + Description: "Object ID for user-assigned identity. This parameter is deprecated. 
Please use client id or resource id.", } } @@ -372,9 +372,9 @@ func (EnvironmentVariable) DownloadToTempPath() EnvironmentVariable { } func (EnvironmentVariable) DisableBlobTransferResume() EnvironmentVariable { - return EnvironmentVariable { - Name: "AZCOPY_DISABLE_INCOMPLETE_BLOB_TRANSFER", + return EnvironmentVariable{ + Name: "AZCOPY_DISABLE_INCOMPLETE_BLOB_TRANSFER", DefaultValue: "false", - Description: "An incomplete transfer to blob endpoint will be resumed from start if set to true", + Description: "An incomplete transfer to blob endpoint will be resumed from start if set to true", } -} \ No newline at end of file +} diff --git a/common/extensions.go b/common/extensions.go index 4e721973c..71dc33f65 100644 --- a/common/extensions.go +++ b/common/extensions.go @@ -10,8 +10,6 @@ import ( "strings" "github.com/Azure/azure-storage-azcopy/v10/azbfs" - - "github.com/Azure/azure-storage-file-go/azfile" ) // /////////////////////////////////////////////////////////////////////////////////////////////// @@ -84,22 +82,6 @@ func RedactSecretQueryParam(rawQuery, queryKeyNeedRedact string) (bool, string) return sigFound, values.Encode() } -// /////////////////////////////////////////////////////////////////////////////////////////////// -type FileURLPartsExtension struct { - azfile.FileURLParts -} - -func (parts FileURLPartsExtension) GetShareURL() url.URL { - parts.DirectoryOrFilePath = "" - return parts.URL() -} - -func (parts FileURLPartsExtension) GetServiceURL() url.URL { - parts.ShareName = "" - parts.DirectoryOrFilePath = "" - return parts.URL() -} - // /////////////////////////////////////////////////////////////////////////////////////////////// type HTTPResponseExtension struct { *http.Response diff --git a/common/extensions_test.go b/common/extensions_test.go index c04a6d9f4..d154cdad6 100644 --- a/common/extensions_test.go +++ b/common/extensions_test.go @@ -2,14 +2,13 @@ package common import ( "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob" "github.com/stretchr/testify/assert" "math/rand" "net/url" "strings" "testing" "unsafe" - - "github.com/Azure/azure-storage-blob-go/azblob" ) func TestGenerateFullPath(t *testing.T) { @@ -156,7 +155,7 @@ func TestBlockblobBlockIDGeneration(t *testing.T) { jobIdStr := string((*[16]byte)(unsafe.Pointer(&jobId))[:]) // 16Byte jobID partNum := rand.Int31n(maxNumberOfParts) // 5B partNumber fileIndex := rand.Int31n(numOfFilesPerDispatchJobPart) // 5Byte index of file in part - blockIndex := rand.Int31n(azblob.BlockBlobMaxBlocks) // 5B blockIndex + blockIndex := rand.Int31n(blockblob.MaxBlocks) // 5B blockIndex blockNamePrefix := fmt.Sprintf("%s%s%05d%05d", placeHolder, jobIdStr, partNum, fileIndex) blockName := GenerateBlockBlobBlockID(blockNamePrefix, blockIndex) diff --git a/common/fe-ste-models.go b/common/fe-ste-models.go index 5a90de0c7..e2f04d5eb 100644 --- a/common/fe-ste-models.go +++ b/common/fe-ste-models.go @@ -25,6 +25,9 @@ import ( "encoding/json" "errors" "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + sharefile "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" "math" "os" "reflect" @@ -36,8 +39,6 @@ import ( "github.com/Azure/azure-storage-azcopy/v10/azbfs" "github.com/Azure/azure-pipeline-go/pipeline" - "github.com/Azure/azure-storage-blob-go/azblob" - "github.com/Azure/azure-storage-file-go/azfile" "github.com/JeffreyRichter/enum/enum" ) @@ -140,12 +141,12 @@ func (d *DeleteSnapshotsOption) Parse(s string) error { return err 
} -func (d DeleteSnapshotsOption) ToDeleteSnapshotsOptionType() azblob.DeleteSnapshotsOptionType { +func (d DeleteSnapshotsOption) ToDeleteSnapshotsOptionType() *blob.DeleteSnapshotsOptionType { if d == EDeleteSnapshotsOption.None() { - return azblob.DeleteSnapshotsOptionNone + return nil } - return azblob.DeleteSnapshotsOptionType(strings.ToLower(d.String())) + return to.Ptr(blob.DeleteSnapshotsOptionType(strings.ToLower(d.String()))) } // ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// @@ -204,11 +205,11 @@ func (p PermanentDeleteOption) String() string { return enum.StringInt(p, reflect.TypeOf(p)) } -func (p PermanentDeleteOption) ToPermanentDeleteOptionType() azblob.BlobDeleteType { +func (p PermanentDeleteOption) ToPermanentDeleteOptionType() *blob.DeleteType { if p == EPermanentDeleteOption.None() { - return azblob.BlobDeleteNone + return nil } - return azblob.BlobDeletePermanent + return to.Ptr(blob.DeleteTypePermanent) } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// @@ -681,31 +682,31 @@ func (bt *BlobType) Parse(s string) error { return err } -func FromAzBlobType(bt azblob.BlobType) BlobType { +func FromBlobType(bt blob.BlobType) BlobType { switch bt { - case azblob.BlobBlockBlob: + case blob.BlobTypeBlockBlob: return EBlobType.BlockBlob() - case azblob.BlobPageBlob: + case blob.BlobTypePageBlob: return EBlobType.PageBlob() - case azblob.BlobAppendBlob: + case blob.BlobTypeAppendBlob: return EBlobType.AppendBlob() default: return EBlobType.Detect() } } -// ToAzBlobType returns the equivalent azblob.BlobType for given string. -func (bt *BlobType) ToAzBlobType() azblob.BlobType { +// ToBlobType returns the equivalent blob.BlobType for given string. +func (bt *BlobType) ToBlobType() blob.BlobType { blobType := bt.String() switch blobType { - case string(azblob.BlobBlockBlob): - return azblob.BlobBlockBlob - case string(azblob.BlobPageBlob): - return azblob.BlobPageBlob - case string(azblob.BlobAppendBlob): - return azblob.BlobAppendBlob + case string(blob.BlobTypeBlockBlob): + return blob.BlobTypeBlockBlob + case string(blob.BlobTypePageBlob): + return blob.BlobTypePageBlob + case string(blob.BlobTypeAppendBlob): + return blob.BlobTypeAppendBlob default: - return azblob.BlobNone + return "" } } @@ -812,8 +813,8 @@ func (bbt *BlockBlobTier) Parse(s string) error { return err } -func (bbt BlockBlobTier) ToAccessTierType() azblob.AccessTierType { - return azblob.AccessTierType(bbt.String()) +func (bbt BlockBlobTier) ToAccessTierType() blob.AccessTier { + return blob.AccessTier(bbt.String()) } func (bbt BlockBlobTier) MarshalJSON() ([]byte, error) { @@ -857,8 +858,8 @@ func (pbt *PageBlobTier) Parse(s string) error { return err } -func (pbt PageBlobTier) ToAccessTierType() azblob.AccessTierType { - return azblob.AccessTierType(pbt.String()) +func (pbt PageBlobTier) ToAccessTierType() blob.AccessTier { + return blob.AccessTier(pbt.String()) } func (pbt PageBlobTier) MarshalJSON() ([]byte, error) { @@ -1054,8 +1055,8 @@ type CopyTransfer struct { Metadata Metadata // Properties for S2S blob copy - BlobType azblob.BlobType - BlobTier azblob.AccessTierType + BlobType blob.BlobType + BlobTier blob.AccessTier BlobVersionID string // Blob index tags categorize data in your storage account utilizing key-value tag attributes BlobTags BlobTags @@ -1068,7 +1069,7 @@ type CopyTransfer struct { // Metadata used in AzCopy. 
const MetadataAndBlobTagsClearFlag = "clear" // clear flag used for metadata and tags -type Metadata map[string]string +type Metadata map[string]*string func (m Metadata) Clone() Metadata { out := make(Metadata) @@ -1080,26 +1081,6 @@ func (m Metadata) Clone() Metadata { return out } -// ToAzBlobMetadata converts metadata to azblob's metadata. -func (m Metadata) ToAzBlobMetadata() azblob.Metadata { - return azblob.Metadata(m) -} - -// ToAzFileMetadata converts metadata to azfile's metadata. -func (m Metadata) ToAzFileMetadata() azfile.Metadata { - return azfile.Metadata(m) -} - -// FromAzBlobMetadataToCommonMetadata converts azblob's metadata to common metadata. -func FromAzBlobMetadataToCommonMetadata(m azblob.Metadata) Metadata { - return Metadata(m) -} - -// FromAzFileMetadataToCommonMetadata converts azfile's metadata to common metadata. -func FromAzFileMetadataToCommonMetadata(m azfile.Metadata) Metadata { - return Metadata(m) -} - // Marshal marshals metadata to string. func (m Metadata) Marshal() (string, error) { b, err := json.Marshal(m) @@ -1156,7 +1137,8 @@ func StringToMetadata(metadataString string) (Metadata, error) { return Metadata{}, errors.New("metadata names must conform to C# naming rules (https://learn.microsoft.com/en-us/rest/api/storageservices/naming-and-referencing-containers--blobs--and-metadata#metadata-names)") } - metadataMap[cKey] = cVal + finalValue := cVal + metadataMap[cKey] = &finalValue cKey = "" cVal = "" keySet = false @@ -1172,7 +1154,8 @@ func StringToMetadata(metadataString string) (Metadata, error) { } if cKey != "" { - metadataMap[cKey] = cVal + finalValue := cVal + metadataMap[cKey] = &finalValue } } return metadataMap, nil @@ -1215,8 +1198,8 @@ func isValidMetadataKeyFirstChar(c byte) bool { } func (m Metadata) ExcludeInvalidKey() (retainedMetadata Metadata, excludedMetadata Metadata, invalidKeyExists bool) { - retainedMetadata = make(map[string]string) - excludedMetadata = make(map[string]string) + retainedMetadata = make(map[string]*string) + excludedMetadata = make(map[string]*string) for k, v := range m { if isValidMetadataKey(k) { retainedMetadata[k] = v @@ -1232,16 +1215,6 @@ func (m Metadata) ExcludeInvalidKey() (retainedMetadata Metadata, excludedMetada // BlobTags is a map of key-value pair type BlobTags map[string]string -// ToAzBlobTagsMap converts BlobTagsMap to azblob's BlobTagsMap -func (bt BlobTags) ToAzBlobTagsMap() azblob.BlobTagsMap { - return azblob.BlobTagsMap(bt) -} - -//// FromAzBlobTagsMapToCommonBlobTags converts azblob's BlobTagsMap to common BlobTags -// func FromAzBlobTagsMapToCommonBlobTags(azbt azblob.BlobTagsMap) BlobTags { -// return BlobTags(azbt) -// } - func (bt BlobTags) ToString() string { lst := make([]string, 0) for k, v := range bt { @@ -1283,7 +1256,7 @@ var metadataKeyRenameErrStr = "failed to rename invalid metadata key %q" // Note: To keep first version simple, whenever collision is found during key resolving, error will be returned. // This can be further improved once any user feedback get. 
func (m Metadata) ResolveInvalidKey() (resolvedMetadata Metadata, err error) { - resolvedMetadata = make(map[string]string) + resolvedMetadata = make(map[string]*string) hasCollision := func(name string) bool { _, hasCollisionToOrgNames := m[name] @@ -1293,18 +1266,22 @@ func (m Metadata) ResolveInvalidKey() (resolvedMetadata Metadata, err error) { } for k, v := range m { + value := v + valueString := &value + key := k + keyString := &key if !isValidMetadataKey(k) { validKey := metadataKeyInvalidCharRegex.ReplaceAllString(k, "_") renamedKey := metadataRenamedKeyPrefix + validKey keyForRenamedOriginalKey := metadataKeyForRenamedOriginalKeyPrefix + validKey if hasCollision(renamedKey) || hasCollision(keyForRenamedOriginalKey) { - return nil, fmt.Errorf(metadataKeyRenameErrStr, k) + return nil, fmt.Errorf(metadataKeyRenameErrStr, *keyString) } - resolvedMetadata[renamedKey] = v - resolvedMetadata[keyForRenamedOriginalKey] = k + resolvedMetadata[renamedKey] = *valueString + resolvedMetadata[keyForRenamedOriginalKey] = keyString } else { - resolvedMetadata[k] = v + resolvedMetadata[k] = *valueString } } @@ -1335,27 +1312,27 @@ type ResourceHTTPHeaders struct { CacheControl string } -// ToAzBlobHTTPHeaders converts ResourceHTTPHeaders to azblob's BlobHTTPHeaders. -func (h ResourceHTTPHeaders) ToAzBlobHTTPHeaders() azblob.BlobHTTPHeaders { - return azblob.BlobHTTPHeaders{ - ContentType: h.ContentType, - ContentMD5: h.ContentMD5, - ContentEncoding: h.ContentEncoding, - ContentLanguage: h.ContentLanguage, - ContentDisposition: h.ContentDisposition, - CacheControl: h.CacheControl, +// ToBlobHTTPHeaders converts ResourceHTTPHeaders to blob's HTTPHeaders. +func (h ResourceHTTPHeaders) ToBlobHTTPHeaders() blob.HTTPHeaders { + return blob.HTTPHeaders{ + BlobContentType: &h.ContentType, + BlobContentMD5: h.ContentMD5, + BlobContentEncoding: &h.ContentEncoding, + BlobContentLanguage: &h.ContentLanguage, + BlobContentDisposition: &h.ContentDisposition, + BlobCacheControl: &h.CacheControl, } } -// ToAzFileHTTPHeaders converts ResourceHTTPHeaders to azfile's FileHTTPHeaders. -func (h ResourceHTTPHeaders) ToAzFileHTTPHeaders() azfile.FileHTTPHeaders { - return azfile.FileHTTPHeaders{ - ContentType: h.ContentType, +// ToFileHTTPHeaders converts ResourceHTTPHeaders to sharefile's HTTPHeaders. +func (h ResourceHTTPHeaders) ToFileHTTPHeaders() sharefile.HTTPHeaders { + return sharefile.HTTPHeaders{ + ContentType: &h.ContentType, ContentMD5: h.ContentMD5, - ContentEncoding: h.ContentEncoding, - ContentLanguage: h.ContentLanguage, - ContentDisposition: h.ContentDisposition, - CacheControl: h.CacheControl, + ContentEncoding: &h.ContentEncoding, + ContentLanguage: &h.ContentLanguage, + ContentDisposition: &h.ContentDisposition, + CacheControl: &h.CacheControl, } } @@ -1581,63 +1558,6 @@ func (p PreservePermissionsOption) IsTruthy() bool { } } -//////////////////////////////////////////////////////////////// - -// CpkScopeInfo specifies the name of the encryption scope to use to encrypt the data provided in the request. -// If not specified, encryption is performed with the default account encryption scope. -// For more information, see Encryption at Rest for Azure Storage Services. -type CpkScopeInfo struct { - EncryptionScope *string -} - -func (csi CpkScopeInfo) Marshal() (string, error) { - result, err := json.Marshal(csi) - if err != nil { - return "", err - } - return string(result), nil -} - -type CpkInfo struct { - // The algorithm used to produce the encryption key hash. 
- // Currently, the only accepted value is "AES256". - // Must be provided if the x-ms-encryption-key header is provided. - EncryptionAlgorithm *string - - // Optional. Specifies the encryption key to use to encrypt the data provided in the request. - // If not specified, encryption is performed with the root account encryption key. - EncryptionKey *string - - // The SHA-256 hash of the provided encryption key. - // Must be provided if the x-ms-encryption-key header is provided. - EncryptionKeySha256 *string -} - -func (csi CpkInfo) Empty() bool { - return csi.EncryptionKey == nil || csi.EncryptionKeySha256 == nil -} - -func (csi CpkInfo) Marshal() (string, error) { - result, err := json.Marshal(csi) - if err != nil { - return "", err - } - return string(result), nil -} - -func ToClientProvidedKeyOptions(cpkInfo CpkInfo, cpkScopeInfo CpkScopeInfo) azblob.ClientProvidedKeyOptions { - if cpkInfo.Empty() && cpkScopeInfo.EncryptionScope == nil { - return azblob.ClientProvidedKeyOptions{} - } - - return azblob.ClientProvidedKeyOptions{ - EncryptionKey: cpkInfo.EncryptionKey, - EncryptionAlgorithm: azblob.EncryptionAlgorithmAES256, - EncryptionKeySha256: cpkInfo.EncryptionKeySha256, - EncryptionScope: cpkScopeInfo.EncryptionScope, - } -} - type CpkOptions struct { // Optional flag to encrypt user data with user provided key. // Key is provide in the REST request itself @@ -1652,10 +1572,20 @@ type CpkOptions struct { IsSourceEncrypted bool } -func GetClientProvidedKey(options CpkOptions) azblob.ClientProvidedKeyOptions { - _cpkInfo := GetCpkInfo(options.CpkInfo) - _cpkScopeInfo := GetCpkScopeInfo(options.CpkScopeInfo) - return ToClientProvidedKeyOptions(_cpkInfo, _cpkScopeInfo) +func (options CpkOptions) GetCPKInfo() *blob.CPKInfo { + if !options.IsSourceEncrypted { + return nil + } else { + return GetCpkInfo(options.CpkInfo) + } +} + +func (options CpkOptions) GetCPKScopeInfo() *blob.CPKScopeInfo { + if !options.IsSourceEncrypted { + return nil + } else { + return GetCpkScopeInfo(options.CpkScopeInfo) + } } // ////////////////////////////////////////////////////////////////////////////// @@ -1701,14 +1631,14 @@ func (rpt RehydratePriorityType) String() string { return enum.StringInt(rpt, reflect.TypeOf(rpt)) } -func (rpt RehydratePriorityType) ToRehydratePriorityType() azblob.RehydratePriorityType { +func (rpt RehydratePriorityType) ToRehydratePriorityType() blob.RehydratePriority { switch rpt { case ERehydratePriorityType.None(), ERehydratePriorityType.Standard(): - return azblob.RehydratePriorityStandard + return blob.RehydratePriorityStandard case ERehydratePriorityType.High(): - return azblob.RehydratePriorityHigh + return blob.RehydratePriorityHigh default: - return azblob.RehydratePriorityStandard + return blob.RehydratePriorityStandard } } diff --git a/common/fe-ste-models_test.go b/common/fe-ste-models_test.go index 7321034d8..31a5218f5 100644 --- a/common/fe-ste-models_test.go +++ b/common/fe-ste-models_test.go @@ -90,35 +90,45 @@ func TestIsJobDone(t *testing.T) { } func getInvalidMetadataSample() common.Metadata { - m := make(map[string]string) + temp := make(map[string]string) // number could not be first char for azure metadata key. 
- m["1abc"] = "v:1abc" + temp["1abc"] = "v:1abc" // special char - m["a!@#"] = "v:a!@#" - m["a-metadata-samplE"] = "v:a-metadata-samplE" + temp["a!@#"] = "v:a!@#" + temp["a-metadata-samplE"] = "v:a-metadata-samplE" // valid metadata - m["abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRUSTUVWXYZ1234567890_"] = "v:abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRUSTUVWXYZ1234567890_" - m["Am"] = "v:Am" - m["_123"] = "v:_123" + temp["abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRUSTUVWXYZ1234567890_"] = "v:abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRUSTUVWXYZ1234567890_" + temp["Am"] = "v:Am" + temp["_123"] = "v:_123" - return m + return toCommonMetadata(temp) } func getValidMetadataSample() common.Metadata { - m := make(map[string]string) - m["Key"] = "value" + m := make(map[string]*string) + v := "value" + m["Key"] = &v return m } -func validateMapEqual(a *assert.Assertions, m1 map[string]string, m2 map[string]string) { +func toCommonMetadata(temp map[string]string) common.Metadata { + m := make(map[string]*string) + for k, v := range temp { + value := v + m[k] = &value + } + return m +} + +func validateMapEqual(a *assert.Assertions, m1 map[string]*string, m2 map[string]string) { a.Equal(len(m2), len(m1)) for k1, v1 := range m1 { - a.Equal(v1, m2[k1]) + a.Equal(*v1, m2[k1]) } } @@ -162,9 +172,9 @@ func TestMetadataResolveInvalidKey(t *testing.T) { // In this phase we keep the resolve logic easy, and whenever there is key resolving collision found, error reported. func TestMetadataResolveInvalidKeyNegative(t *testing.T) { a := assert.New(t) - mNegative1 := common.Metadata(map[string]string{"!": "!", "*": "*"}) - mNegative2 := common.Metadata(map[string]string{"!": "!", "rename__": "rename__"}) - mNegative3 := common.Metadata(map[string]string{"!": "!", "rename_key__": "rename_key__"}) + mNegative1 := toCommonMetadata(map[string]string{"!": "!", "*": "*"}) + mNegative2 := toCommonMetadata(map[string]string{"!": "!", "rename__": "rename__"}) + mNegative3 := toCommonMetadata(map[string]string{"!": "!", "rename_key__": "rename_key__"}) _, err := mNegative1.ResolveInvalidKey() a.NotNil(err) diff --git a/common/gcpModels.go b/common/gcpModels.go index 274b87492..16918d0f3 100644 --- a/common/gcpModels.go +++ b/common/gcpModels.go @@ -43,7 +43,8 @@ func (gie *GCPObjectInfoExtension) NewCommonMetadata() Metadata { for k, v := range gie.ObjectInfo.Metadata { if len(k) > gcpMetadataPrefixLen { if prefix := k[0:gcpMetadataPrefixLen]; strings.EqualFold(prefix, gcpMetadataPrefix) { - md[k[gcpMetadataPrefixLen:]] = v + value := v + md[k[gcpMetadataPrefixLen:]] = &value } } } diff --git a/common/genericResourceURLParts.go b/common/genericResourceURLParts.go index c95f18624..8a7acc0c0 100644 --- a/common/genericResourceURLParts.go +++ b/common/genericResourceURLParts.go @@ -2,11 +2,10 @@ package common import ( "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + sharefile "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" "net/url" - "github.com/Azure/azure-storage-blob-go/azblob" - "github.com/Azure/azure-storage-file-go/azfile" - "github.com/Azure/azure-storage-azcopy/v10/azbfs" ) @@ -16,8 +15,8 @@ import ( // The above suggestion would be preferable to continuing to expand this (due to 4x code dupe for every function)-- it's just a bridge over a LARGE gap for now. 
type GenericResourceURLParts struct { location Location // underlying location selects which URLParts we're using - blobURLParts azblob.BlobURLParts - fileURLParts azfile.FileURLParts + blobURLParts blob.URLParts + fileURLParts sharefile.URLParts bfsURLParts azbfs.BfsURLParts s3URLParts S3URLParts gcpURLParts GCPURLParts @@ -25,20 +24,21 @@ type GenericResourceURLParts struct { func NewGenericResourceURLParts(resourceURL url.URL, location Location) GenericResourceURLParts { g := GenericResourceURLParts{location: location} + var err error switch location { case ELocation.Blob(): - g.blobURLParts = azblob.NewBlobURLParts(resourceURL) + g.blobURLParts, err = blob.ParseURL(resourceURL.String()) + PanicIfErr(err) case ELocation.File(): - g.fileURLParts = azfile.NewFileURLParts(resourceURL) + g.fileURLParts, err = sharefile.ParseURL(resourceURL.String()) + PanicIfErr(err) case ELocation.BlobFS(): g.bfsURLParts = azbfs.NewBfsURLParts(resourceURL) case ELocation.S3(): - var err error g.s3URLParts, err = NewS3URLParts(resourceURL) PanicIfErr(err) case ELocation.GCP(): - var err error g.gcpURLParts, err = NewGCPURLParts(resourceURL) PanicIfErr(err) default: @@ -48,7 +48,7 @@ func NewGenericResourceURLParts(resourceURL url.URL, location Location) GenericR return g } -func (g GenericResourceURLParts) GetContainerName() string { +func (g *GenericResourceURLParts) GetContainerName() string { switch g.location { case ELocation.Blob(): return g.blobURLParts.ContainerName @@ -65,7 +65,7 @@ func (g GenericResourceURLParts) GetContainerName() string { } } -func (g GenericResourceURLParts) GetObjectName() string { +func (g *GenericResourceURLParts) GetObjectName() string { switch g.location { case ELocation.Blob(): return g.blobURLParts.BlobName @@ -99,7 +99,7 @@ func (g *GenericResourceURLParts) SetObjectName(objectName string) { } } -func (g GenericResourceURLParts) String() string { +func (g *GenericResourceURLParts) String() string { var URLOut url.URL switch g.location { @@ -108,9 +108,9 @@ func (g GenericResourceURLParts) String() string { case ELocation.GCP(): return g.gcpURLParts.String() case ELocation.Blob(): - URLOut = g.blobURLParts.URL() + return g.blobURLParts.String() case ELocation.File(): - URLOut = g.fileURLParts.URL() + return g.fileURLParts.String() case ELocation.BlobFS(): URLOut = g.bfsURLParts.URL() @@ -121,12 +121,18 @@ func (g GenericResourceURLParts) String() string { return URLOut.String() } -func (g GenericResourceURLParts) URL() url.URL { +func (g *GenericResourceURLParts) URL() url.URL { switch g.location { case ELocation.Blob(): - return g.blobURLParts.URL() + u := g.blobURLParts.String() + parsedURL, err := url.Parse(u) + PanicIfErr(err) + return *parsedURL case ELocation.File(): - return g.fileURLParts.URL() + u := g.fileURLParts.String() + parsedURL, err := url.Parse(u) + PanicIfErr(err) + return *parsedURL case ELocation.BlobFS(): return g.bfsURLParts.URL() case ELocation.S3(): diff --git a/common/iff.go b/common/iff.go index 9e6b29c32..f4654beaf 100644 --- a/common/iff.go +++ b/common/iff.go @@ -20,93 +20,20 @@ package common +// TODO : Remove this? 
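// Illustrative sketch of the Track 2 parsing call adopted above: blob.ParseURL takes a
// raw URL string (the Track 1 code took a url.URL) and returns blob.URLParts with the
// container and blob names already split out. The sample URL is made up.
import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

func exampleParseBlobURL() error {
	parts, err := blob.ParseURL("https://myaccount.blob.core.windows.net/mycontainer/dir/file.txt")
	if err != nil {
		return err
	}
	fmt.Println(parts.ContainerName, parts.BlobName) // mycontainer dir/file.txt
	return nil
}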
// GetBlocksRoundedUp returns the number of blocks given size, rounded up func GetBlocksRoundedUp(size uint64, blockSize uint64) uint16 { - return uint16(size/blockSize) + Iffuint16((size%blockSize) == 0, 0, 1) + return uint16(size/blockSize) + uint16(Iff((size%blockSize) == 0, 0, 1)) } -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// - -// inline if functions -func IffError(test bool, trueVal, falseVal error) error { - if test { - return trueVal - } - return falseVal -} - -func IffString(test bool, trueVal, falseVal string) string { - if test { - return trueVal - } - return falseVal -} - -func IffUint8(test bool, trueVal, falseVal uint8) byte { - if test { - return trueVal - } - return falseVal -} - -func Iffint8(test bool, trueVal, falseVal int8) int8 { - if test { - return trueVal - } - return falseVal -} - -func Iffuint16(test bool, trueVal, falseVal uint16) uint16 { - if test { - return trueVal - } - return falseVal -} - -func Iffint16(test bool, trueVal, falseVal int16) int16 { - if test { - return trueVal - } - return falseVal -} - -func Iffuint32(test bool, trueVal, falseVal uint32) uint32 { - if test { - return trueVal - } - return falseVal -} - -func Iffint32(test bool, trueVal, falseVal int32) int32 { - if test { - return trueVal - } - return falseVal -} - -func Iffuint64(test bool, trueVal, falseVal uint64) uint64 { - if test { - return trueVal - } - return falseVal -} - -func Iffint64(test bool, trueVal, falseVal int64) int64 { - if test { - return trueVal - } - return falseVal -} - -func Iffloat64(test bool, trueVal, falseVal float64) float64 { +func Iff[T any](test bool, trueVal, falseVal T) T { if test { return trueVal } return falseVal } -// used to get properties in a safe, but not so verbose manner -func IffStringNotNil(wanted *string, instead string) string { +func IffNotNil[T any](wanted *T, instead T) T { if wanted == nil { return instead } diff --git a/common/logger.go b/common/logger.go index 6b30fda39..1329ac8c1 100644 --- a/common/logger.go +++ b/common/logger.go @@ -22,6 +22,8 @@ package common import ( "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + sharefile "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" "log" "net/url" "os" @@ -143,7 +145,7 @@ func (jl jobLogger) Panic(err error) { const TryEquals string = "Try=" // TODO: refactor so that this can be used by the retry policies too? So that when you search the logs for Try= you are guaranteed to find both types of retry (i.e. request send retries, and body read retries) -func NewReadLogFunc(logger ILogger, fullUrl *url.URL) func(int, error, int64, int64, bool) { +func NewV1ReadLogFunc(logger ILogger, fullUrl *url.URL) func(int, error, int64, int64, bool) { redactedUrl := URLStringExtension(fullUrl.String()).RedactSecretQueryParamForLogging() return func(failureCount int, err error, offset int64, count int64, willRetry bool) { @@ -169,6 +171,58 @@ func NewReadLogFunc(logger ILogger, fullUrl *url.URL) func(int, error, int64, in } } +func NewBlobReadLogFunc(logger ILogger, fullUrl string) func(int32, error, blob.HTTPRange, bool) { + redactedUrl := URLStringExtension(fullUrl).RedactSecretQueryParamForLogging() + + return func(failureCount int32, err error, r blob.HTTPRange, willRetry bool) { + retryMessage := "Will retry" + if !willRetry { + retryMessage = "Will NOT retry" + } + logger.Log(pipeline.LogInfo, fmt.Sprintf( + "Error reading body of reply. Next try (if any) will be %s%d. %s. Error: %s. 
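// Illustrative usage of the two generics that replace the removed per-type Iff* helpers
// above; assumes it sits in the same common package so Iff and IffNotNil are in scope,
// and the values are made up.
func exampleIff(size, blockSize uint64, contentType *string) (uint64, string) {
	extraBlock := Iff(size%blockSize == 0, uint64(0), 1)      // ternary-style selection for any T
	ct := IffNotNil(contentType, "application/octet-stream")  // safe *string read with a default
	return extraBlock, ct
}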
Offset: %d Count: %d URL: %s", + TryEquals, // so that retry wording for body-read retries is similar to that for URL-hitting retries + + // We log the number of the NEXT try, not the failure just done, so that users searching the log for "Try=2" + // will find ALL retries, both the request send retries (which are logged as try 2 when they are made) and + // body read retries (for which only the failure is logged - so if we did the actual failure number, there would be + // not Try=2 in the logs if the retries work). + failureCount+1, + + retryMessage, + err, + r.Offset, + r.Count, + redactedUrl)) + } +} + +func NewFileReadLogFunc(logger ILogger, fullUrl string) func(int32, error, sharefile.HTTPRange, bool) { + redactedUrl := URLStringExtension(fullUrl).RedactSecretQueryParamForLogging() + + return func(failureCount int32, err error, r sharefile.HTTPRange, willRetry bool) { + retryMessage := "Will retry" + if !willRetry { + retryMessage = "Will NOT retry" + } + logger.Log(pipeline.LogInfo, fmt.Sprintf( + "Error reading body of reply. Next try (if any) will be %s%d. %s. Error: %s. Offset: %d Count: %d URL: %s", + TryEquals, // so that retry wording for body-read retries is similar to that for URL-hitting retries + + // We log the number of the NEXT try, not the failure just done, so that users searching the log for "Try=2" + // will find ALL retries, both the request send retries (which are logged as try 2 when they are made) and + // body read retries (for which only the failure is logged - so if we did the actual failure number, there would be + // not Try=2 in the logs if the retries work). + failureCount+1, + + retryMessage, + err, + r.Offset, + r.Count, + redactedUrl)) + } +} + func IsForceLoggingDisabled() bool { return GetLifecycleMgr().IsForceLoggingDisabled() } diff --git a/common/oauthTokenManager.go b/common/oauthTokenManager.go index fc271cd4a..36d388aa2 100644 --- a/common/oauthTokenManager.go +++ b/common/oauthTokenManager.go @@ -21,28 +21,24 @@ package common import ( - "bufio" "context" - "crypto/rsa" - "crypto/x509" "encoding/json" - "encoding/pem" "errors" "fmt" - "io" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/go-autorest/autorest/date" "net" "net/http" + "net/url" "os" - "path" "path/filepath" - "runtime" "strconv" "strings" - "syscall" "time" - "golang.org/x/crypto/pkcs12" - "github.com/Azure/go-autorest/autorest/adal" ) @@ -53,17 +49,12 @@ const ApplicationID = "579a7132-0e58-4d80-b1e1-7a1e2d337859" // Resource used in azure storage OAuth authentication const Resource = "https://storage.azure.com" const MDResource = "https://disk.azure.com/" // There must be a trailing slash-- The service checks explicitly for "https://disk.azure.com/" -const DefaultTenantID = "common" -const DefaultActiveDirectoryEndpoint = "https://login.microsoftonline.com" -const IMDSAPIVersionArcVM = "2019-11-01" -const IMDSAPIVersionAzureVM = "2018-02-01" -const MSIEndpointAzureVM = "http://169.254.169.254/metadata/identity/oauth2/token" -const MSIEndpointArcVM = "http://127.0.0.1:40342/metadata/identity/oauth2/token" -// Refer to https://docs.microsoft.com/en-us/windows/win32/winsock/windows-sockets-error-codes-2 for details -const WSAECONNREFUSED = 10061 +const StorageScope = "https://storage.azure.com/.default" +const ManagedDiskScope = "https://disk.azure.com//.default" // There must be a trailing slash-- 
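// Hedged sketch of where a callback with this shape plugs in: the Track 2 retry reader's
// OnFailedRead hook. It assumes blob.RetryReaderOptions exposes OnFailedRead with the
// signature func(int32, error, blob.HTTPRange, bool) and that DownloadStream responses
// offer NewRetryReader; treat both as assumptions rather than facts confirmed by this
// patch. Names and the MaxRetries value are illustrative.
import (
	"context"
	"io"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

func exampleRetryRead(ctx context.Context, client *blob.Client, logger ILogger) error {
	resp, err := client.DownloadStream(ctx, nil)
	if err != nil {
		return err
	}
	body := resp.NewRetryReader(ctx, &blob.RetryReaderOptions{
		MaxRetries:   5,
		OnFailedRead: NewBlobReadLogFunc(logger, client.URL()), // log every body-read retry
	})
	defer body.Close()
	_, err = io.Copy(io.Discard, body)
	return err
}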
The service checks explicitly for "https://disk.azure.com/" -var DefaultTokenExpiryWithinThreshold = time.Minute * 10 +const DefaultTenantID = "common" +const DefaultActiveDirectoryEndpoint = "https://login.microsoftonline.com" // UserOAuthTokenManager for token management. type UserOAuthTokenManager struct { @@ -140,309 +131,92 @@ func (uotm *UserOAuthTokenManager) GetTokenInfo(ctx context.Context) (*OAuthToke return tokenInfo, nil } -// MSILogin tries to get token from MSI, persist indicates whether to cache the token on local disk. -func (uotm *UserOAuthTokenManager) MSILogin(ctx context.Context, identityInfo IdentityInfo, persist bool) (*OAuthTokenInfo, error) { - if err := identityInfo.Validate(); err != nil { - return nil, err +func (uotm *UserOAuthTokenManager) validateAndPersistLogin(oAuthTokenInfo *OAuthTokenInfo, persist bool) error { + // Use default tenant ID and active directory endpoint, if nothing specified. + if oAuthTokenInfo.Tenant == "" { + oAuthTokenInfo.Tenant = DefaultTenantID } - - oAuthTokenInfo := &OAuthTokenInfo{ - Identity: true, - IdentityInfo: identityInfo, + if oAuthTokenInfo.ActiveDirectoryEndpoint == "" { + oAuthTokenInfo.ActiveDirectoryEndpoint = DefaultActiveDirectoryEndpoint } - token, err := oAuthTokenInfo.GetNewTokenFromMSI(ctx) + tc, err := oAuthTokenInfo.GetTokenCredential() if err != nil { - return nil, err + return err + } + scopes := []string{StorageScope} + _, err = tc.GetToken(context.TODO(), policy.TokenRequestOptions{Scopes: scopes}) + if err != nil { + return err } - oAuthTokenInfo.Token = *token uotm.stashedInfo = oAuthTokenInfo - if persist { + if persist && err == nil { err = uotm.credCache.SaveToken(*oAuthTokenInfo) if err != nil { - return nil, err + return err } } - return oAuthTokenInfo, nil + return nil } -// secretLoginNoUOTM non-interactively logs in with a client secret. -func secretLoginNoUOTM(tenantID, activeDirectoryEndpoint, secret, applicationID, resource string) (*OAuthTokenInfo, error) { - if tenantID == "" { - tenantID = DefaultTenantID - } - - if activeDirectoryEndpoint == "" { - activeDirectoryEndpoint = DefaultActiveDirectoryEndpoint - } - - if applicationID == "" { - return nil, fmt.Errorf("please supply your OWN application ID") - } - - oAuthTokenInfo := OAuthTokenInfo{ - Tenant: tenantID, - ActiveDirectoryEndpoint: activeDirectoryEndpoint, - } - - oauthConfig, err := adal.NewOAuthConfig(activeDirectoryEndpoint, tenantID) - if err != nil { - return nil, err - } - - spt, err := adal.NewServicePrincipalToken( - *oauthConfig, - applicationID, - secret, - resource, - ) - if err != nil { - return nil, err - } - - err = spt.Refresh() - if err != nil { - return nil, err +// MSILogin tries to get token from MSI, persist indicates whether to cache the token on local disk. +func (uotm *UserOAuthTokenManager) MSILogin(identityInfo IdentityInfo, persist bool) error { + if err := identityInfo.Validate(); err != nil { + return err } - // Due to the nature of SPA, no refresh token is given. - // Thus, no refresh token is copied or needed. - oAuthTokenInfo.Token = spt.Token() - oAuthTokenInfo.ApplicationID = applicationID - oAuthTokenInfo.ServicePrincipalName = true - oAuthTokenInfo.SPNInfo = SPNInfo{ - Secret: secret, - CertPath: "", + oAuthTokenInfo := &OAuthTokenInfo{ + Identity: true, + IdentityInfo: identityInfo, } - return &oAuthTokenInfo, nil + return uotm.validateAndPersistLogin(oAuthTokenInfo, persist) } // SecretLogin is a UOTM shell for secretLoginNoUOTM. 
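// Minimal sketch of the validation step that validateAndPersistLogin performs below: any
// azcore.TokenCredential can be probed by requesting a token for the storage scope, and a
// non-nil error means the login is not usable. Function and parameter names are
// illustrative; StorageScope is the constant defined above.
import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
)

func probeStorageLogin(ctx context.Context, cred azcore.TokenCredential) error {
	_, err := cred.GetToken(ctx, policy.TokenRequestOptions{
		Scopes: []string{StorageScope}, // "https://storage.azure.com/.default"
	})
	return err
}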
-func (uotm *UserOAuthTokenManager) SecretLogin(tenantID, activeDirectoryEndpoint, secret, applicationID string, persist bool) (*OAuthTokenInfo, error) { - oAuthTokenInfo, err := secretLoginNoUOTM(tenantID, activeDirectoryEndpoint, secret, applicationID, Resource) - - if err != nil { - return nil, err - } - - uotm.stashedInfo = oAuthTokenInfo - if persist { - err = uotm.credCache.SaveToken(*oAuthTokenInfo) - if err != nil { - return nil, err - } - } - - return oAuthTokenInfo, nil -} - -// GetNewTokenFromSecret is a refresh shell for secretLoginNoUOTM -func (credInfo *OAuthTokenInfo) GetNewTokenFromSecret(ctx context.Context) (*adal.Token, error) { - targetResource := Resource - if credInfo.Token.Resource != "" && credInfo.Token.Resource != targetResource { - targetResource = credInfo.Token.Resource +func (uotm *UserOAuthTokenManager) SecretLogin(tenantID, activeDirectoryEndpoint, secret, applicationID string, persist bool) (error) { + oAuthTokenInfo := &OAuthTokenInfo{ + ServicePrincipalName: true, + Tenant: tenantID, + ActiveDirectoryEndpoint: activeDirectoryEndpoint, + ApplicationID: applicationID, + SPNInfo: SPNInfo{ + Secret: secret, + CertPath: "", + }, } - tokeninfo, err := secretLoginNoUOTM(credInfo.Tenant, credInfo.ActiveDirectoryEndpoint, credInfo.SPNInfo.Secret, credInfo.ApplicationID, targetResource) - - if err != nil { - return nil, err - } else { - return &tokeninfo.Token, nil - } + return uotm.validateAndPersistLogin(oAuthTokenInfo, persist) } -// Read a potentially encrypted PKCS block -func readPKCSBlock(block *pem.Block, secret []byte, parseFunc func([]byte) (interface{}, error)) (pk interface{}, err error) { - // Reduce code duplication by baking the parse functions into this - if x509.IsEncryptedPEMBlock(block) { //nolint:staticcheck - data, err := x509.DecryptPEMBlock(block, secret) //nolint:staticcheck - - if err == nil { - pk, err = parseFunc(data) - - if err != nil { - return nil, err - } - } else { - return nil, err - } - } else { - pk, err = parseFunc(block.Bytes) - - if err != nil { - return nil, err - } - } - return pk, err -} - -func certLoginNoUOTM(tenantID, activeDirectoryEndpoint, certPath, certPass, applicationID, resource string) (*OAuthTokenInfo, error) { +// CertLogin non-interactively logs in using a specified certificate, certificate password, and activedirectory endpoint. +func (uotm *UserOAuthTokenManager) CertLogin(tenantID, activeDirectoryEndpoint, certPath, certPass, applicationID string, persist bool) error { + // Use default tenant ID and active directory endpoint, if nothing specified. 
if tenantID == "" { tenantID = DefaultTenantID } - if activeDirectoryEndpoint == "" { activeDirectoryEndpoint = DefaultActiveDirectoryEndpoint } - - if applicationID == "" { - return nil, fmt.Errorf("please supply your OWN application ID") - } - - oAuthTokenInfo := OAuthTokenInfo{ + absCertPath, _ := filepath.Abs(certPath) + oAuthTokenInfo := &OAuthTokenInfo{ + ServicePrincipalName: true, Tenant: tenantID, ActiveDirectoryEndpoint: activeDirectoryEndpoint, + ApplicationID: applicationID, + SPNInfo: SPNInfo{ + Secret: certPass, + CertPath: absCertPath, + }, } - oauthConfig, err := adal.NewOAuthConfig(activeDirectoryEndpoint, tenantID) - if err != nil { - return nil, err - } - - certData, err := os.ReadFile(certPath) - if err != nil { - return nil, err - } - - var pk interface{} - var cert *x509.Certificate - - if path.Ext(certPath) == ".pfx" || path.Ext(certPath) == ".pkcs12" || path.Ext(certPath) == ".p12" { - pk, cert, err = pkcs12.Decode(certData, certPass) - - if err != nil { - return nil, err - } - } else if path.Ext(certPath) == ".pem" { - block, rest := pem.Decode(certData) - - for len(rest) != 0 || pk == nil || cert == nil { - if block != nil { - switch block.Type { - case "ENCRYPTED PRIVATE KEY": - pk, err = readPKCSBlock(block, []byte(certPass), x509.ParsePKCS8PrivateKey) - - if err != nil { - return nil, fmt.Errorf("encrypted private key block has invalid format OR your cert password may be incorrect") - } - case "RSA PRIVATE KEY": - pkcs1wrap := func(d []byte) (pk interface{}, err error) { - return x509.ParsePKCS1PrivateKey(d) // Wrap this so that function signatures agree. - } - - pk, err = readPKCSBlock(block, []byte(certPass), pkcs1wrap) - - if err != nil { - return nil, fmt.Errorf("rsa private key block has invalid format OR your cert password may be incorrect") - } - case "PRIVATE KEY": - pk, err = readPKCSBlock(block, []byte(certPass), x509.ParsePKCS8PrivateKey) - - if err != nil { - return nil, fmt.Errorf("private key block has invalid format") - } - case "CERTIFICATE": - tmpcert, err := x509.ParseCertificate(block.Bytes) - - // Skip this certificate if it's invalid or is a CA cert - if err == nil && !tmpcert.IsCA { - cert = tmpcert - } - default: - // Ignore this part of the pem file, don't know what it is. - } - } else { - break - } - - if len(rest) == 0 { - break - } - - block, rest = pem.Decode(rest) - } - - if pk == nil || cert == nil { - return nil, fmt.Errorf("could not find the required information (private key & cert) in the supplied .pem file") - } - } else { - return nil, fmt.Errorf("please supply either a .pfx, .pkcs12, .p12, or a .pem file containing a private key and a certificate") - } - - p, ok := pk.(*rsa.PrivateKey) - if !ok { - return nil, fmt.Errorf("only RSA private keys are supported") - } - - spt, err := adal.NewServicePrincipalTokenFromCertificate( - *oauthConfig, - applicationID, - cert, - p, - resource, - ) - if err != nil { - return nil, err - } - - err = spt.Refresh() - if err != nil { - return nil, err - } - - cpfq, _ := filepath.Abs(certPath) - - oAuthTokenInfo.Token = spt.Token() - oAuthTokenInfo.RefreshToken = oAuthTokenInfo.Token.RefreshToken - oAuthTokenInfo.ApplicationID = applicationID - oAuthTokenInfo.ServicePrincipalName = true - oAuthTokenInfo.SPNInfo = SPNInfo{ - Secret: certPass, - CertPath: cpfq, - } - - return &oAuthTokenInfo, nil -} - -// CertLogin non-interactively logs in using a specified certificate, certificate password, and activedirectory endpoint. 
-func (uotm *UserOAuthTokenManager) CertLogin(tenantID, activeDirectoryEndpoint, certPath, certPass, applicationID string, persist bool) (*OAuthTokenInfo, error) { - // TODO: Global default cert flag for true non interactive login? - // (Also could be useful if the user has multiple certificates they want to switch between in the same file.) - oAuthTokenInfo, err := certLoginNoUOTM(tenantID, activeDirectoryEndpoint, certPath, certPass, applicationID, Resource) - uotm.stashedInfo = oAuthTokenInfo - - if persist && err == nil { - err = uotm.credCache.SaveToken(*oAuthTokenInfo) - if err != nil { - return nil, err - } - } - - return oAuthTokenInfo, err -} - -// GetNewTokenFromCert refreshes a token manually from a certificate. -func (credInfo *OAuthTokenInfo) GetNewTokenFromCert(ctx context.Context) (*adal.Token, error) { - targetResource := Resource - if credInfo.Token.Resource != "" && credInfo.Token.Resource != targetResource { - targetResource = credInfo.Token.Resource - } - - tokeninfo, err := certLoginNoUOTM(credInfo.Tenant, credInfo.ActiveDirectoryEndpoint, credInfo.SPNInfo.CertPath, credInfo.SPNInfo.Secret, credInfo.ApplicationID, targetResource) - - if err != nil { - return nil, err - } else { - return &tokeninfo.Token, nil - } + return uotm.validateAndPersistLogin(oAuthTokenInfo, persist) } // UserLogin interactively logins in with specified tenantID and activeDirectoryEndpoint, persist indicates whether to // cache the token on local disk. -func (uotm *UserOAuthTokenManager) UserLogin(tenantID, activeDirectoryEndpoint string, persist bool) (*OAuthTokenInfo, error) { +func (uotm *UserOAuthTokenManager) UserLogin(tenantID, activeDirectoryEndpoint string, persist bool) error { // Use default tenant ID and active directory endpoint, if nothing specified. if tenantID == "" { tenantID = DefaultTenantID @@ -454,7 +228,7 @@ func (uotm *UserOAuthTokenManager) UserLogin(tenantID, activeDirectoryEndpoint s // Init OAuth config oauthConfig, err := adal.NewOAuthConfig(activeDirectoryEndpoint, tenantID) if err != nil { - return nil, err + return err } // Acquire the device code @@ -464,7 +238,7 @@ func (uotm *UserOAuthTokenManager) UserLogin(tenantID, activeDirectoryEndpoint s ApplicationID, Resource) if err != nil { - return nil, fmt.Errorf("failed to login with tenantID %q, Azure directory endpoint %q, %v", + return fmt.Errorf("failed to login with tenantID %q, Azure directory endpoint %q, %v", tenantID, activeDirectoryEndpoint, err) } @@ -480,7 +254,7 @@ func (uotm *UserOAuthTokenManager) UserLogin(tenantID, activeDirectoryEndpoint s // TODO: check if adal Go SDK has new method which supports context, currently ctrl-C can stop the login in console interactively. 
token, err := adal.WaitForUserCompletion(uotm.oauthClient, deviceCode) if err != nil { - return nil, fmt.Errorf("failed to login with tenantID %q, Azure directory endpoint %q, %v", + return fmt.Errorf("failed to login with tenantID %q, Azure directory endpoint %q, %v", tenantID, activeDirectoryEndpoint, err) } @@ -488,6 +262,7 @@ func (uotm *UserOAuthTokenManager) UserLogin(tenantID, activeDirectoryEndpoint s Token: *token, Tenant: tenantID, ActiveDirectoryEndpoint: activeDirectoryEndpoint, + ApplicationID: ApplicationID, } uotm.stashedInfo = &oAuthTokenInfo @@ -498,11 +273,11 @@ func (uotm *UserOAuthTokenManager) UserLogin(tenantID, activeDirectoryEndpoint s if persist { err = uotm.credCache.SaveToken(oAuthTokenInfo) if err != nil { - return nil, err + return err } } - return &oAuthTokenInfo, nil + return nil } // getCachedTokenInfo get a fresh token from local disk cache. @@ -619,6 +394,7 @@ const TokenRefreshSourceTokenStore = "tokenstore" // OAuthTokenInfo contains info necessary for refresh OAuth credentials. type OAuthTokenInfo struct { + azcore.TokenCredential `json:"-"` adal.Token Tenant string `json:"_tenant"` ActiveDirectoryEndpoint string `json:"_ad_endpoint"` @@ -672,27 +448,30 @@ func (identityInfo *IdentityInfo) Validate() error { // Refresh gets new token with token info. func (credInfo *OAuthTokenInfo) Refresh(ctx context.Context) (*adal.Token, error) { - if credInfo.TokenRefreshSource == TokenRefreshSourceTokenStore { - return credInfo.GetNewTokenFromTokenStore(ctx) - } - - if credInfo.Identity { - return credInfo.GetNewTokenFromMSI(ctx) + // TODO: I think this method is only necessary until datalake is migrated. + // Returns cached TokenCredential or creates a new one if it hasn't been created yet. + tc, err := credInfo.GetTokenCredential() + if err != nil { + return nil, err } - - if credInfo.ServicePrincipalName { - if credInfo.SPNInfo.CertPath != "" { - return credInfo.GetNewTokenFromCert(ctx) - } else { - return credInfo.GetNewTokenFromSecret(ctx) + if credInfo.TokenRefreshSource == "tokenstore" || credInfo.Identity || credInfo.ServicePrincipalName { + scopes := []string{StorageScope} + t, err := tc.GetToken(ctx, policy.TokenRequestOptions{Scopes: scopes}) + if err != nil { + return nil, err + } + return &adal.Token{ + AccessToken: t.Token, + ExpiresOn: json.Number(strconv.FormatInt(int64(t.ExpiresOn.Sub(date.UnixEpoch())/time.Second), 10)), + }, nil + } else { + if dcc, ok := tc.(*DeviceCodeCredential); ok { + return dcc.RefreshTokenWithUserCredential(ctx, Resource) } } - - return credInfo.RefreshTokenWithUserCredential(ctx) + return nil, errors.New("invalid token info") } -var msiTokenHTTPClient = newAzcopyHTTPClient() - // Single instance token store credential cache shared by entire azcopy process. var tokenStoreCredCache = NewCredCacheInternalIntegration(CredCacheOptions{ KeyName: "azcopy/aadtoken/" + strconv.Itoa(os.Getpid()), @@ -700,224 +479,152 @@ var tokenStoreCredCache = NewCredCacheInternalIntegration(CredCacheOptions{ AccountName: "aadtoken/" + strconv.Itoa(os.Getpid()), }) -// GetNewTokenFromTokenStore gets token from token store. (Credential Manager in Windows, keyring in Linux and keychain in MacOS.) -// Note: This approach should only be used in internal integrations. 
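// Illustrative restatement of the conversion done in Refresh above: an azcore.AccessToken
// carries an absolute expiry time, while adal.Token stores ExpiresOn as seconds since the
// Unix epoch encoded as a json.Number. The helper name is made up.
import (
	"encoding/json"
	"strconv"
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/go-autorest/autorest/adal"
	"github.com/Azure/go-autorest/autorest/date"
)

func toADALToken(t azcore.AccessToken) adal.Token {
	secs := int64(t.ExpiresOn.Sub(date.UnixEpoch()) / time.Second)
	return adal.Token{
		AccessToken: t.Token,
		ExpiresOn:   json.Number(strconv.FormatInt(secs, 10)),
	}
}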
-func (credInfo *OAuthTokenInfo) GetNewTokenFromTokenStore(ctx context.Context) (*adal.Token, error) { - hasToken, err := tokenStoreCredCache.HasCachedToken() - if err != nil || !hasToken { - return nil, fmt.Errorf("no cached token found in Token Store Mode(SE), %v", err) +// IsEmpty returns if current OAuthTokenInfo is empty and doesn't contain any useful info. +func (credInfo OAuthTokenInfo) IsEmpty() bool { + if credInfo.Tenant == "" && credInfo.ActiveDirectoryEndpoint == "" && credInfo.Token.IsZero() && !credInfo.Identity { + return true } - tokenInfo, err := tokenStoreCredCache.LoadToken() - if err != nil { - return nil, fmt.Errorf("get cached token failed in Token Store Mode(SE), %v", err) - } + return false +} - return &(tokenInfo.Token), nil +// toJSON converts OAuthTokenInfo to json format. +func (credInfo OAuthTokenInfo) toJSON() ([]byte, error) { + return json.Marshal(credInfo) } -// queryIMDS sends a token request to the IMDS endpoint passed by the caller. This IMDS endpoint will be different for Azure and Arc VMs. -func (credInfo *OAuthTokenInfo) queryIMDS(ctx context.Context, msiEndpoint string, resource string, imdsAPIVersion string) (*http.Request, *http.Response, error) { - // Prepare request to get token from Azure Instance Metadata Service identity endpoint. - req, err := http.NewRequest("GET", msiEndpoint, nil) +func getAuthorityURL(tenantID, activeDirectoryEndpoint string) (*url.URL, error) { + u, err := url.Parse(activeDirectoryEndpoint) if err != nil { - return nil, nil, fmt.Errorf("failed to create request: %v", err) + return nil, err } + return u.Parse(tenantID) +} - params := req.URL.Query() - params.Set("resource", resource) - params.Set("api-version", imdsAPIVersion) +type TokenStoreCredential struct { +} - if credInfo.IdentityInfo.ClientID != "" { - params.Set("client_id", credInfo.IdentityInfo.ClientID) - } - if credInfo.IdentityInfo.ObjectID != "" { - params.Set("object_id", credInfo.IdentityInfo.ObjectID) - } - if credInfo.IdentityInfo.MSIResID != "" { - params.Set("msi_res_id", credInfo.IdentityInfo.MSIResID) - } - - req.URL.RawQuery = params.Encode() - req.Header.Set("Metadata", "true") - - // Set context. - req = req.WithContext(ctx) - // In case of some other process (Http Server) listening at 127.0.0.1:40342 , we do not want to wait forever for it to serve request - msiTokenHTTPClient.Timeout = 10 * time.Second - // Send request - resp, err := msiTokenHTTPClient.Do(req) - // Unset the timeout back - msiTokenHTTPClient.Timeout = 0 - return req, resp, err -} - -// isValidArcResponse checks if the key "Www-Authenticate" is unavailable in the header of an http response -func isValidArcResponse(resp *http.Response) bool { - wwwAuthenticateExists := false - if resp != nil && resp.Header != nil { - // Parameter for validity is whether "Www-Authenticate" exists in the response header - // "Www-Authenticate" contains the path to the challenge token file for Arc VMs - _, wwwAuthenticateExists = resp.Header["Www-Authenticate"] - } - - return wwwAuthenticateExists -} - -// fixupTokenJson corrects the value of JSON field "not_before" in the Byte slice from blank to a valid value and returns the corrected Byte slice. - -// Dated 15th Sep 2021. -// Token JSON returned by ARC-server endpoint API currently does not set a valid integral value for "not_before" key. -// If the token JSON already has "not_before" correctly set, this will be a no-op. 
-func fixupTokenJson(bytes []byte) []byte { - byteSliceToString := string(bytes) - separatorString := `"not_before":"` - stringSlice := strings.Split(byteSliceToString, separatorString) - - // OIDC token issuer returns an integer for "not_before" and not a string - if len(stringSlice) == 1 { - return bytes - } - - if stringSlice[1][0] != '"' { - return bytes - } - - // If the value of not_before is blank, set to "now - 5 sec" and return the updated slice - notBeforeTimeInteger := uint64(time.Now().Unix() - 5) - notBeforeTime := strconv.FormatUint(notBeforeTimeInteger, 10) - return []byte(stringSlice[0] + separatorString + notBeforeTime + stringSlice[1]) -} - -// GetNewTokenFromMSI gets token from Azure Instance Metadata Service identity endpoint. It first checks if the VM is registered with Azure Arc. Failing that case, it checks if it is an Azure VM. -// For details, please refer to https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview -// Note: The msiTokenHTTPClient timeout is has been reduced from 30 sec to 10 sec as IMDS endpoint is local to the machine. -// Without this change, if some router is configured to not return "ICMP unreachable" then it will take 30 secs to timeout and increase the response time. -// We are additionally checking Arc first, and then Azure VM because Arc endpoint is local so as to further reduce the response time of the Azure VM IMDS endpoint. -func (credInfo *OAuthTokenInfo) GetNewTokenFromMSI(ctx context.Context) (*adal.Token, error) { - targetResource := Resource - if credInfo.Token.Resource != "" && credInfo.Token.Resource != targetResource { - targetResource = credInfo.Token.Resource - } - - // Try Arc VM - req, resp, errArcVM := credInfo.queryIMDS(ctx, MSIEndpointArcVM, targetResource, IMDSAPIVersionArcVM) - if errArcVM != nil { - // Try Azure VM since there was an error in trying Arc VM - reqAzureVM, respAzureVM, errAzureVM := credInfo.queryIMDS(ctx, MSIEndpointAzureVM, targetResource, IMDSAPIVersionAzureVM) //nolint:staticcheck - if errAzureVM != nil { - var serr syscall.Errno - if errors.As(errArcVM, &serr) { - econnrefusedValue := -1 - switch runtime.GOOS { - case "linux": - econnrefusedValue = int(syscall.ECONNREFUSED) - case "windows": - econnrefusedValue = WSAECONNREFUSED - } - - if int(serr) == econnrefusedValue { - // If connection to Arc endpoint was refused - return nil, fmt.Errorf("please check whether MSI is enabled on this PC, to enable MSI please refer to https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm#enable-system-assigned-identity-on-an-existing-vm: %v", errAzureVM) - } - - // A syscall error other than ECONNREFUSED, implies we could not get the HTTP response - return nil, fmt.Errorf("error communicating with Arc IMDS endpoint (%s): %v", MSIEndpointArcVM, errArcVM) - } - - // queryIMDS failed, but not with a syscall error - // 1. Either it is an HTTP error, or - // 2. 
The HTTP request timed out - return nil, fmt.Errorf("invalid response received from Arc IMDS endpoint (%s), probably some unknown process listening: %v", MSIEndpointArcVM, errArcVM) - } +func (tsc *TokenStoreCredential) GetToken(_ context.Context, _ policy.TokenRequestOptions) (azcore.AccessToken, error) { + hasToken, err := tokenStoreCredCache.HasCachedToken() + if err != nil || !hasToken { + return azcore.AccessToken{}, fmt.Errorf("no cached token found in Token Store Mode(SE), %v", err) + } - // Arc IMDS failed with error, but Azure IMDS succeeded - req, resp = reqAzureVM, respAzureVM //nolint:staticcheck - } else if !isValidArcResponse(resp) { - // Not valid response from ARC IMDS endpoint. Perhaps some other process listening on it. Try Azure IMDS endpoint as fallback option. - reqAzureVM, respAzureVM, errAzureVM := credInfo.queryIMDS(ctx, MSIEndpointAzureVM, targetResource, IMDSAPIVersionAzureVM) //nolint:staticcheck - if errAzureVM != nil { - // Neither Arc nor Azure VM IMDS endpoint available. Can't use MSI. - return nil, fmt.Errorf("invalid response received from Arc IMDS endpoint (%s), probably some unknown process listening. If this an Azure VM, please check whether MSI is enabled, to enable MSI please refer to https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm#enable-system-assigned-identity-on-an-existing-vm: %v", MSIEndpointArcVM, errAzureVM) - } + tokenInfo, err := tokenStoreCredCache.LoadToken() + if err != nil { + return azcore.AccessToken{}, fmt.Errorf("get cached token failed in Token Store Mode(SE), %v", err) + } - // Azure VM IMDS endpoint ok! - req, resp = reqAzureVM, respAzureVM //nolint:staticcheck - } else { - // Valid response received from ARC IMDS endpoint. Proceed with the next step. - challengeTokenPath := strings.Split(resp.Header["Www-Authenticate"][0], "=")[1] - // Open the file. - challengeTokenFile, fileErr := os.Open(challengeTokenPath) - if os.IsPermission(fileErr) { - switch runtime.GOOS { - case "linux": - return nil, fmt.Errorf("permission level inadequate to read Arc challenge token file %s. Make sure you are running AzCopy as a user who is a member of the \"himds\" group or is superuser.", challengeTokenPath) - case "windows": - return nil, fmt.Errorf("permission level inadequate to read Arc challenge token file %s. Make sure you are running AzCopy as a user who is a member of the \"local Administrators\" group or the \"Hybrid Agent Extension Applications\" group.", challengeTokenPath) - default: - return nil, fmt.Errorf("error occurred while opening file %s in unsupported GOOS %s: %v", challengeTokenPath, runtime.GOOS, fileErr) - } - } else if fileErr != nil { - return nil, fmt.Errorf("error occurred while opening file %s: %v", challengeTokenPath, fileErr) - } + return azcore.AccessToken{ + Token: tokenInfo.AccessToken, + ExpiresOn: tokenInfo.Expires(), + }, nil - defer challengeTokenFile.Close() - // Create a new Reader for the file. - reader := bufio.NewReader(challengeTokenFile) - challengeToken, fileErr := reader.ReadString('\n') - if fileErr != nil && fileErr != io.EOF { - return nil, fmt.Errorf("error occurred while reading file %s: %v", challengeTokenPath, fileErr) - } +} - req.Header.Set("Authorization", "Basic "+challengeToken) +// GetNewTokenFromTokenStore gets token from token store. (Credential Manager in Windows, keyring in Linux and keychain in MacOS.) +// Note: This approach should only be used in internal integrations. 
+func (credInfo *OAuthTokenInfo) GetTokenStoreCredential() (azcore.TokenCredential, error) { + tc := &TokenStoreCredential{} + credInfo.TokenCredential = tc + return tc, nil +} - resp, errArcVM = msiTokenHTTPClient.Do(req) - if errArcVM != nil { - return nil, fmt.Errorf("failed to query token from Arc IMDS endpoint: %v", errArcVM) - } +func (credInfo *OAuthTokenInfo) GetManagedIdentityCredential() (azcore.TokenCredential, error) { + var id azidentity.ManagedIDKind + if credInfo.IdentityInfo.ClientID != "" { + id = azidentity.ClientID(credInfo.IdentityInfo.ClientID) + } else if credInfo.IdentityInfo.MSIResID != "" { + id = azidentity.ResourceID(credInfo.IdentityInfo.ObjectID) + } else if credInfo.IdentityInfo.ObjectID != "" { + return nil, fmt.Errorf("object ID is deprecated and no longer supported for managed identity. Please use client ID or resource ID instead") } - defer func() { // resp and Body should not be nil - _, _ = io.Copy(io.Discard, resp.Body) - resp.Body.Close() - }() + tc, err := azidentity.NewManagedIdentityCredential(&azidentity.ManagedIdentityCredentialOptions{ + ClientOptions: azcore.ClientOptions{ + Transport: newAzcopyHTTPClient(), + }, + ID: id, + }) + if err != nil { + return nil, err + } + credInfo.TokenCredential = tc + return tc, nil +} - // Check if the status code indicates success - // The request returns 200 currently, add 201 and 202 as well for possible extension. - if !(HTTPResponseExtension{Response: resp}).IsSuccessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted) { - return nil, fmt.Errorf("failed to get token from msi, status code: %v", resp.StatusCode) +func (credInfo *OAuthTokenInfo) GetClientCertificateCredential() (azcore.TokenCredential, error) { + authorityHost, err := getAuthorityURL(credInfo.Tenant, credInfo.ActiveDirectoryEndpoint) + if err != nil { + return nil, err + } + certData, err := os.ReadFile(credInfo.SPNInfo.CertPath) + if err != nil { + return nil, err } + certs, key, err := azidentity.ParseCertificates(certData, []byte(credInfo.SPNInfo.Secret)) + if err != nil { + return nil, err + } + tc, err := azidentity.NewClientCertificateCredential(credInfo.Tenant, credInfo.ApplicationID, certs, key, &azidentity.ClientCertificateCredentialOptions{ + ClientOptions: azcore.ClientOptions{ + Cloud: cloud.Configuration{ActiveDirectoryAuthorityHost: authorityHost.String()}, + Transport: newAzcopyHTTPClient(), + }, + }) + if err != nil { + return nil, err + } + credInfo.TokenCredential = tc + return tc, nil +} - b, err := io.ReadAll(resp.Body) +func (credInfo *OAuthTokenInfo) GetClientSecretCredential() (azcore.TokenCredential, error) { + authorityHost, err := getAuthorityURL(credInfo.Tenant, credInfo.ActiveDirectoryEndpoint) if err != nil { return nil, err } + tc, err := azidentity.NewClientSecretCredential(credInfo.Tenant, credInfo.ApplicationID, credInfo.SPNInfo.Secret, &azidentity.ClientSecretCredentialOptions{ + ClientOptions: azcore.ClientOptions{ + Cloud: cloud.Configuration{ActiveDirectoryAuthorityHost: authorityHost.String()}, + Transport: newAzcopyHTTPClient(), + }, + }) + if err != nil { + return nil, err + } + credInfo.TokenCredential = tc + return tc, nil +} + +type DeviceCodeCredential struct { + token adal.Token + aadEndpoint string + tenantID string + clientID string +} - result := &adal.Token{} - if len(b) > 0 { - b = ByteSliceExtension{ByteSlice: b}.RemoveBOM() - // Unmarshal will give an error for Go version >= 1.14 for a field with blank values. Arc-server endpoint API returns blank for "not_before" field. 
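// Hedged sketch of the azidentity managed-identity selection used above: a user-assigned
// identity may be addressed either by client ID or by full resource ID through the
// ManagedIdentityCredentialOptions.ID field. The IDs are placeholders and the function
// name is illustrative.
import (
	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func exampleManagedIdentity(clientID, resourceID string) (azcore.TokenCredential, error) {
	opts := &azidentity.ManagedIdentityCredentialOptions{}
	switch {
	case clientID != "":
		opts.ID = azidentity.ClientID(clientID)
	case resourceID != "":
		opts.ID = azidentity.ResourceID(resourceID)
	}
	return azidentity.NewManagedIdentityCredential(opts)
}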
- // TODO: Remove fixup once Arc team fixes the issue. - b = fixupTokenJson(b) - if err := json.Unmarshal(b, result); err != nil { - return nil, fmt.Errorf("failed to unmarshal response body: %v", err) +func (dcc *DeviceCodeCredential) GetToken(ctx context.Context, options policy.TokenRequestOptions) (azcore.AccessToken, error) { + waitDuration := dcc.token.Expires().Sub(time.Now().UTC()) / 2 + if dcc.token.WillExpireIn(waitDuration) { + resource := strings.TrimSuffix(options.Scopes[0], "/.default") + _, err := dcc.RefreshTokenWithUserCredential(ctx, resource) + if err != nil { + return azcore.AccessToken{}, err } - } else { - return nil, errors.New("failed to get token from msi") } - - return result, nil + return azcore.AccessToken{Token: dcc.token.AccessToken, ExpiresOn: dcc.token.Expires()}, nil } // RefreshTokenWithUserCredential gets new token with user credential through refresh. -func (credInfo *OAuthTokenInfo) RefreshTokenWithUserCredential(ctx context.Context) (*adal.Token, error) { - targetResource := Resource - if credInfo.Token.Resource != "" && credInfo.Token.Resource != targetResource { - targetResource = credInfo.Token.Resource +func (dcc *DeviceCodeCredential) RefreshTokenWithUserCredential(ctx context.Context, resource string) (*adal.Token, error) { + targetResource := resource + if dcc.token.Resource != "" && dcc.token.Resource != targetResource { + targetResource = dcc.token.Resource } - oauthConfig, err := adal.NewOAuthConfig(credInfo.ActiveDirectoryEndpoint, credInfo.Tenant) + oauthConfig, err := adal.NewOAuthConfig(dcc.aadEndpoint, dcc.tenantID) if err != nil { return nil, err } @@ -926,9 +633,9 @@ func (credInfo *OAuthTokenInfo) RefreshTokenWithUserCredential(ctx context.Conte // Use AzCopy's 1st party applicationID for refresh by default. spt, err := adal.NewServicePrincipalTokenFromManualToken( *oauthConfig, - IffString(credInfo.ClientID != "", credInfo.ClientID, ApplicationID), + Iff(dcc.clientID != "", dcc.clientID, ApplicationID), targetResource, - credInfo.Token) + dcc.token) if err != nil { return nil, err } @@ -938,21 +645,39 @@ func (credInfo *OAuthTokenInfo) RefreshTokenWithUserCredential(ctx context.Conte } newToken := spt.Token() + dcc.token = newToken return &newToken, nil } -// IsEmpty returns if current OAuthTokenInfo is empty and doesn't contain any useful info. -func (credInfo OAuthTokenInfo) IsEmpty() bool { - if credInfo.Tenant == "" && credInfo.ActiveDirectoryEndpoint == "" && credInfo.Token.IsZero() && !credInfo.Identity { - return true +func (credInfo *OAuthTokenInfo) GetDeviceCodeCredential() (azcore.TokenCredential, error) { + tc := &DeviceCodeCredential{token: credInfo.Token, aadEndpoint: credInfo.ActiveDirectoryEndpoint, tenantID: credInfo.Tenant, clientID: credInfo.ApplicationID} + credInfo.TokenCredential = tc + return tc, nil +} + +func (credInfo *OAuthTokenInfo) GetTokenCredential() (azcore.TokenCredential, error) { + // Token Credential is cached. + if credInfo.TokenCredential != nil { + return credInfo.TokenCredential, nil } - return false -} + if credInfo.TokenRefreshSource == TokenRefreshSourceTokenStore { + return credInfo.GetTokenStoreCredential() + } -// toJSON converts OAuthTokenInfo to json format. 
-func (credInfo OAuthTokenInfo) toJSON() ([]byte, error) { - return json.Marshal(credInfo) + if credInfo.Identity { + return credInfo.GetManagedIdentityCredential() + } + + if credInfo.ServicePrincipalName { + if credInfo.SPNInfo.CertPath != "" { + return credInfo.GetClientCertificateCredential() + } else { + return credInfo.GetClientSecretCredential() + } + } + + return credInfo.GetDeviceCodeCredential() } // jsonToTokenInfo converts bytes to OAuthTokenInfo diff --git a/common/prologueState.go b/common/prologueState.go index f72d1b80c..400863253 100644 --- a/common/prologueState.go +++ b/common/prologueState.go @@ -32,7 +32,7 @@ type PrologueState struct { LeadingBytes []byte } -func (ps PrologueState) GetInferredContentType(jptm cutdownJptm) string { +func (ps PrologueState) GetInferredContentType(jptm cutdownJptm) *string { headers, _, _, _ := jptm.ResourceDstData(ps.LeadingBytes) - return headers.ContentType + return &headers.ContentType } diff --git a/common/rpc-models.go b/common/rpc-models.go index 8b6d33580..80391fe22 100644 --- a/common/rpc-models.go +++ b/common/rpc-models.go @@ -1,12 +1,13 @@ package common import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" "net/url" "reflect" "strings" "time" - "github.com/Azure/azure-storage-blob-go/azblob" "github.com/JeffreyRichter/enum/enum" ) @@ -74,6 +75,15 @@ func (r ResourceString) FullURL() (*url.URL, error) { return u, err } +func (r ResourceString) String() (string, error) { + u, err := r.FullURL() + if err != nil { + return "", err + } else { + return u.String(), nil + } +} + // to be used when the value is assumed to be a local path // Using this signals "Yes, I really am ignoring the SAS and ExtraQuery on purpose", // and will result in a panic in the case of programmer error of calling this method @@ -131,7 +141,7 @@ type CopyJobPartOrderRequest struct { Fpo FolderPropertyOption // passed in from front-end to ensure that front-end and STE agree on the desired behaviour for the job SymlinkHandlingType SymlinkHandlingType // list of blobTypes to exclude. - ExcludeBlobType []azblob.BlobType + ExcludeBlobType []blob.BlobType SourceRoot ResourceString DestinationRoot ResourceString @@ -164,11 +174,11 @@ type CopyJobPartOrderRequest struct { // CredentialInfo contains essential credential info which need be transited between modules, // and used during creating Azure storage client Credential. 
type CredentialInfo struct { - CredentialType CredentialType - OAuthTokenInfo OAuthTokenInfo - S3CredentialInfo S3CredentialInfo - GCPCredentialInfo GCPCredentialInfo - SourceBlobToken azblob.Credential + CredentialType CredentialType + OAuthTokenInfo OAuthTokenInfo + S3CredentialInfo S3CredentialInfo + GCPCredentialInfo GCPCredentialInfo + S2SSourceTokenCredential azcore.TokenCredential } func (c CredentialInfo) WithType(credentialType CredentialType) CredentialInfo { diff --git a/common/s3Models.go b/common/s3Models.go index 180a9a557..861106f33 100644 --- a/common/s3Models.go +++ b/common/s3Models.go @@ -78,7 +78,8 @@ func (oie *ObjectInfoExtension) NewCommonMetadata() Metadata { for k, v := range oie.ObjectInfo.Metadata { if len(k) > s3MetadataPrefixLen { if prefix := k[0:s3MetadataPrefixLen]; strings.EqualFold(prefix, s3MetadataPrefix) { - md[k[s3MetadataPrefixLen:]] = v[0] + value := v[0] + md[k[s3MetadataPrefixLen:]] = &value } } } diff --git a/common/unixStatAdapter.go b/common/unixStatAdapter.go index 8e05e84cb..30c3cd8c8 100644 --- a/common/unixStatAdapter.go +++ b/common/unixStatAdapter.go @@ -1,7 +1,6 @@ package common import ( - "github.com/Azure/azure-storage-blob-go/azblob" "os" "strconv" "time" @@ -163,11 +162,11 @@ func (u UnixStatContainer) CTime() time.Time { // ReadStatFromMetadata is not fault-tolerant. If any given article does not parse, // it will throw an error instead of continuing on, as it may be considered incorrect to attempt to persist the rest of the data. // despite this function being used only in Downloads at the current moment, it still attempts to re-create as complete of a UnixStatAdapter as possible. -func ReadStatFromMetadata(metadata azblob.Metadata, contentLength int64) (UnixStatAdapter, error) { +func ReadStatFromMetadata(metadata Metadata, contentLength int64) (UnixStatAdapter, error) { s := UnixStatContainer{size: uint64(contentLength)} if mask, ok := metadata[LINUXStatxMaskMeta]; ok { - m, err := strconv.ParseUint(mask, 10, 32) + m, err := strconv.ParseUint(*mask, 10, 32) if err != nil { return s, err } @@ -177,7 +176,7 @@ func ReadStatFromMetadata(metadata azblob.Metadata, contentLength int64) (UnixSt // cover additional statx properties here if attr, ok := metadata[LINUXAttributeMeta]; ok { - a, err := strconv.ParseUint(attr, 10, 64) + a, err := strconv.ParseUint(*attr, 10, 64) if err != nil { return s, err } @@ -185,7 +184,7 @@ func ReadStatFromMetadata(metadata azblob.Metadata, contentLength int64) (UnixSt } if attr, ok := metadata[LINUXAttributeMaskMeta]; ok { - a, err := strconv.ParseUint(attr, 10, 64) + a, err := strconv.ParseUint(*attr, 10, 64) if err != nil { return s, err } @@ -193,7 +192,7 @@ func ReadStatFromMetadata(metadata azblob.Metadata, contentLength int64) (UnixSt } if btime, ok := metadata[LINUXBTimeMeta]; ok { - b, err := strconv.ParseInt(btime, 10, 64) + b, err := strconv.ParseInt(*btime, 10, 64) if err != nil { return s, err } @@ -202,7 +201,7 @@ func ReadStatFromMetadata(metadata azblob.Metadata, contentLength int64) (UnixSt // base stat properties if nlink, ok := metadata[POSIXNlinkMeta]; ok { - n, err := strconv.ParseUint(nlink, 10, 64) + n, err := strconv.ParseUint(*nlink, 10, 64) if err != nil { return s, err } @@ -210,7 +209,7 @@ func ReadStatFromMetadata(metadata azblob.Metadata, contentLength int64) (UnixSt } if owner, ok := metadata[POSIXOwnerMeta]; ok { - o, err := strconv.ParseUint(owner, 10, 32) + o, err := strconv.ParseUint(*owner, 10, 32) if err != nil { return s, err } @@ -218,7 +217,7 @@ func 
ReadStatFromMetadata(metadata azblob.Metadata, contentLength int64) (UnixSt } if group, ok := metadata[POSIXGroupMeta]; ok { - g, err := strconv.ParseUint(group, 10, 32) + g, err := strconv.ParseUint(*group, 10, 32) if err != nil { return s, err } @@ -226,7 +225,7 @@ func ReadStatFromMetadata(metadata azblob.Metadata, contentLength int64) (UnixSt } if mode, ok := metadata[POSIXModeMeta]; ok { - m, err := strconv.ParseUint(mode, 10, 32) + m, err := strconv.ParseUint(*mode, 10, 32) if err != nil { return s, err } @@ -235,7 +234,7 @@ func ReadStatFromMetadata(metadata azblob.Metadata, contentLength int64) (UnixSt } if inode, ok := metadata[POSIXINodeMeta]; ok { - ino, err := strconv.ParseUint(inode, 10, 64) + ino, err := strconv.ParseUint(*inode, 10, 64) if err != nil { return s, err } @@ -244,7 +243,7 @@ func ReadStatFromMetadata(metadata azblob.Metadata, contentLength int64) (UnixSt } if dev, ok := metadata[POSIXDevMeta]; ok { - d, err := strconv.ParseUint(dev, 10, 64) + d, err := strconv.ParseUint(*dev, 10, 64) if err != nil { return s, err } @@ -253,7 +252,7 @@ func ReadStatFromMetadata(metadata azblob.Metadata, contentLength int64) (UnixSt } if rdev, ok := metadata[POSIXRDevMeta]; ok { - rd, err := strconv.ParseUint(rdev, 10, 64) + rd, err := strconv.ParseUint(*rdev, 10, 64) if err != nil { return s, err } @@ -262,7 +261,7 @@ func ReadStatFromMetadata(metadata azblob.Metadata, contentLength int64) (UnixSt } if atime, ok := metadata[POSIXATimeMeta]; ok { - at, err := strconv.ParseInt(atime, 10, 64) + at, err := strconv.ParseInt(*atime, 10, 64) if err != nil { return s, err } @@ -271,7 +270,7 @@ func ReadStatFromMetadata(metadata azblob.Metadata, contentLength int64) (UnixSt } if mtime, ok := metadata[POSIXModTimeMeta]; ok { - mt, err := strconv.ParseInt(mtime, 10, 64) + mt, err := strconv.ParseInt(*mtime, 10, 64) if err != nil { return s, err } @@ -280,7 +279,7 @@ func ReadStatFromMetadata(metadata azblob.Metadata, contentLength int64) (UnixSt } if ctime, ok := metadata[POSIXCTimeMeta]; ok { - ct, err := strconv.ParseInt(ctime, 10, 64) + ct, err := strconv.ParseInt(*ctime, 10, 64) if err != nil { return s, err } @@ -337,21 +336,21 @@ const ( // Values cloned from x/sys/unix to avoid dependency S_ALLPERM = 0x777 ) -func ClearStatFromBlobMetadata(metadata azblob.Metadata) { +func ClearStatFromBlobMetadata(metadata Metadata) { for _, v := range AllLinuxProperties { delete(metadata, v) } } -func AddStatToBlobMetadata(s UnixStatAdapter, metadata azblob.Metadata) { +func AddStatToBlobMetadata(s UnixStatAdapter, metadata Metadata) { if s == nil { return } applyMode := func(mode os.FileMode) { - modes := map[uint32]string { - S_IFCHR: POSIXCharDeviceMeta, - S_IFBLK: POSIXBlockDeviceMeta, + modes := map[uint32]string{ + S_IFCHR: POSIXCharDeviceMeta, + S_IFBLK: POSIXBlockDeviceMeta, S_IFSOCK: POSIXSocketMeta, S_IFIFO: POSIXFIFOMeta, S_IFDIR: POSIXFolderMeta, @@ -359,7 +358,7 @@ func AddStatToBlobMetadata(s UnixStatAdapter, metadata azblob.Metadata) { } for modeToTest, metaToApply := range modes { - if mode & os.FileMode(modeToTest) == os.FileMode(modeToTest) { + if mode&os.FileMode(modeToTest) == os.FileMode(modeToTest) { tryAddMetadata(metadata, metaToApply, "true") } } @@ -440,10 +439,11 @@ func StatXReturned(mask uint32, want uint32) bool { return (mask & want) == want } -func tryAddMetadata(metadata azblob.Metadata, key, value string) { +func tryAddMetadata(metadata Metadata, key, value string) { if _, ok := metadata[key]; ok { return // Don't overwrite the user's metadata } - metadata[key] = 
value + v := value + metadata[key] = &v } diff --git a/common/version.go b/common/version.go index e001520ee..becef2f10 100644 --- a/common/version.go +++ b/common/version.go @@ -1,6 +1,6 @@ package common -const AzcopyVersion = "10.20.1" +const AzcopyVersion = "10.21.0-Preview" const UserAgent = "AzCopy/" + AzcopyVersion const S3ImportUserAgent = "S3Import " + UserAgent const GCPImportUserAgent = "GCPImport " + UserAgent diff --git a/e2etest/arm.go b/e2etest/arm.go index 1cc3f9c62..f638f5933 100644 --- a/e2etest/arm.go +++ b/e2etest/arm.go @@ -3,7 +3,7 @@ package e2etest import ( "encoding/json" "fmt" - "github.com/Azure/go-autorest/autorest/adal" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" "io" "net/http" "reflect" @@ -33,7 +33,7 @@ const ( ARMStatusCanceled = "Canceled" ) -func ResolveAzureAsyncOperation(OAuth *adal.ServicePrincipalToken, uri string, properties interface{}) (armResp *ARMAsyncResponse, err error) { +func ResolveAzureAsyncOperation(OAuth *azcore.AccessToken, uri string, properties interface{}) (armResp *ARMAsyncResponse, err error) { if properties != nil && reflect.TypeOf(properties).Kind() != reflect.Ptr { return nil, fmt.Errorf("properties must be a pointer (or nil)") } @@ -43,7 +43,7 @@ func ResolveAzureAsyncOperation(OAuth *adal.ServicePrincipalToken, uri string, p return nil, fmt.Errorf("failed to create request: %w", err) } - req.Header["Authorization"] = []string{"Bearer " + OAuth.OAuthToken()} + req.Header["Authorization"] = []string{"Bearer " + OAuth.Token} var resp *http.Response for { diff --git a/e2etest/config.go b/e2etest/config.go index 5cc70d50f..a849c69ff 100644 --- a/e2etest/config.go +++ b/e2etest/config.go @@ -21,9 +21,13 @@ package e2etest import ( + "context" "encoding/json" "fmt" - "github.com/Azure/go-autorest/autorest/adal" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" "os" "reflect" "strconv" @@ -129,7 +133,7 @@ type ManagedDiskConfig struct { SubscriptionID string ResourceGroupName string DiskName string - oauth *adal.ServicePrincipalToken + oauth *azcore.AccessToken } func (gim GlobalInputManager) GetMDConfig(accountType AccountType) (*ManagedDiskConfig, error) { @@ -155,7 +159,7 @@ func (gim GlobalInputManager) GetMDConfig(accountType AccountType) (*ManagedDisk return nil, fmt.Errorf("failed to parse config") // Outputting the error may reveal semi-sensitive info like subscription ID } - out.oauth, err = gim.GetOAuthCredential("https://management.core.windows.net/") + out.oauth, err = gim.GetOAuthCredential("https://management.core.windows.net/.default") if err != nil { return nil, fmt.Errorf("failed to refresh oauth token: %w", err) } @@ -163,30 +167,22 @@ func (gim GlobalInputManager) GetMDConfig(accountType AccountType) (*ManagedDisk return &out, nil } -func (gim GlobalInputManager) GetOAuthCredential(resource string) (*adal.ServicePrincipalToken, error) { - tenant, appID, clientSecret := gim.GetServicePrincipalAuth() +func (gim GlobalInputManager) GetOAuthCredential(resource string) (*azcore.AccessToken, error) { + tenantID, applicationID, secret := gim.GetServicePrincipalAuth() + activeDirectoryEndpoint := "https://login.microsoftonline.com" - var spt *adal.ServicePrincipalToken - - oauthConfig, err := adal.NewOAuthConfig("https://login.microsoftonline.com", tenant) + spn, err := azidentity.NewClientSecretCredential(tenantID, applicationID, secret,
&azidentity.ClientSecretCredentialOptions{ + ClientOptions: azcore.ClientOptions{ + Cloud: cloud.Configuration{ActiveDirectoryAuthorityHost: activeDirectoryEndpoint}, + }, + }) if err != nil { return nil, err } - spt, err = adal.NewServicePrincipalToken( // initialize the token - *oauthConfig, - appID, - clientSecret, - resource, - ) - if err != nil { - return nil, err - } + scopes := []string{resource} - err = spt.Refresh() // grab a token and return it. - if err != nil { - return nil, err - } + accessToken, err := spn.GetToken(context.TODO(), policy.TokenRequestOptions{Scopes: scopes}) - return spt, nil + return &accessToken, nil } diff --git a/e2etest/declarativeHelpers.go b/e2etest/declarativeHelpers.go index 7ed2c86a6..49b48e190 100644 --- a/e2etest/declarativeHelpers.go +++ b/e2etest/declarativeHelpers.go @@ -21,7 +21,7 @@ package e2etest import ( - "github.com/Azure/azure-storage-blob-go/azblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" "reflect" "strings" "testing" @@ -169,7 +169,7 @@ type params struct { isObjectDir bool debugSkipFiles []string // a list of localized filepaths to skip over on the first run in the STE. s2sPreserveAccessTier bool - accessTier azblob.AccessTierType + accessTier *blob.AccessTier checkMd5 common.HashValidationOption compareHash common.SyncHashType hashStorageMode common.HashStorageMode @@ -533,6 +533,9 @@ type hookHelper interface { // GetDestination returns the destination Resource Manager GetDestination() resourceManager + + // GetSource returns the source Resource Manager + GetSource() resourceManager } // ///// diff --git a/e2etest/declarativeResourceAdapters.go b/e2etest/declarativeResourceAdapters.go index f992e8569..6acf5cd1b 100644 --- a/e2etest/declarativeResourceAdapters.go +++ b/e2etest/declarativeResourceAdapters.go @@ -21,9 +21,11 @@ package e2etest import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/share" "github.com/Azure/azure-storage-azcopy/v10/sddl" - "github.com/Azure/azure-storage-blob-go/azblob" - "github.com/Azure/azure-storage-file-go/azfile" + "github.com/Azure/azure-storage-azcopy/v10/ste" ) func sval(s *string) string { @@ -43,102 +45,79 @@ type blobResourceAdapter struct { obj *testObject } -func (a blobResourceAdapter) toHeaders() azblob.BlobHTTPHeaders { +func (a blobResourceAdapter) toHeaders() *blob.HTTPHeaders { props := a.obj.creationProperties.contentHeaders if props == nil { - return azblob.BlobHTTPHeaders{} + return nil } - return azblob.BlobHTTPHeaders{ - ContentType: sval(props.contentType), - ContentMD5: props.contentMD5, - ContentEncoding: sval(props.contentEncoding), - ContentLanguage: sval(props.contentLanguage), - ContentDisposition: sval(props.contentDisposition), - CacheControl: sval(props.cacheControl), + return &blob.HTTPHeaders{ + BlobContentType: props.contentType, + BlobContentMD5: props.contentMD5, + BlobContentEncoding: props.contentEncoding, + BlobContentLanguage: props.contentLanguage, + BlobContentDisposition: props.contentDisposition, + BlobCacheControl: props.cacheControl, } } -func (a blobResourceAdapter) toMetadata() azblob.Metadata { - if a.obj.creationProperties.nameValueMetadata == nil { - a.obj.creationProperties.nameValueMetadata = azblob.Metadata{} - } - - if a.obj.creationProperties.posixProperties != nil { - a.obj.creationProperties.posixProperties.AddToMetadata(a.obj.creationProperties.nameValueMetadata) - } - - return 
a.obj.creationProperties.nameValueMetadata +type filesResourceAdapter struct { + obj *testObject } -func (a blobResourceAdapter) toBlobTags() azblob.BlobTagsMap { - if a.obj.creationProperties.blobTags == nil { - return azblob.BlobTagsMap{} +func (a filesResourceAdapter) toSMBProperties(c asserter) *file.SMBProperties { + return &file.SMBProperties{ + Attributes: a.toAttributes(c), + LastWriteTime: a.obj.creationProperties.lastWriteTime, } - return azblob.BlobTagsMap(a.obj.creationProperties.blobTags) } -//// - -type filesResourceAdapter struct { - obj *testObject +func (a filesResourceAdapter) toAttributes(c asserter) *file.NTFSFileAttributes { + if a.obj.creationProperties.smbAttributes != nil { + attr, err := ste.FileAttributesFromUint32(*a.obj.creationProperties.smbAttributes) + c.AssertNoErr(err) + return attr + } + return nil } -func (a filesResourceAdapter) toHeaders(c asserter, share azfile.ShareURL) azfile.FileHTTPHeaders { - headers := azfile.FileHTTPHeaders{} - +func (a filesResourceAdapter) toPermissions(c asserter, shareClient *share.Client) *file.Permissions { if a.obj.creationProperties.smbPermissionsSddl != nil { + permissions := file.Permissions{} parsedSDDL, err := sddl.ParseSDDL(*a.obj.creationProperties.smbPermissionsSddl) c.AssertNoErr(err, "Failed to parse SDDL") var permKey string if len(parsedSDDL.PortableString()) > 8000 { - createPermResp, err := share.CreatePermission(ctx, parsedSDDL.PortableString()) + createPermResp, err := shareClient.CreatePermission(ctx, parsedSDDL.PortableString(), nil) c.AssertNoErr(err) - permKey = createPermResp.FilePermissionKey() + permKey = *createPermResp.FilePermissionKey } - var smbprops azfile.SMBProperties - if permKey != "" { - smbprops.PermissionKey = &permKey + permissions.PermissionKey = &permKey } else { perm := parsedSDDL.PortableString() - smbprops.PermissionString = &perm + permissions.Permission = &perm } - - headers.SMBProperties = smbprops - } - - if a.obj.creationProperties.smbAttributes != nil { - attribs := azfile.FileAttributeFlags(*a.obj.creationProperties.smbAttributes) - headers.SMBProperties.FileAttributes = &attribs - } - - if a.obj.creationProperties.lastWriteTime != nil { - lwt := *a.obj.creationProperties.lastWriteTime - headers.SMBProperties.FileLastWriteTime = &lwt + return &permissions } + return nil +} +func (a filesResourceAdapter) toHeaders() *file.HTTPHeaders { props := a.obj.creationProperties.contentHeaders if props == nil { - return headers + return nil } - headers.ContentType = sval(props.contentType) - headers.ContentMD5 = props.contentMD5 - headers.ContentEncoding = sval(props.contentEncoding) - headers.ContentLanguage = sval(props.contentLanguage) - headers.ContentDisposition = sval(props.contentDisposition) - headers.CacheControl = sval(props.cacheControl) - - return headers -} - -func (a filesResourceAdapter) toMetadata() azfile.Metadata { - if a.obj.creationProperties.nameValueMetadata == nil { - return azfile.Metadata{} + return &file.HTTPHeaders{ + ContentType: props.contentType, + ContentMD5: props.contentMD5, + ContentEncoding: props.contentEncoding, + ContentLanguage: props.contentLanguage, + ContentDisposition: props.contentDisposition, + CacheControl: props.cacheControl, } - return a.obj.creationProperties.nameValueMetadata } diff --git a/e2etest/declarativeResourceManagers.go b/e2etest/declarativeResourceManagers.go index 04249d3ab..f49f9f9e1 100644 --- a/e2etest/declarativeResourceManagers.go +++ b/e2etest/declarativeResourceManagers.go @@ -21,6 +21,10 @@ package e2etest import ( + 
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/share" "github.com/Azure/azure-storage-azcopy/v10/azbfs" "net/url" "os" @@ -29,8 +33,6 @@ import ( "strings" "github.com/Azure/azure-storage-azcopy/v10/common" - "github.com/Azure/azure-storage-blob-go/azblob" - "github.com/Azure/azure-storage-file-go/azfile" ) func assertNoStripTopDir(stripTopDir bool) { @@ -46,13 +48,13 @@ type downloadContentOptions struct { } type downloadBlobContentOptions struct { - containerURL azblob.ContainerURL - cpkInfo common.CpkInfo - cpkScopeInfo common.CpkScopeInfo + containerClient *container.Client + cpkInfo *blob.CPKInfo + cpkScopeInfo *blob.CPKScopeInfo } type downloadFileContentOptions struct { - shareURL azfile.ShareURL + shareClient *share.Client } // TODO: any better names for this? @@ -121,8 +123,8 @@ func (r *resourceLocal) createFiles(a asserter, s *scenario, isSource bool) { scenarioHelper{}.generateLocalFilesFromList(a, &generateLocalFilesFromList{ dirPath: r.dirPath, generateFromListOptions: generateFromListOptions{ - fs: s.fs.allObjects(isSource), - defaultSize: s.fs.defaultSize, + fs: s.fs.allObjects(isSource), + defaultSize: s.fs.defaultSize, preservePosixProperties: s.p.preservePOSIXProperties, }, }) @@ -204,15 +206,17 @@ func (r *resourceLocal) createSourceSnapshot(a asserter) { // ///// type resourceBlobContainer struct { - accountType AccountType - containerURL *azblob.ContainerURL - rawSasURL *url.URL + accountType AccountType + containerClient *container.Client + rawSasURL *url.URL } func (r *resourceBlobContainer) createLocation(a asserter, s *scenario) { cu, _, rawSasURL := TestResourceFactory{}.CreateNewContainer(a, s.GetTestFiles().sourcePublic, r.accountType) - r.containerURL = &cu - r.rawSasURL = &rawSasURL + r.containerClient = cu + rawURL, err := url.Parse(rawSasURL) + a.AssertNoErr(err) + r.rawSasURL = rawURL if s.GetModifiableParameters().relativeSourcePath != "" { r.appendSourcePath(s.GetModifiableParameters().relativeSourcePath, true) } @@ -220,8 +224,8 @@ func (r *resourceBlobContainer) createLocation(a asserter, s *scenario) { func (r *resourceBlobContainer) createFiles(a asserter, s *scenario, isSource bool) { options := &generateBlobFromListOptions{ - rawSASURL: *r.rawSasURL, - containerURL: *r.containerURL, + rawSASURL: *r.rawSasURL, + containerClient: r.containerClient, generateFromListOptions: generateFromListOptions{ fs: s.fs.allObjects(isSource), defaultSize: s.fs.defaultSize, @@ -239,7 +243,8 @@ func (r *resourceBlobContainer) createFiles(a asserter, s *scenario, isSource bo // set root ACL if r.accountType == EAccountType.HierarchicalNamespaceEnabled() { - containerURLParts := azblob.NewBlobURLParts(r.containerURL.URL()) + containerURLParts, err := blob.ParseURL(r.containerClient.URL()) + a.AssertNoErr(err) for _,v := range options.generateFromListOptions.fs { if v.name == "" { @@ -262,7 +267,7 @@ func (r *resourceBlobContainer) createFiles(a asserter, s *scenario, isSource bo func (r *resourceBlobContainer) createFile(a asserter, o *testObject, s *scenario, isSource bool) { options := &generateBlobFromListOptions{ - containerURL: *r.containerURL, + containerClient: r.containerClient, generateFromListOptions: generateFromListOptions{ fs: []*testObject{o}, defaultSize: s.fs.defaultSize, @@ -278,32 +283,32 @@ func (r *resourceBlobContainer) createFile(a asserter, o 
*testObject, s *scenari } func (r *resourceBlobContainer) cleanup(a asserter) { - if r.containerURL != nil { - deleteContainer(a, *r.containerURL) + if r.containerClient != nil { + deleteContainer(a, r.containerClient) } } func (r *resourceBlobContainer) getParam(stripTopDir bool, withSas bool, withFile string) string { - var uri url.URL + var uri string if withSas { - uri = *r.rawSasURL + uri = r.rawSasURL.String() } else { - uri = r.containerURL.URL() + uri = r.containerClient.URL() } if withFile != "" { - bURLParts := azblob.NewBlobURLParts(uri) + bURLParts, _ := blob.ParseURL(uri) bURLParts.BlobName = withFile - uri = bURLParts.URL() + uri = bURLParts.String() } if r.accountType == EAccountType.HierarchicalNamespaceEnabled() { - uri.Host = strings.ReplaceAll(uri.Host, "blob", "dfs") + uri = strings.ReplaceAll(uri, "blob", "dfs") } - return uri.String() + return uri } func (r *resourceBlobContainer) getSAS() string { @@ -321,11 +326,12 @@ func (r *resourceBlobContainer) appendSourcePath(filePath string, useSas bool) { } func (r *resourceBlobContainer) getAllProperties(a asserter) map[string]*objectProperties { - objects := scenarioHelper{}.enumerateContainerBlobProperties(a, *r.containerURL) + objects := scenarioHelper{}.enumerateContainerBlobProperties(a, r.containerClient) if r.accountType == EAccountType.HierarchicalNamespaceEnabled() { - urlParts := azblob.NewBlobURLParts(r.containerURL.URL()) - fsURL := TestResourceFactory{}.GetDatalakeServiceURL(r.accountType).NewFileSystemURL(urlParts.ContainerName).NewDirectoryURL("") + urlParts, err := blob.ParseURL(r.containerClient.URL()) + a.AssertNoErr(err) + fsURL := TestResourceFactory{}.GetDatalakeServiceURL(r.accountType).NewFileSystemURL(urlParts.ContainerName).NewDirectoryURL("/") ACL, err := fsURL.GetAccessControl(ctx) if stgErr, ok := err.(azbfs.StorageError); ok { @@ -345,7 +351,7 @@ func (r *resourceBlobContainer) getAllProperties(a asserter) map[string]*objectP } func (r *resourceBlobContainer) downloadContent(a asserter, options downloadContentOptions) []byte { - options.containerURL = *r.containerURL + options.containerClient = r.containerClient return scenarioHelper{}.downloadBlobContent(a, options) } @@ -357,15 +363,17 @@ func (r *resourceBlobContainer) createSourceSnapshot(a asserter) { type resourceAzureFileShare struct { accountType AccountType - shareURL *azfile.ShareURL // // TODO: Either eliminate SDK URLs from ResourceManager or provide means to edit it (File SDK) for which pipeline is required + shareClient *share.Client // // TODO: Either eliminate SDK URLs from ResourceManager or provide means to edit it (File SDK) for which pipeline is required rawSasURL *url.URL snapshotID string // optional, use a snapshot as the location instead } func (r *resourceAzureFileShare) createLocation(a asserter, s *scenario) { su, _, rawSasURL := TestResourceFactory{}.CreateNewFileShare(a, EAccountType.Standard()) - r.shareURL = &su - r.rawSasURL = &rawSasURL + r.shareClient = su + rawURL, err := url.Parse(rawSasURL) + a.AssertNoErr(err) + r.rawSasURL = rawURL if s.GetModifiableParameters().relativeSourcePath != "" { r.appendSourcePath(s.GetModifiableParameters().relativeSourcePath, true) } @@ -373,7 +381,7 @@ func (r *resourceAzureFileShare) createLocation(a asserter, s *scenario) { func (r *resourceAzureFileShare) createFiles(a asserter, s *scenario, isSource bool) { scenarioHelper{}.generateAzureFilesFromList(a, &generateAzureFilesFromListOptions{ - shareURL: *r.shareURL, + shareClient: r.shareClient, fileList: 
s.fs.allObjects(isSource), defaultSize: s.fs.defaultSize, }) @@ -381,30 +389,30 @@ func (r *resourceAzureFileShare) createFiles(a asserter, s *scenario, isSource b func (r *resourceAzureFileShare) createFile(a asserter, o *testObject, s *scenario, isSource bool) { scenarioHelper{}.generateAzureFilesFromList(a, &generateAzureFilesFromListOptions{ - shareURL: *r.shareURL, + shareClient: r.shareClient, fileList: []*testObject{o}, defaultSize: s.fs.defaultSize, }) } func (r *resourceAzureFileShare) cleanup(a asserter) { - if r.shareURL != nil { - deleteShare(a, *r.shareURL) + if r.shareClient != nil { + deleteShare(a, r.shareClient) } } func (r *resourceAzureFileShare) getParam(stripTopDir bool, withSas bool, withFile string) string { assertNoStripTopDir(stripTopDir) - var param url.URL + var uri string if withSas { - param = *r.rawSasURL + uri = r.rawSasURL.String() } else { - param = r.shareURL.URL() + uri = r.shareClient.URL() } // append the snapshot ID if present if r.snapshotID != "" || withFile != "" { - parts := azfile.NewFileURLParts(param) + parts, _ := file.ParseURL(uri) if r.snapshotID != "" { parts.ShareSnapshot = r.snapshotID } @@ -412,10 +420,10 @@ func (r *resourceAzureFileShare) getParam(stripTopDir bool, withSas bool, withFi if withFile != "" { parts.DirectoryOrFilePath = withFile } - param = parts.URL() + uri = parts.String() } - return param.String() + return uri } func (r *resourceAzureFileShare) getSAS() string { @@ -433,20 +441,20 @@ func (r *resourceAzureFileShare) appendSourcePath(filePath string, useSas bool) } func (r *resourceAzureFileShare) getAllProperties(a asserter) map[string]*objectProperties { - return scenarioHelper{}.enumerateShareFileProperties(a, *r.shareURL) + return scenarioHelper{}.enumerateShareFileProperties(a, r.shareClient) } func (r *resourceAzureFileShare) downloadContent(a asserter, options downloadContentOptions) []byte { return scenarioHelper{}.downloadFileContent(a, downloadContentOptions{ resourceRelPath: options.resourceRelPath, downloadFileContentOptions: downloadFileContentOptions{ - shareURL: *r.shareURL, + shareClient: r.shareClient, }, }) } func (r *resourceAzureFileShare) createSourceSnapshot(a asserter) { - r.snapshotID = TestResourceFactory{}.CreateNewFileShareSnapshot(a, *r.shareURL) + r.snapshotID = TestResourceFactory{}.CreateNewFileShareSnapshot(a, r.shareClient) } // // diff --git a/e2etest/declarativeScenario.go b/e2etest/declarativeScenario.go index d8a89b35c..b5486e02a 100644 --- a/e2etest/declarativeScenario.go +++ b/e2etest/declarativeScenario.go @@ -23,6 +23,7 @@ package e2etest import ( "crypto/md5" "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" "io" "io/fs" "net/url" @@ -34,7 +35,6 @@ import ( "github.com/Azure/azure-storage-azcopy/v10/common" "github.com/Azure/azure-storage-azcopy/v10/sddl" - "github.com/Azure/azure-storage-blob-go/azblob" ) // E.g. 
if we have enumerationSuite/TestFooBar/Copy-LocalBlob the scenario is "Copy-LocalBlob" @@ -248,7 +248,7 @@ func (s *scenario) assignSourceAndDest() { // TODO: handle account to account (multi-container) scenarios switch loc { case common.ELocation.Local(): - return &resourceLocal{common.IffString(s.p.destNull && !isSourceAcc, common.Dev_Null, "")} + return &resourceLocal{common.Iff(s.p.destNull && !isSourceAcc, common.Dev_Null, "")} case common.ELocation.File(): return &resourceAzureFileShare{accountType: accType} case common.ELocation.Blob(): @@ -306,7 +306,7 @@ func (s *scenario) runAzCopy(logDirectory string) { result, wasClean, err := r.ExecuteAzCopyCommand( s.operation, s.state.source.getParam(s.stripTopDir, needsSAS(s.credTypes[0]), tf.objectTarget), - s.state.dest.getParam(false, needsSAS(s.credTypes[1]), common.IffString(tf.destTarget != "", tf.destTarget, tf.objectTarget)), + s.state.dest.getParam(false, needsSAS(s.credTypes[1]), common.Iff(tf.destTarget != "", tf.destTarget, tf.objectTarget)), s.credTypes[0] == common.ECredentialType.OAuthToken() || s.credTypes[1] == common.ECredentialType.OAuthToken(), // needsOAuth afterStart, s.chToStdin, logDirectory) @@ -331,7 +331,7 @@ func (s *scenario) resumeAzCopy(logDir string) { defer close(s.chToStdin) r := newTestRunner() - if sas := s.state.source.getSAS(); s.GetTestFiles().sourcePublic == azblob.PublicAccessNone && sas != "" { + if sas := s.state.source.getSAS(); s.GetTestFiles().sourcePublic == nil && sas != "" { r.flags["source-sas"] = sas } if sas := s.state.dest.getSAS(); sas != "" { @@ -572,7 +572,7 @@ func (s *scenario) validateContent() { } } -func (s *scenario) validatePOSIXProperties(f *testObject, metadata map[string]string) { +func (s *scenario) validatePOSIXProperties(f *testObject, metadata map[string]*string) { if !s.p.preservePOSIXProperties { return } @@ -592,7 +592,7 @@ func (s *scenario) validatePOSIXProperties(f *testObject, metadata map[string]st s.a.Assert(f.verificationProperties.posixProperties.EquivalentToStatAdapter(adapter), equals(), "", "POSIX properties were mismatched") } -func (s *scenario) validateSymlink(f *testObject, metadata map[string]string) { +func (s *scenario) validateSymlink(f *testObject, metadata map[string]*string) { c := s.GetAsserter() prepareSymlinkForComparison := func(oldName string) string { @@ -600,12 +600,12 @@ func (s *scenario) validateSymlink(f *testObject, metadata map[string]string) { case common.EFromTo.LocalBlob(): source := s.state.source.(*resourceLocal) - return strings.TrimPrefix(oldName, source.dirPath + common.OS_PATH_SEPARATOR) + return strings.TrimPrefix(oldName, source.dirPath+common.OS_PATH_SEPARATOR) case common.EFromTo.BlobLocal(): dest := s.state.dest.(*resourceLocal) _, _, _, _, addedDirAtDest := s.getTransferInfo() - return strings.TrimPrefix(oldName, path.Join(dest.dirPath, addedDirAtDest) + common.OS_PATH_SEPARATOR) + return strings.TrimPrefix(oldName, path.Join(dest.dirPath, addedDirAtDest)+common.OS_PATH_SEPARATOR) case common.EFromTo.BlobBlob(): return oldName // no adjustment necessary default: @@ -624,7 +624,7 @@ func (s *scenario) validateSymlink(f *testObject, metadata map[string]string) { symlinkDest := path.Join(dest.(*resourceLocal).dirPath, addedDirAtDest, f.name) stat, err := os.Lstat(symlinkDest) c.AssertNoErr(err) - c.Assert(stat.Mode() & os.ModeSymlink, equals(), os.ModeSymlink, "the file is not a symlink") + c.Assert(stat.Mode()&os.ModeSymlink, equals(), os.ModeSymlink, "the file is not a symlink") oldName, err := os.Readlink(symlinkDest) 
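// Editor's note: an illustrative sketch, not part of the patch. The hunks above
// replace common.IffString(...) with common.Iff(...); the real helper lives in
// common/iff.go, but assuming a generic signature along the lines of
// Iff[T any](cond bool, a, b T) T, the pattern it enables looks like this
// (the iff function below is a hypothetical stand-in, not the azcopy code):

package main

import "fmt"

// iff returns a when cond is true and b otherwise. One generic helper replaces
// the per-type variants (such as IffString) used before the Track 2 migration.
func iff[T any](cond bool, a, b T) T {
	if cond {
		return a
	}
	return b
}

func main() {
	destNull := true
	// Mirrors the destNull branch above; "/dev/null" stands in for common.Dev_Null.
	dest := iff(destNull, "/dev/null", "")
	fmt.Println(dest)
}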
c.AssertNoErr(err) @@ -632,7 +632,7 @@ func (s *scenario) validateSymlink(f *testObject, metadata map[string]string) { case common.ELocation.Blob(): val, ok := metadata[common.POSIXSymlinkMeta] c.Assert(ok, equals(), true) - c.Assert(val, equals(), "true") + c.Assert(*val, equals(), "true") content := dest.downloadContent(c, downloadContentOptions{ resourceRelPath: fixSlashes(path.Join(addedDirAtDest, f.name), common.ELocation.Blob()), @@ -649,20 +649,29 @@ func (s *scenario) validateSymlink(f *testObject, metadata map[string]string) { } } +func metadataWithProperCasing(original map[string]*string) map[string]*string { + result := make(map[string]*string) + for k, v := range original { + result[strings.ToLower(k)] = v + } + return result +} + // // Individual property validation routines -func (s *scenario) validateMetadata(expected, actual map[string]string) { - for _,v := range common.AllLinuxProperties { // properties are evaluated elsewhere +func (s *scenario) validateMetadata(expected, actual map[string]*string) { + for _, v := range common.AllLinuxProperties { // properties are evaluated elsewhere delete(expected, v) delete(actual, v) } s.a.Assert(len(actual), equals(), len(expected), "Both should have same number of metadata entries") + cased := metadataWithProperCasing(actual) for key := range expected { exValue := expected[key] - actualValue, ok := actual[key] + actualValue, ok := cased[key] s.a.Assert(ok, equals(), true, fmt.Sprintf("expect key '%s' to be found in destination metadata", key)) if ok { - s.a.Assert(exValue, equals(), actualValue, fmt.Sprintf("Expect value for key '%s' to be '%s' but found '%s'", key, exValue, actualValue)) + s.a.Assert(exValue, equals(), actualValue, fmt.Sprintf("Expect value for key '%s' to be '%s' but found '%s'", key, *exValue, *actualValue)) } } } @@ -679,7 +688,7 @@ func (s *scenario) validateADLSACLs(expected, actual *string) { s.a.Assert(expected, equals(), actual, fmt.Sprintf("Expected Gen 2 ACL: %s but found: %s", *expected, *actual)) } -func (s *scenario) validateCPKByScope(expected, actual *common.CpkScopeInfo) { +func (s *scenario) validateCPKByScope(expected, actual *blob.CPKScopeInfo) { if expected == nil && actual == nil { return } @@ -691,7 +700,7 @@ func (s *scenario) validateCPKByScope(expected, actual *common.CpkScopeInfo) { fmt.Sprintf("Expected encryption scope is: '%v' but found: '%v'", expected.EncryptionScope, actual.EncryptionScope)) } -func (s *scenario) validateCPKByValue(expected, actual *common.CpkInfo) { +func (s *scenario) validateCPKByValue(expected, actual *blob.CPKInfo) { if expected == nil && actual == nil { return } @@ -700,8 +709,8 @@ func (s *scenario) validateCPKByValue(expected, actual *common.CpkInfo) { return } - s.a.Assert(expected.EncryptionKeySha256, equals(), actual.EncryptionKeySha256, - fmt.Sprintf("Expected encryption scope is: '%v' but found: '%v'", expected.EncryptionKeySha256, actual.EncryptionKeySha256)) + s.a.Assert(expected.EncryptionKeySHA256, equals(), actual.EncryptionKeySHA256, + fmt.Sprintf("Expected encryption scope is: '%v' but found: '%v'", expected.EncryptionKeySHA256, actual.EncryptionKeySHA256)) } // Validate blob tags @@ -858,3 +867,7 @@ func (s *scenario) GetAsserter() asserter { func (s *scenario) GetDestination() resourceManager { return s.state.dest } + +func (s *scenario) GetSource() resourceManager { + return s.state.source +} diff --git a/e2etest/declarativeTestFiles.go b/e2etest/declarativeTestFiles.go index 384295f52..18dcee94a 100644 --- a/e2etest/declarativeTestFiles.go 
+++ b/e2etest/declarativeTestFiles.go @@ -23,6 +23,9 @@ package e2etest import ( "encoding/hex" "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" "math" "reflect" "strconv" @@ -31,7 +34,6 @@ import ( "github.com/Azure/azure-storage-azcopy/v10/cmd" "github.com/Azure/azure-storage-azcopy/v10/common" - "github.com/Azure/azure-storage-blob-go/azblob" ) /////////////// @@ -92,7 +94,7 @@ type objectProperties struct { posixProperties *objectUnixStatContainer size *int64 contentHeaders *contentHeaders - nameValueMetadata map[string]string + nameValueMetadata map[string]*string blobTags common.BlobTags blobType common.BlobType creationTime *time.Time @@ -100,8 +102,8 @@ type objectProperties struct { smbAttributes *uint32 smbPermissionsSddl *string adlsPermissionsACL *string // TODO: Test owner and group; needs a good target though. - cpkInfo *common.CpkInfo - cpkScopeInfo *common.CpkScopeInfo + cpkInfo *blob.CPKInfo + cpkScopeInfo *blob.CPKScopeInfo } type objectUnixStatContainer struct { @@ -180,7 +182,7 @@ func (o *objectUnixStatContainer) EquivalentToStatAdapter(s common.UnixStatAdapt return strings.Join(mismatched, ", ") } -func (o *objectUnixStatContainer) AddToMetadata(metadata map[string]string) { +func (o *objectUnixStatContainer) AddToMetadata(metadata map[string]*string) { if o == nil { return } @@ -189,29 +191,29 @@ func (o *objectUnixStatContainer) AddToMetadata(metadata map[string]string) { if o.mode != nil { // always overwrite; perhaps it got changed in one of the hooks. mask |= common.STATX_MODE - metadata[common.POSIXModeMeta] = strconv.FormatUint(uint64(*o.mode), 10) + metadata[common.POSIXModeMeta] = to.Ptr(strconv.FormatUint(uint64(*o.mode), 10)) delete(metadata, common.POSIXFIFOMeta) delete(metadata, common.POSIXSocketMeta) switch { case *o.mode & common.S_IFIFO == common.S_IFIFO: - metadata[common.POSIXFIFOMeta] = "true" + metadata[common.POSIXFIFOMeta] = to.Ptr("true") case *o.mode & common.S_IFSOCK == common.S_IFSOCK: - metadata[common.POSIXSocketMeta] = "true" + metadata[common.POSIXSocketMeta] = to.Ptr("true") } } if o.accessTime != nil { mask |= common.STATX_ATIME - metadata[common.POSIXATimeMeta] = strconv.FormatInt(o.accessTime.UnixNano(), 10) + metadata[common.POSIXATimeMeta] = to.Ptr(strconv.FormatInt(o.accessTime.UnixNano(), 10)) } if o.modTime != nil { mask |= common.STATX_MTIME - metadata[common.POSIXModTimeMeta] = strconv.FormatInt(o.modTime.UnixNano(), 10) + metadata[common.POSIXModTimeMeta] = to.Ptr(strconv.FormatInt(o.modTime.UnixNano(), 10)) } - metadata[common.LINUXStatxMaskMeta] = strconv.FormatUint(uint64(mask), 10) + metadata[common.LINUXStatxMaskMeta] = to.Ptr(strconv.FormatUint(uint64(mask), 10)) } // returns op.size, if present, else defaultSize @@ -255,7 +257,7 @@ func (op objectProperties) DeepCopy() objectProperties { ret.contentHeaders = op.contentHeaders.DeepCopy() } - ret.nameValueMetadata = make(map[string]string) + ret.nameValueMetadata = make(map[string]*string) for k, v := range op.nameValueMetadata { ret.nameValueMetadata[k] = v } @@ -463,7 +465,7 @@ type testFiles struct { defaultSize string // how big should the files be? Applies to those files that don't specify individual sizes. Uses the same K, M, G suffixes as benchmark mode's size-per-file objectTarget string // should we target only a single file/folder? destTarget string // do we want to copy under a folder or rename? 
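// Editor's note: an illustrative sketch, not part of the patch. The Track 2 SDKs
// type metadata as map[string]*string rather than map[string]string, which is why
// AddToMetadata above wraps every value with to.Ptr and why readers must
// dereference. A minimal, self-contained example of that pattern (the key name is
// only an example taken from the stub-folder convention used elsewhere in the diff):

package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
)

func main() {
	// Writing: every value must be a *string, so wrap literals with to.Ptr.
	metadata := map[string]*string{
		"hdi_isfolder": to.Ptr("true"),
	}

	// Reading: check presence and guard against a nil pointer before dereferencing.
	if v, ok := metadata["hdi_isfolder"]; ok && v != nil {
		fmt.Println("folder stub:", *v)
	}
}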
- sourcePublic azblob.PublicAccessType // should the source blob container be public? (ONLY APPLIES TO BLOB.) + sourcePublic *container.PublicAccessType // should the source blob container be public? (ONLY APPLIES TO BLOB.) // The files/folders that we expect to be transferred. Elements of the list must be strings or testObject's. // A string can be used if no properties need to be specified. diff --git a/e2etest/declarativeWithPropertyProviders.go b/e2etest/declarativeWithPropertyProviders.go index 839c31a10..a24efd355 100644 --- a/e2etest/declarativeWithPropertyProviders.go +++ b/e2etest/declarativeWithPropertyProviders.go @@ -21,6 +21,7 @@ package e2etest import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "time" "github.com/Azure/azure-storage-azcopy/v10/cmd" @@ -47,7 +48,7 @@ type with struct { contentType string contentMD5 []byte - nameValueMetadata map[string]string + nameValueMetadata map[string]*string blobTags string blobType common.BlobType lastWriteTime time.Time @@ -164,13 +165,13 @@ func (w with) createObjectProperties() *objectProperties { if w.cpkByName != "" { populated = true cpkScopeInfo := common.GetCpkScopeInfo(w.cpkByName) - result.cpkScopeInfo = &cpkScopeInfo + result.cpkScopeInfo = cpkScopeInfo } if w.cpkByValue { populated = true cpkInfo := common.GetCpkInfo(w.cpkByValue) - result.cpkInfo = &cpkInfo + result.cpkInfo = cpkInfo } if populated { @@ -219,7 +220,7 @@ func (withDirStubMetadata) appliesToVerification() bool { } func (withDirStubMetadata) createObjectProperties() *objectProperties { - m := map[string]string{"hdi_isfolder": "true"} // special flag that says this file is a stub + m := map[string]*string{"hdi_isfolder": to.Ptr("true")} // special flag that says this file is a stub size := int64(0) return &objectProperties{ size: &size, diff --git a/e2etest/factory.go b/e2etest/factory.go index 65a37021a..aa753bfda 100644 --- a/e2etest/factory.go +++ b/e2etest/factory.go @@ -23,6 +23,17 @@ package e2etest import ( "context" "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" + blobsas "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" + blobservice "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" + filesas "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/sas" + fileservice "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/service" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/share" "github.com/Azure/azure-storage-azcopy/v10/common" "github.com/Azure/azure-storage-azcopy/v10/ste" "net/url" @@ -34,37 +45,44 @@ import ( "time" "github.com/Azure/azure-storage-azcopy/v10/azbfs" - "github.com/Azure/azure-storage-blob-go/azblob" - "github.com/Azure/azure-storage-file-go/azfile" "github.com/google/uuid" ) // provide convenient methods to get access to test resources such as accounts, containers/shares, directories type TestResourceFactory struct{} -func (TestResourceFactory) GetBlobServiceURL(accountType AccountType) azblob.ServiceURL { +func (TestResourceFactory) GetBlobServiceURL(accountType AccountType) *blobservice.Client { accountName, accountKey := GlobalInputManager{}.GetAccountAndKey(accountType) - u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/", accountName)) + resourceURL := 
fmt.Sprintf("https://%s.blob.core.windows.net/", accountName) - credential, err := azblob.NewSharedKeyCredential(accountName, accountKey) + credential, err := blob.NewSharedKeyCredential(accountName, accountKey) if err != nil { panic(err) } - pipeline := azblob.NewPipeline(credential, azblob.PipelineOptions{}) - return azblob.NewServiceURL(*u, pipeline) + bsc, err := blobservice.NewClientWithSharedKeyCredential(resourceURL, credential, nil) + if err != nil { + panic(err) + } + return bsc } -func (TestResourceFactory) GetFileServiceURL(accountType AccountType) azfile.ServiceURL { +func (TestResourceFactory) GetFileServiceURL(accountType AccountType) *fileservice.Client { accountName, accountKey := GlobalInputManager{}.GetAccountAndKey(accountType) - u, _ := url.Parse(fmt.Sprintf("https://%s.file.core.windows.net/", accountName)) + resourceURL := fmt.Sprintf("https://%s.file.core.windows.net/", accountName) - credential, err := azfile.NewSharedKeyCredential(accountName, accountKey) + credential, err := file.NewSharedKeyCredential(accountName, accountKey) if err != nil { panic(err) } - p := ste.NewFilePipeline(credential, azfile.PipelineOptions{}, azfile.RetryOptions{}, nil, ste.NewAzcopyHTTPClient(20), nil, common.ETrailingDotOption.Enable(), common.ELocation.File()) - - return azfile.NewServiceURL(*u, p) + perRetryPolicies := []policy.Policy{ste.NewTrailingDotPolicy(to.Ptr(common.ETrailingDotOption.Enable()), nil)} + clientOptions := azcore.ClientOptions{ + PerRetryPolicies: perRetryPolicies, + } + fsc, err := fileservice.NewClientWithSharedKeyCredential(resourceURL, credential, &fileservice.ClientOptions{ClientOptions: clientOptions}) + if err != nil { + panic(err) + } + return fsc } func (TestResourceFactory) GetDatalakeServiceURL(accountType AccountType) azbfs.ServiceURL { @@ -76,115 +94,100 @@ func (TestResourceFactory) GetDatalakeServiceURL(accountType AccountType) azbfs. 
return azbfs.NewServiceURL(*u, pipeline) } -func (TestResourceFactory) GetBlobServiceURLWithSAS(c asserter, accountType AccountType) azblob.ServiceURL { +func (TestResourceFactory) GetBlobServiceURLWithSAS(c asserter, accountType AccountType) *blobservice.Client { accountName, accountKey := GlobalInputManager{}.GetAccountAndKey(accountType) - credential, err := azblob.NewSharedKeyCredential(accountName, accountKey) + credential, err := blob.NewSharedKeyCredential(accountName, accountKey) c.AssertNoErr(err) - - sasQueryParams, err := azblob.AccountSASSignatureValues{ - Protocol: azblob.SASProtocolHTTPS, - ExpiryTime: time.Now().Add(48 * time.Hour), - Permissions: azblob.AccountSASPermissions{Read: true, List: true, Write: true, Delete: true, DeletePreviousVersion: true, Add: true, Create: true, Update: true, Process: true, Tag: true, FilterByTags: true}.String(), - Services: azblob.AccountSASServices{File: true, Blob: true, Queue: true}.String(), - ResourceTypes: azfile.AccountSASResourceTypes{Service: true, Container: true, Object: true}.String(), - }.NewSASQueryParameters(credential) + rawURL := fmt.Sprintf("https://%s.blob.core.windows.net/", credential.AccountName()) + client, err := blobservice.NewClientWithSharedKeyCredential(rawURL, credential, nil) c.AssertNoErr(err) - // construct the url from scratch - qp := sasQueryParams.Encode() - rawURL := fmt.Sprintf("https://%s.blob.core.windows.net/?%s", - credential.AccountName(), qp) + sasURL, err := client.GetSASURL( + blobsas.AccountResourceTypes{Service: true, Container: true, Object: true}, + blobsas.AccountPermissions{Read: true, List: true, Write: true, Delete: true, DeletePreviousVersion: true, Add: true, Create: true, Update: true, Process: true, Tag: true, FilterByTags: true}, + time.Now().Add(48 * time.Hour), + nil) + c.AssertNoErr(err) - // convert the raw url and validate it was parsed successfully - fullURL, err := url.Parse(rawURL) + client, err = blobservice.NewClientWithNoCredential(sasURL, nil) c.AssertNoErr(err) - return azblob.NewServiceURL(*fullURL, azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{})) + return client } -func (TestResourceFactory) GetContainerURLWithSAS(c asserter, accountType AccountType, containerName string) azblob.ContainerURL { +func (TestResourceFactory) GetContainerURLWithSAS(c asserter, accountType AccountType, containerName string) *container.Client { accountName, accountKey := GlobalInputManager{}.GetAccountAndKey(accountType) - credential, err := azblob.NewSharedKeyCredential(accountName, accountKey) + credential, err := blob.NewSharedKeyCredential(accountName, accountKey) c.AssertNoErr(err) - - sasQueryParams, err := azblob.BlobSASSignatureValues{ - Protocol: azblob.SASProtocolHTTPS, - ExpiryTime: time.Now().UTC().Add(48 * time.Hour), - ContainerName: containerName, - Permissions: azblob.ContainerSASPermissions{Read: true, Add: true, Write: true, Create: true, Delete: true, DeletePreviousVersion: true, List: true, Tag: true, ModifyOwnership: true, ModifyPermissions: true}.String(), - }.NewSASQueryParameters(credential) + rawURL := fmt.Sprintf("https://%s.blob.core.windows.net/%s", credential.AccountName(), containerName) + client, err := container.NewClientWithSharedKeyCredential(rawURL, credential, nil) c.AssertNoErr(err) - // construct the url from scratch - qp := sasQueryParams.Encode() - rawURL := fmt.Sprintf("https://%s.blob.core.windows.net/%s?%s", - credential.AccountName(), containerName, qp) + sasURL, err := client.GetSASURL( + 
blobsas.ContainerPermissions{Read: true, Add: true, Write: true, Create: true, Delete: true, DeletePreviousVersion: true, List: true, ModifyOwnership: true, ModifyPermissions: true, Tag: true}, + time.Now().Add(48 * time.Hour), + nil) + c.AssertNoErr(err) - // convert the raw url and validate it was parsed successfully - fullURL, err := url.Parse(rawURL) + client, err = container.NewClientWithNoCredential(sasURL, nil) c.AssertNoErr(err) - return azblob.NewContainerURL(*fullURL, azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{})) + return client } -func (TestResourceFactory) GetFileShareULWithSAS(c asserter, accountType AccountType, containerName string) azfile.ShareURL { +func (TestResourceFactory) GetFileShareURLWithSAS(c asserter, accountType AccountType, containerName string) *share.Client { accountName, accountKey := GlobalInputManager{}.GetAccountAndKey(accountType) - credential, err := azfile.NewSharedKeyCredential(accountName, accountKey) + credential, err := file.NewSharedKeyCredential(accountName, accountKey) c.AssertNoErr(err) - - sasQueryParams, err := azfile.FileSASSignatureValues{ - Protocol: azfile.SASProtocolHTTPS, - ExpiryTime: time.Now().UTC().Add(48 * time.Hour), - ShareName: containerName, - Permissions: azfile.ShareSASPermissions{Read: true, Write: true, Create: true, Delete: true, List: true}.String(), - }.NewSASQueryParameters(credential) + rawURL := fmt.Sprintf("https://%s.file.core.windows.net/%s", credential.AccountName(), containerName) + client, err := share.NewClientWithSharedKeyCredential(rawURL, credential, nil) c.AssertNoErr(err) - // construct the url from scratch - qp := sasQueryParams.Encode() - rawURL := fmt.Sprintf("https://%s.file.core.windows.net/%s?%s", - credential.AccountName(), containerName, qp) - - // convert the raw url and validate it was parsed successfully - fullURL, err := url.Parse(rawURL) + sasURL, err := client.GetSASURL( + filesas.SharePermissions{Read: true, Write: true, Create: true, Delete: true, List: true}, + time.Now().Add(48 * time.Hour), + nil) + c.AssertNoErr(err) + perRetryPolicies := []policy.Policy{ste.NewTrailingDotPolicy(to.Ptr(common.ETrailingDotOption.Enable()), nil)} + clientOptions := azcore.ClientOptions{ + PerRetryPolicies: perRetryPolicies, + } + client, err = share.NewClientWithNoCredential(sasURL, &share.ClientOptions{ClientOptions: clientOptions}) c.AssertNoErr(err) - p := ste.NewFilePipeline(credential, azfile.PipelineOptions{}, azfile.RetryOptions{}, nil, ste.NewAzcopyHTTPClient(20), nil, common.ETrailingDotOption.Enable(), common.ELocation.File()) - return azfile.NewShareURL(*fullURL, p) + return client } -func (TestResourceFactory) GetBlobURLWithSAS(c asserter, accountType AccountType, containerName string, blobName string) azblob.BlobURL { +func (TestResourceFactory) GetBlobURLWithSAS(c asserter, accountType AccountType, containerName string, blobName string) *blob.Client { containerURLWithSAS := TestResourceFactory{}.GetContainerURLWithSAS(c, accountType, containerName) - blobURLWithSAS := containerURLWithSAS.NewBlobURL(blobName) + blobURLWithSAS := containerURLWithSAS.NewBlobClient(blobName) return blobURLWithSAS } -func (TestResourceFactory) CreateNewContainer(c asserter, publicAccess azblob.PublicAccessType, accountType AccountType) (container azblob.ContainerURL, name string, rawURL url.URL) { +func (TestResourceFactory) CreateNewContainer(c asserter, publicAccess *container.PublicAccessType, accountType AccountType) (cc *container.Client, name string, rawURL string) { name = 
TestResourceNameGenerator{}.GenerateContainerName(c) - container = TestResourceFactory{}.GetBlobServiceURL(accountType).NewContainerURL(name) + cc = TestResourceFactory{}.GetBlobServiceURL(accountType).NewContainerClient(name) - cResp, err := container.Create(context.Background(), nil, publicAccess) + _, err := cc.Create(context.Background(), &container.CreateOptions{Access: publicAccess}) c.AssertNoErr(err) - c.Assert(cResp.StatusCode(), equals(), 201) - return container, name, TestResourceFactory{}.GetContainerURLWithSAS(c, accountType, name).URL() + return cc, name, TestResourceFactory{}.GetContainerURLWithSAS(c, accountType, name).URL() } -const defaultShareQuotaGB = 512 +const defaultShareQuotaGB = int32(512) -func (TestResourceFactory) CreateNewFileShare(c asserter, accountType AccountType) (fileShare azfile.ShareURL, name string, rawSasURL url.URL) { +func (TestResourceFactory) CreateNewFileShare(c asserter, accountType AccountType) (fileShare *share.Client, name string, rawSasURL string) { name = TestResourceNameGenerator{}.GenerateContainerName(c) - fileShare = TestResourceFactory{}.GetFileServiceURL(accountType).NewShareURL(name) + fileShare = TestResourceFactory{}.GetFileServiceURL(accountType).NewShareClient(name) - cResp, err := fileShare.Create(context.Background(), nil, defaultShareQuotaGB) + _, err := fileShare.Create(context.Background(), &share.CreateOptions{Quota: to.Ptr(defaultShareQuotaGB)}) c.AssertNoErr(err) - c.Assert(cResp.StatusCode(), equals(), 201) - return fileShare, name, TestResourceFactory{}.GetFileShareULWithSAS(c, accountType, name).URL() + return fileShare, name, TestResourceFactory{}.GetFileShareURLWithSAS(c, accountType, name).URL() } -func (TestResourceFactory) CreateNewFileShareSnapshot(c asserter, fileShare azfile.ShareURL) (snapshotID string) { - resp, err := fileShare.CreateSnapshot(context.TODO(), azfile.Metadata{}) +func (TestResourceFactory) CreateNewFileShareSnapshot(c asserter, fileShare *share.Client) (snapshotID string) { + resp, err := fileShare.CreateSnapshot(context.TODO(), nil) c.AssertNoErr(err) - return resp.Snapshot() + return *resp.Snapshot } func (TestResourceFactory) CreateLocalDirectory(c asserter) (dstDirName string) { diff --git a/e2etest/helpers.go b/e2etest/helpers.go index abeaf6534..c59fe9e68 100644 --- a/e2etest/helpers.go +++ b/e2etest/helpers.go @@ -25,8 +25,23 @@ package e2etest import ( "bytes" "context" - "errors" "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob" + blobsas "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" + blobservice "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service" + sharefile "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" + filesas "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/sas" + fileservice "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/service" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/share" + "io" "math/rand" "mime" "net/url" @@ -41,9 +56,6 @@ import ( "github.com/Azure/azure-storage-azcopy/v10/azbfs" 
"github.com/Azure/azure-storage-azcopy/v10/ste" "github.com/minio/minio-go" - - "github.com/Azure/azure-storage-blob-go/azblob" - "github.com/Azure/azure-storage-file-go/azfile" ) var ctx = context.Background() @@ -107,11 +119,11 @@ func generateFilesystemName(c asserter) string { return generateName(c, blobfsPrefix, 63) } -func getShareURL(c asserter, fsu azfile.ServiceURL) (share azfile.ShareURL, name string) { +func getShareURL(c asserter, fsc *fileservice.Client) (sc *share.Client, name string) { name = generateShareName(c) - share = fsu.NewShareURL(name) + sc = fsc.NewShareClient(name) - return share, name + return sc, name } func generateAzureFileName(c asserter) string { @@ -122,11 +134,10 @@ func generateBfsFileName(c asserter) string { return generateName(c, blobfsPrefix, 0) } -func getContainerURL(c asserter, bsu azblob.ServiceURL) (container azblob.ContainerURL, name string) { +func getContainerURL(c asserter, bsc *blobservice.Client) (cc *container.Client, name string) { name = generateContainerName(c) - container = bsu.NewContainerURL(name) - - return container, name + cc = bsc.NewContainerClient(name) + return } func getFilesystemURL(c asserter, bfssu azbfs.ServiceURL) (filesystem azbfs.FileSystemURL, name string) { @@ -136,11 +147,11 @@ func getFilesystemURL(c asserter, bfssu azbfs.ServiceURL) (filesystem azbfs.File return } -func getBlockBlobURL(c asserter, container azblob.ContainerURL, prefix string) (blob azblob.BlockBlobURL, name string) { +func getBlockBlobURL(c asserter, cc *container.Client, prefix string) (bc *blockblob.Client, name string) { name = prefix + generateBlobName(c) - blob = container.NewBlockBlobURL(name) + bc = cc.NewBlockBlobClient(name) - return blob, name + return bc, name } func getBfsFileURL(c asserter, filesystemURL azbfs.FileSystemURL, prefix string) (file azbfs.FileURL, name string) { @@ -150,47 +161,39 @@ func getBfsFileURL(c asserter, filesystemURL azbfs.FileSystemURL, prefix string) return } -func getAppendBlobURL(c asserter, container azblob.ContainerURL, prefix string) (blob azblob.AppendBlobURL, name string) { +func getAppendBlobURL(c asserter, cc *container.Client, prefix string) (bc *appendblob.Client, name string) { name = generateBlobName(c) - blob = container.NewAppendBlobURL(prefix + name) - - return blob, name + bc = cc.NewAppendBlobClient(prefix + name) + return } -func getPageBlobURL(c asserter, container azblob.ContainerURL, prefix string) (blob azblob.PageBlobURL, name string) { +func getPageBlobURL(c asserter, cc *container.Client, prefix string) (bc *pageblob.Client, name string) { name = generateBlobName(c) - blob = container.NewPageBlobURL(prefix + name) - + bc = cc.NewPageBlobClient(prefix + name) return } -func getAzureFileURL(c asserter, shareURL azfile.ShareURL, prefix string) (fileURL azfile.FileURL, name string) { +func getAzureFileURL(c asserter, sc *share.Client, prefix string) (fc *sharefile.Client, name string) { name = prefix + generateAzureFileName(c) - fileURL = shareURL.NewRootDirectoryURL().NewFileURL(name) + fc = sc.NewRootDirectoryClient().NewFileClient(name) return } -func getReaderToRandomBytes(n int) *bytes.Reader { - r, _ := getRandomDataAndReader(n) - return r -} - // todo: consider whether to replace with common.NewRandomDataGenerator, which is // believed to be faster -func getRandomDataAndReader(n int) (*bytes.Reader, []byte) { +func getRandomDataAndReader(n int) (io.ReadSeekCloser, []byte) { data := make([]byte, n) rand.Read(data) - return bytes.NewReader(data), data + return 
streaming.NopCloser(bytes.NewReader(data)), data } -func createNewContainer(c asserter, bsu azblob.ServiceURL) (container azblob.ContainerURL, name string) { - container, name = getContainerURL(c, bsu) +func createNewContainer(c asserter, bsc *blobservice.Client) (cc *container.Client, name string) { + cc, name = getContainerURL(c, bsc) - cResp, err := container.Create(ctx, nil, azblob.PublicAccessNone) + _, err := cc.Create(ctx, nil) c.AssertNoErr(err) - c.Assert(cResp.StatusCode(), equals(), 201) - return container, name + return } func createNewFilesystem(c asserter, bfssu azbfs.ServiceURL) (filesystem azbfs.FileSystemURL, name string) { @@ -220,36 +223,31 @@ func createNewBfsFile(c asserter, filesystem azbfs.FileSystemURL, prefix string) return } -func createNewBlockBlob(c asserter, container azblob.ContainerURL, prefix string) (blob azblob.BlockBlobURL, name string) { - blob, name = getBlockBlobURL(c, container, prefix) - - cResp, err := blob.Upload(ctx, strings.NewReader(blockBlobDefaultData), azblob.BlobHTTPHeaders{}, - nil, azblob.BlobAccessConditions{}, azblob.DefaultAccessTier, nil, azblob.ClientProvidedKeyOptions{}, azblob.ImmutabilityPolicyOptions{}) +func createNewBlockBlob(c asserter, cc *container.Client, prefix string) (bc *blockblob.Client, name string) { + bc, name = getBlockBlobURL(c, cc, prefix) + _, err := bc.Upload(ctx, streaming.NopCloser(strings.NewReader(blockBlobDefaultData)), nil) c.AssertNoErr(err) - c.Assert(cResp.StatusCode(), equals(), 201) return } -func createNewAzureShare(c asserter, fsu azfile.ServiceURL) (share azfile.ShareURL, name string) { - share, name = getShareURL(c, fsu) +func createNewAzureShare(c asserter, fsc *fileservice.Client) (sc *share.Client, name string) { + sc, name = getShareURL(c, fsc) - cResp, err := share.Create(ctx, nil, 0) + _, err := sc.Create(ctx, nil) c.AssertNoErr(err) - c.Assert(cResp.StatusCode(), equals(), 201) - return share, name + return sc, name } -func createNewAzureFile(c asserter, share azfile.ShareURL, prefix string) (file azfile.FileURL, name string) { - file, name = getAzureFileURL(c, share, prefix) +func createNewAzureFile(c asserter, sc *share.Client, prefix string) (fc *sharefile.Client, name string) { + fc, name = getAzureFileURL(c, sc, prefix) // generate parents first - generateParentsForAzureFile(c, file) + generateParentsForAzureFile(c, fc) - cResp, err := file.Create(ctx, defaultAzureFileSizeInBytes, azfile.FileHTTPHeaders{}, azfile.Metadata{}) + _, err := fc.Create(ctx, defaultAzureFileSizeInBytes, nil) c.AssertNoErr(err) - c.Assert(cResp.StatusCode(), equals(), 201) return } @@ -258,38 +256,50 @@ func newNullFolderCreationTracker() ste.FolderCreationTracker { return ste.NewFolderCreationTracker(common.EFolderPropertiesOption.NoFolders(), nil) } -func generateParentsForAzureFile(c asserter, fileURL azfile.FileURL) { +func getFileServiceClient() *fileservice.Client { accountName, accountKey := GlobalInputManager{}.GetAccountAndKey(EAccountType.Standard()) - credential, _ := azfile.NewSharedKeyCredential(accountName, accountKey) - p := ste.NewFilePipeline(credential, azfile.PipelineOptions{}, azfile.RetryOptions{}, nil, ste.NewAzcopyHTTPClient(20), nil, common.ETrailingDotOption.Enable(), common.ELocation.File()) - err := ste.AzureFileParentDirCreator{}.CreateParentDirToRoot(ctx, fileURL, p, newNullFolderCreationTracker()) - c.AssertNoErr(err) + u := fmt.Sprintf("https://%s.file.core.windows.net/", accountName) + + credential, err := fileservice.NewSharedKeyCredential(accountName, accountKey) + if err != 
nil { + panic(err) + } + perRetryPolicies := []policy.Policy{ste.NewTrailingDotPolicy(to.Ptr(common.ETrailingDotOption.Enable()), nil)} + clientOptions := azcore.ClientOptions{ + PerRetryPolicies: perRetryPolicies, + } + client, err := fileservice.NewClientWithSharedKeyCredential(u, credential, &fileservice.ClientOptions{ClientOptions: clientOptions}) + if err != nil { + panic(err) + } + return client } -func createNewAppendBlob(c asserter, container azblob.ContainerURL, prefix string) (blob azblob.AppendBlobURL, name string) { - blob, name = getAppendBlobURL(c, container, prefix) +func generateParentsForAzureFile(c asserter, fc *sharefile.Client) { + fsc := getFileServiceClient() + err := ste.AzureFileParentDirCreator{}.CreateParentDirToRoot(ctx, fc, fsc, newNullFolderCreationTracker()) + c.AssertNoErr(err) +} - resp, err := blob.Create(ctx, azblob.BlobHTTPHeaders{}, nil, azblob.BlobAccessConditions{}, nil, azblob.ClientProvidedKeyOptions{}, azblob.ImmutabilityPolicyOptions{}) +func createNewAppendBlob(c asserter, cc *container.Client, prefix string) (bc *appendblob.Client, name string) { + bc, name = getAppendBlobURL(c, cc, prefix) + _, err := bc.Create(ctx, nil) c.AssertNoErr(err) - c.Assert(resp.StatusCode(), equals(), 201) return } -func createNewPageBlob(c asserter, container azblob.ContainerURL, prefix string) (blob azblob.PageBlobURL, name string) { - blob, name = getPageBlobURL(c, container, prefix) - - resp, err := blob.Create(ctx, azblob.PageBlobPageBytes*10, 0, azblob.BlobHTTPHeaders{}, nil, azblob.BlobAccessConditions{}, azblob.DefaultPremiumBlobAccessTier, nil, azblob.ClientProvidedKeyOptions{}, azblob.ImmutabilityPolicyOptions{}) +func createNewPageBlob(c asserter, cc *container.Client, prefix string) (bc *pageblob.Client, name string) { + bc, name = getPageBlobURL(c, cc, prefix) + _, err := bc.Create(ctx, pageblob.PageBytes*10, nil) c.AssertNoErr(err) - c.Assert(resp.StatusCode(), equals(), 201) return } -func deleteContainer(c asserter, container azblob.ContainerURL) { - resp, err := container.Delete(ctx, azblob.ContainerAccessConditions{}) +func deleteContainer(c asserter, cc *container.Client) { + _, err := cc.Delete(ctx, nil) c.AssertNoErr(err) - c.Assert(resp.StatusCode(), equals(), 202) } func deleteFilesystem(c asserter, filesystem azbfs.FileSystemURL) { @@ -405,50 +415,36 @@ func cleanS3Account(c asserter, client *minio.Client) { time.Sleep(time.Minute) } -func cleanBlobAccount(c asserter, serviceURL azblob.ServiceURL) { - marker := azblob.Marker{} - for marker.NotDone() { - resp, err := serviceURL.ListContainersSegment(ctx, marker, azblob.ListContainersSegmentOptions{}) +func cleanBlobAccount(c asserter, sc *blobservice.Client) { + pager := sc.NewListContainersPager(nil) + for pager.More() { + resp, err := pager.NextPage(ctx) c.AssertNoErr(err) for _, v := range resp.ContainerItems { - _, err = serviceURL.NewContainerURL(v.Name).Delete(ctx, azblob.ContainerAccessConditions{}) + _, err = sc.NewContainerClient(*v.Name).Delete(ctx, nil) c.AssertNoErr(err) } - - marker = resp.NextMarker } } -func cleanFileAccount(c asserter, serviceURL azfile.ServiceURL) { - marker := azfile.Marker{} - for marker.NotDone() { - resp, err := serviceURL.ListSharesSegment(ctx, marker, azfile.ListSharesOptions{}) +func cleanFileAccount(c asserter, sc *fileservice.Client) { + pager := sc.NewListSharesPager(nil) + for pager.More() { + resp, err := pager.NextPage(ctx) c.AssertNoErr(err) - for _, v := range resp.ShareItems { - _, err = serviceURL.NewShareURL(v.Name).Delete(ctx, 
azfile.DeleteSnapshotsOptionNone) + for _, v := range resp.Shares { + _, err = sc.NewShareClient(*v.Name).Delete(ctx, nil) c.AssertNoErr(err) } - - marker = resp.NextMarker } time.Sleep(time.Minute) } -func getGenericCredentialForFile(accountType string) (*azfile.SharedKeyCredential, error) { - accountNameEnvVar := accountType + "ACCOUNT_NAME" - accountKeyEnvVar := accountType + "ACCOUNT_KEY" - accountName, accountKey := os.Getenv(accountNameEnvVar), os.Getenv(accountKeyEnvVar) - if accountName == "" || accountKey == "" { - return nil, errors.New(accountNameEnvVar + " and/or " + accountKeyEnvVar + " environment variables not specified.") - } - return azfile.NewSharedKeyCredential(accountName, accountKey) -} - -func deleteShare(c asserter, share azfile.ShareURL) { - _, err := share.Delete(ctx, azfile.DeleteSnapshotsOptionInclude) +func deleteShare(c asserter, sc *share.Client) { + _, err := sc.Delete(ctx, &share.DeleteOptions{DeleteSnapshots: to.Ptr(share.DeleteSnapshotsOptionTypeInclude)}) c.AssertNoErr(err) } @@ -457,113 +453,88 @@ func deleteShare(c asserter, share azfile.ShareURL) { // those changes not being reflected yet, we will wait 30 seconds and try the test again. If it fails this time for any reason, // we fail the test. It is the responsibility of the the testImplFunc to determine which error string indicates the test should be retried. // There can only be one such string. All errors that cannot be due to this detail should be asserted and not returned as an error string. -func runTestRequiringServiceProperties(c asserter, bsu azblob.ServiceURL, code string, - enableServicePropertyFunc func(asserter, azblob.ServiceURL), - testImplFunc func(asserter, azblob.ServiceURL) error, - disableServicePropertyFunc func(asserter, azblob.ServiceURL)) { - enableServicePropertyFunc(c, bsu) - defer disableServicePropertyFunc(c, bsu) - err := testImplFunc(c, bsu) +func runTestRequiringServiceProperties(c *chk.C, bsc *blobservice.Client, code string, + enableServicePropertyFunc func(*chk.C, *blobservice.Client), + testImplFunc func(*chk.C, *blobservice.Client) error, + disableServicePropertyFunc func(*chk.C, *blobservice.Client)) { + enableServicePropertyFunc(c, bsc) + defer disableServicePropertyFunc(c, bsc) + err := testImplFunc(c, bsc) // We cannot assume that the error indicative of slow update will necessarily be a StorageError. As in ListBlobs. 
if err != nil && err.Error() == code { time.Sleep(time.Second * 30) - err = testImplFunc(c, bsu) - c.AssertNoErr(err) + err = testImplFunc(c, bsc) + c.Assert(err, chk.IsNil) } } -func getContainerURLWithSAS(c asserter, credential azblob.SharedKeyCredential, containerName string) azblob.ContainerURL { - sasQueryParams, err := azblob.BlobSASSignatureValues{ - Protocol: azblob.SASProtocolHTTPS, - ExpiryTime: time.Now().UTC().Add(48 * time.Hour), - ContainerName: containerName, - Permissions: azblob.ContainerSASPermissions{Read: true, Add: true, Write: true, Create: true, Delete: true, List: true, Tag: true}.String(), - }.NewSASQueryParameters(&credential) +func getContainerURLWithSAS(c asserter, credential *blob.SharedKeyCredential, containerName string) *container.Client { + rawURL := fmt.Sprintf("https://%s.blob.core.windows.net/%s", + credential.AccountName(), containerName) + cc, err := container.NewClientWithSharedKeyCredential(rawURL, credential, nil) c.AssertNoErr(err) - // construct the url from scratch - qp := sasQueryParams.Encode() - rawURL := fmt.Sprintf("https://%s.blob.core.windows.net/%s?%s", - credential.AccountName(), containerName, qp) - - // convert the raw url and validate it was parsed successfully - fullURL, err := url.Parse(rawURL) + sasURL, err := cc.GetSASURL(blobsas.ContainerPermissions{Read: true, Add: true, Write: true, Create: true, Delete: true, List: true, Tag: true}, + time.Now().UTC().Add(48 * time.Hour), nil) c.AssertNoErr(err) - // TODO perhaps we need a global default pipeline - return azblob.NewContainerURL(*fullURL, azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{})) + cc, err = container.NewClientWithNoCredential(sasURL, nil) + c.AssertNoErr(err) + return cc } -func getBlobServiceURLWithSAS(c asserter, credential azblob.SharedKeyCredential) azblob.ServiceURL { - sasQueryParams, err := azblob.AccountSASSignatureValues{ - Protocol: azblob.SASProtocolHTTPS, - ExpiryTime: time.Now().Add(48 * time.Hour), - Permissions: azblob.AccountSASPermissions{Read: true, List: true, Write: true, Delete: true, DeletePreviousVersion: true, Add: true, Create: true, Update: true, Process: true}.String(), - Services: azblob.AccountSASServices{File: true, Blob: true, Queue: true}.String(), - ResourceTypes: azblob.AccountSASResourceTypes{Service: true, Container: true, Object: true}.String(), - }.NewSASQueryParameters(&credential) +func getBlobServiceURLWithSAS(c asserter, credential *blob.SharedKeyCredential) *blobservice.Client { + rawURL := fmt.Sprintf("https://%s.blob.core.windows.net/", + credential.AccountName()) + bsc, err := blobservice.NewClientWithSharedKeyCredential(rawURL, credential, nil) c.AssertNoErr(err) - // construct the url from scratch - qp := sasQueryParams.Encode() - rawURL := fmt.Sprintf("https://%s.blob.core.windows.net/?%s", - credential.AccountName(), qp) - - // convert the raw url and validate it was parsed successfully - fullURL, err := url.Parse(rawURL) + sasURL, err := bsc.GetSASURL(blobsas.AccountResourceTypes{Service: true, Container: true, Object: true}, + blobsas.AccountPermissions{Read: true, List: true, Write: true, Delete: true, DeletePreviousVersion: true, Add: true, Create: true, Update: true, Process: true}, + time.Now().UTC().Add(48 * time.Hour), nil) c.AssertNoErr(err) - return azblob.NewServiceURL(*fullURL, azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{})) + bsc, err = blobservice.NewClientWithNoCredential(sasURL, nil) + c.AssertNoErr(err) + return bsc } -func 
getFileServiceURLWithSAS(c asserter, credential azfile.SharedKeyCredential) azfile.ServiceURL { - sasQueryParams, err := azfile.AccountSASSignatureValues{ - Protocol: azfile.SASProtocolHTTPS, - ExpiryTime: time.Now().Add(48 * time.Hour), - Permissions: azfile.AccountSASPermissions{Read: true, List: true, Write: true, Delete: true, Add: true, Create: true, Update: true, Process: true}.String(), - Services: azfile.AccountSASServices{File: true, Blob: true, Queue: true}.String(), - ResourceTypes: azfile.AccountSASResourceTypes{Service: true, Container: true, Object: true}.String(), - }.NewSASQueryParameters(&credential) +func getFileServiceURLWithSAS(c asserter, credential *sharefile.SharedKeyCredential) *fileservice.Client { + rawURL := fmt.Sprintf("https://%s.file.core.windows.net/", + credential.AccountName()) + fsc, err := fileservice.NewClientWithSharedKeyCredential(rawURL, credential, nil) c.AssertNoErr(err) - - qp := sasQueryParams.Encode() - rawURL := fmt.Sprintf("https://%s.file.core.windows.net/?%s", credential.AccountName(), qp) - - fullURL, err := url.Parse(rawURL) + sasURL, err := fsc.GetSASURL(filesas.AccountResourceTypes{Service: true, Container: true, Object: true}, + filesas.AccountPermissions{Read: true, List: true, Write: true, Delete: true, Create: true}, + time.Now().UTC().Add(48 * time.Hour), nil) c.AssertNoErr(err) - return azfile.NewServiceURL(*fullURL, azfile.NewPipeline(azfile.NewAnonymousCredential(), azfile.PipelineOptions{})) + fsc, err = fileservice.NewClientWithNoCredential(sasURL, nil) + c.AssertNoErr(err) + return fsc } -func getShareURLWithSAS(c asserter, credential azfile.SharedKeyCredential, shareName string) azfile.ShareURL { - sasQueryParams, err := azfile.FileSASSignatureValues{ - Protocol: azfile.SASProtocolHTTPS, - ExpiryTime: time.Now().UTC().Add(48 * time.Hour), - ShareName: shareName, - Permissions: azfile.ShareSASPermissions{Read: true, Write: true, Create: true, Delete: true, List: true}.String(), - }.NewSASQueryParameters(&credential) +func getShareURLWithSAS(c asserter, credential *sharefile.SharedKeyCredential, shareName string) *share.Client { + rawURL := fmt.Sprintf("https://%s.file.core.windows.net/%s", + credential.AccountName(), shareName) + sc, err := share.NewClientWithSharedKeyCredential(rawURL, credential, nil) c.AssertNoErr(err) - - // construct the url from scratch - qp := sasQueryParams.Encode() - rawURL := fmt.Sprintf("https://%s.file.core.windows.net/%s?%s", - credential.AccountName(), shareName, qp) - - // convert the raw url and validate it was parsed successfully - fullURL, err := url.Parse(rawURL) + sasURL, err := sc.GetSASURL(filesas.SharePermissions{Read: true, Write: true, Create: true, Delete: true, List: true}, + time.Now().UTC().Add(48 * time.Hour), nil) c.AssertNoErr(err) - // TODO perhaps we need a global default pipeline - return azfile.NewShareURL(*fullURL, azfile.NewPipeline(azfile.NewAnonymousCredential(), azfile.PipelineOptions{})) + sc, err = share.NewClientWithNoCredential(sasURL, nil) + c.AssertNoErr(err) + return sc } func getAdlsServiceURLWithSAS(c asserter, credential azbfs.SharedKeyCredential) azbfs.ServiceURL { sasQueryParams, err := azbfs.AccountSASSignatureValues{ Protocol: azbfs.SASProtocolHTTPS, ExpiryTime: time.Now().Add(48 * time.Hour), - Permissions: azfile.AccountSASPermissions{Read: true, List: true, Write: true, Delete: true, Add: true, Create: true, Update: true, Process: true}.String(), - Services: azfile.AccountSASServices{File: true, Blob: true, Queue: true}.String(), - ResourceTypes: 
azfile.AccountSASResourceTypes{Service: true, Container: true, Object: true}.String(), + Permissions: "rwdlacup", + Services: "bqf", + ResourceTypes: "sco", }.NewSASQueryParameters(&credential) c.AssertNoErr(err) @@ -607,7 +578,6 @@ func (checker *stringContainsChecker) Check(params []interface{}, _ []string) (r } func GetContentTypeMap(fileExtensions []string) map[string]string { - extensionsMap := make(map[string]string) for _, ext := range fileExtensions { if guessedType := mime.TypeByExtension(ext); guessedType != "" { diff --git a/e2etest/managedDisks.go b/e2etest/managedDisks.go index e7166dfb6..d42e47dfc 100644 --- a/e2etest/managedDisks.go +++ b/e2etest/managedDisks.go @@ -55,7 +55,7 @@ func (config *ManagedDiskConfig) GetAccess() (*url.URL, error) { return nil, fmt.Errorf("failed to initialize request: %w", err) } - req.Header["Authorization"] = []string{"Bearer " + config.oauth.OAuthToken()} + req.Header["Authorization"] = []string{"Bearer " + config.oauth.Token} req.Header["Content-Type"] = []string{"application/json; charset=utf-8"} req.Header["Accept"] = []string{"application/json; charset=utf-8"} @@ -117,7 +117,7 @@ func (config *ManagedDiskConfig) RevokeAccess() error { return fmt.Errorf("failed to initialize request: %w", err) } - req.Header["Authorization"] = []string{"Bearer " + config.oauth.OAuthToken()} + req.Header["Authorization"] = []string{"Bearer " + config.oauth.Token} resp, err := http.DefaultClient.Do(req) if err != nil { diff --git a/e2etest/pointers.go b/e2etest/pointers.go deleted file mode 100644 index 15aa1fea8..000000000 --- a/e2etest/pointers.go +++ /dev/null @@ -1,6 +0,0 @@ -package e2etest - -// todo: upgrade to go 1.18 and use generics -func BoolPointer(b bool) *bool { - return &b -} diff --git a/e2etest/runner.go b/e2etest/runner.go index ff580fafc..5948ef5ac 100644 --- a/e2etest/runner.go +++ b/e2etest/runner.go @@ -78,7 +78,6 @@ func (t *TestRunner) SetAllFlags(p params, o Operation) { t.flags[key] = fmt.Sprintf(format, value) } - set("log-level", "debug", "debug") // TODO: TODO: nakulkar-msft there will be many more to add here set("recursive", p.recursive, false) @@ -138,6 +137,7 @@ func (t *TestRunner) computeArgs() []string { for key, value := range t.flags { args = append(args, fmt.Sprintf("--%s=%s", key, value)) } + args = append(args, "--log-level=DEBUG") return append(args, "--output-type=json") } diff --git a/e2etest/scenario_helpers.go b/e2etest/scenario_helpers.go index 13133ee92..ece983223 100644 --- a/e2etest/scenario_helpers.go +++ b/e2etest/scenario_helpers.go @@ -28,6 +28,19 @@ import ( "crypto/md5" "encoding/base64" "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob" + blobservice "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/directory" + sharefile "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" + fileservice "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/service" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/share" + "github.com/Azure/azure-storage-azcopy/v10/ste" "github.com/google/uuid" "io" "net/url" @@ -44,8 +57,6 @@ import ( 
"github.com/minio/minio-go" "github.com/Azure/azure-storage-azcopy/v10/common" - "github.com/Azure/azure-storage-blob-go/azblob" - "github.com/Azure/azure-storage-file-go/azfile" ) const defaultFileSize = 1024 @@ -262,7 +273,7 @@ func (s scenarioHelper) generateCommonRemoteScenarioForLocal(c asserter, dirPath return } -func (scenarioHelper) generateCommonRemoteScenarioForBlob(c asserter, containerURL azblob.ContainerURL, prefix string) (blobList []string) { +func (scenarioHelper) generateCommonRemoteScenarioForBlob(c asserter, containerClient *container.Client, prefix string) (blobList []string) { // make 50 blobs with random names // 10 of them at the top level // 10 of them in sub dir "sub1" @@ -272,11 +283,11 @@ func (scenarioHelper) generateCommonRemoteScenarioForBlob(c asserter, containerU blobList = make([]string, 50) for i := 0; i < 10; i++ { - _, blobName1 := createNewBlockBlob(c, containerURL, prefix+"top") - _, blobName2 := createNewBlockBlob(c, containerURL, prefix+"sub1/") - _, blobName3 := createNewBlockBlob(c, containerURL, prefix+"sub2/") - _, blobName4 := createNewBlockBlob(c, containerURL, prefix+"sub1/sub3/sub5/") - _, blobName5 := createNewBlockBlob(c, containerURL, prefix+specialNames[i]) + _, blobName1 := createNewBlockBlob(c, containerClient, prefix+"top") + _, blobName2 := createNewBlockBlob(c, containerClient, prefix+"sub1/") + _, blobName3 := createNewBlockBlob(c, containerClient, prefix+"sub2/") + _, blobName4 := createNewBlockBlob(c, containerClient, prefix+"sub1/sub3/sub5/") + _, blobName5 := createNewBlockBlob(c, containerClient, prefix+specialNames[i]) blobList[5*i] = blobName1 blobList[5*i+1] = blobName2 @@ -312,15 +323,15 @@ func (scenarioHelper) generateCommonRemoteScenarioForBlobFS(c asserter, filesyst return } -func (scenarioHelper) generateCommonRemoteScenarioForAzureFile(c asserter, shareURL azfile.ShareURL, prefix string) (fileList []string) { +func (scenarioHelper) generateCommonRemoteScenarioForAzureFile(c asserter, shareClient *share.Client, prefix string) (fileList []string) { fileList = make([]string, 50) for i := 0; i < 10; i++ { - _, fileName1 := createNewAzureFile(c, shareURL, prefix+"top") - _, fileName2 := createNewAzureFile(c, shareURL, prefix+"sub1/") - _, fileName3 := createNewAzureFile(c, shareURL, prefix+"sub2/") - _, fileName4 := createNewAzureFile(c, shareURL, prefix+"sub1/sub3/sub5/") - _, fileName5 := createNewAzureFile(c, shareURL, prefix+specialNames[i]) + _, fileName1 := createNewAzureFile(c, shareClient, prefix+"top") + _, fileName2 := createNewAzureFile(c, shareClient, prefix+"sub1/") + _, fileName3 := createNewAzureFile(c, shareClient, prefix+"sub2/") + _, fileName4 := createNewAzureFile(c, shareClient, prefix+"sub1/sub3/sub5/") + _, fileName5 := createNewAzureFile(c, shareClient, prefix+specialNames[i]) fileList[5*i] = fileName1 fileList[5*i+1] = fileName2 @@ -334,13 +345,13 @@ func (scenarioHelper) generateCommonRemoteScenarioForAzureFile(c asserter, share return } -func (s scenarioHelper) generateBlobContainersAndBlobsFromLists(c asserter, serviceURL azblob.ServiceURL, containerList []string, blobList []*testObject) { +func (s scenarioHelper) generateBlobContainersAndBlobsFromLists(c asserter, serviceClient *blobservice.Client, containerList []string, blobList []*testObject) { for _, containerName := range containerList { - curl := serviceURL.NewContainerURL(containerName) - _, err := curl.Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone) + curl := serviceClient.NewContainerClient(containerName) + _, err := 
curl.Create(ctx, nil) c.AssertNoErr(err) s.generateBlobsFromList(c, &generateBlobFromListOptions{ - containerURL: curl, + containerClient: curl, generateFromListOptions: generateFromListOptions{ fs: blobList, defaultSize: defaultStringFileSize, @@ -349,14 +360,14 @@ func (s scenarioHelper) generateBlobContainersAndBlobsFromLists(c asserter, serv } } -func (s scenarioHelper) generateFileSharesAndFilesFromLists(c asserter, serviceURL azfile.ServiceURL, shareList []string, fileList []*testObject) { +func (s scenarioHelper) generateFileSharesAndFilesFromLists(c asserter, serviceClient *fileservice.Client, shareList []string, fileList []*testObject) { for _, shareName := range shareList { - sURL := serviceURL.NewShareURL(shareName) - _, err := sURL.Create(ctx, azfile.Metadata{}, 0) + sURL := serviceClient.NewShareClient(shareName) + _, err := sURL.Create(ctx, nil) c.AssertNoErr(err) s.generateAzureFilesFromList(c, &generateAzureFilesFromListOptions{ - shareURL: sURL, + shareClient: sURL, fileList: fileList, defaultSize: defaultStringFileSize, }) @@ -390,11 +401,11 @@ type generateFromListOptions struct { } type generateBlobFromListOptions struct { - rawSASURL url.URL - containerURL azblob.ContainerURL - cpkInfo common.CpkInfo - cpkScopeInfo common.CpkScopeInfo - accessTier azblob.AccessTierType + rawSASURL url.URL + containerClient *container.Client + cpkInfo *blob.CPKInfo + cpkScopeInfo *blob.CPKScopeInfo + accessTier *blob.AccessTier generateFromListOptions } @@ -403,32 +414,32 @@ func (scenarioHelper) generateBlobsFromList(c asserter, options *generateBlobFro for _, b := range options.fs { switch b.creationProperties.entityType { case common.EEntityType.Folder(): // it's fine to create folders even when we're not explicitly testing them, UNLESS we're testing CPK-- AzCopy can't properly pick that up! 
- if !options.cpkInfo.Empty() || b.name == "" { + if options.cpkInfo != nil || b.name == "" { continue // can't write root, and can't handle dirs with CPK } if b.creationProperties.nameValueMetadata == nil { - b.creationProperties.nameValueMetadata = map[string]string{} + b.creationProperties.nameValueMetadata = map[string]*string{} } b.body = make([]byte, 0) - b.creationProperties.nameValueMetadata[common.POSIXFolderMeta] = "true" + b.creationProperties.nameValueMetadata[common.POSIXFolderMeta] = to.Ptr("true") mode := uint64(os.FileMode(common.DEFAULT_FILE_PERM) | os.ModeDir) - b.creationProperties.nameValueMetadata[common.POSIXModeMeta] = strconv.FormatUint(mode, 10) + b.creationProperties.nameValueMetadata[common.POSIXModeMeta] = to.Ptr(strconv.FormatUint(mode, 10)) b.creationProperties.posixProperties.AddToMetadata(b.creationProperties.nameValueMetadata) case common.EEntityType.Symlink(): if b.creationProperties.nameValueMetadata == nil { - b.creationProperties.nameValueMetadata = map[string]string{} + b.creationProperties.nameValueMetadata = map[string]*string{} } b.body = []byte(*b.creationProperties.symlinkTarget) - b.creationProperties.nameValueMetadata[common.POSIXSymlinkMeta] = "true" + b.creationProperties.nameValueMetadata[common.POSIXSymlinkMeta] = to.Ptr("true") mode := uint64(os.FileMode(common.DEFAULT_FILE_PERM) | os.ModeSymlink) - b.creationProperties.nameValueMetadata[common.POSIXModeMeta] = strconv.FormatUint(mode, 10) + b.creationProperties.nameValueMetadata[common.POSIXModeMeta] = to.Ptr(strconv.FormatUint(mode, 10)) b.creationProperties.posixProperties.AddToMetadata(b.creationProperties.nameValueMetadata) default: if b.creationProperties.nameValueMetadata == nil { - b.creationProperties.nameValueMetadata = map[string]string{} + b.creationProperties.nameValueMetadata = map[string]*string{} } b.creationProperties.posixProperties.AddToMetadata(b.creationProperties.nameValueMetadata) @@ -443,14 +454,17 @@ func (scenarioHelper) generateBlobsFromList(c asserter, options *generateBlobFro } } ad := blobResourceAdapter{b} - var reader *bytes.Reader + var reader io.ReadSeekCloser + var size int var sourceData []byte if b.body != nil { - reader = bytes.NewReader(b.body) + reader = streaming.NopCloser(bytes.NewReader(b.body)) sourceData = b.body + size = len(b.body) } else { reader, sourceData = getRandomDataAndReader(b.creationProperties.sizeBytes(c, options.defaultSize)) b.body = sourceData // set body + size = len(b.body) } // Setting content MD5 @@ -462,7 +476,8 @@ func (scenarioHelper) generateBlobsFromList(c asserter, options *generateBlobFro ad.obj.creationProperties.contentHeaders.contentMD5 = contentMD5[:] } - tags := ad.toBlobTags() + tags := ad.obj.creationProperties.blobTags + metadata := ad.obj.creationProperties.nameValueMetadata if options.accountType == EAccountType.HierarchicalNamespaceEnabled() { tags = nil @@ -474,71 +489,82 @@ func (scenarioHelper) generateBlobsFromList(c asserter, options *generateBlobFro switch b.creationProperties.blobType { case common.EBlobType.BlockBlob(), common.EBlobType.Detect(): - bb := options.containerURL.NewBlockBlobURL(b.name) + bb := options.containerClient.NewBlockBlobClient(b.name) - if options.accessTier == "" { - options.accessTier = azblob.DefaultAccessTier - } - - if reader.Size() > 0 { + if size > 0 { // to prevent the service from erroring out with an improper MD5, we opt to commit a block, then the list. 
blockID := base64.StdEncoding.EncodeToString([]byte(uuid.NewString())) - sResp, err := bb.StageBlock(ctx, - blockID, - reader, - azblob.LeaseAccessConditions{}, - nil, - common.ToClientProvidedKeyOptions(options.cpkInfo, options.cpkScopeInfo)) + _, err = bb.StageBlock(ctx, blockID, reader, + &blockblob.StageBlockOptions{ + CPKInfo: options.cpkInfo, + CPKScopeInfo: options.cpkScopeInfo, + }) c.AssertNoErr(err) - c.Assert(sResp.StatusCode(), equals(), 201) - cResp, err := bb.CommitBlockList(ctx, + _, err = bb.CommitBlockList(ctx, []string{blockID}, - headers, - ad.toMetadata(), - azblob.BlobAccessConditions{}, - options.accessTier, - ad.toBlobTags(), - common.ToClientProvidedKeyOptions(options.cpkInfo, options.cpkScopeInfo), - azblob.ImmutabilityPolicyOptions{}, - ) + &blockblob.CommitBlockListOptions{ + HTTPHeaders: headers, + Metadata: metadata, + Tier: options.accessTier, + Tags: tags, + CPKInfo: options.cpkInfo, + CPKScopeInfo: options.cpkScopeInfo, + }) c.AssertNoErr(err) - c.Assert(cResp.StatusCode(), equals(), 201) } else { // todo: invalid MD5 on empty blob is impossible like this, but it's doubtful we'll need to support it. // handle empty blobs - cResp, err := bb.Upload(ctx, - reader, - headers, - ad.toMetadata(), - azblob.BlobAccessConditions{}, - options.accessTier, - ad.toBlobTags(), - common.ToClientProvidedKeyOptions(options.cpkInfo, options.cpkScopeInfo), - azblob.ImmutabilityPolicyOptions{}) + _, err := bb.Upload(ctx, reader, + &blockblob.UploadOptions{ + HTTPHeaders: headers, + Metadata: metadata, + Tier: options.accessTier, + Tags: tags, + CPKInfo: options.cpkInfo, + CPKScopeInfo: options.cpkScopeInfo, + }) c.AssertNoErr(err) - c.Assert(cResp.StatusCode(), equals(), 201) } case common.EBlobType.PageBlob(): - pb := options.containerURL.NewPageBlobURL(b.name) - cResp, err := pb.Create(ctx, reader.Size(), 0, headers, ad.toMetadata(), azblob.BlobAccessConditions{}, azblob.DefaultPremiumBlobAccessTier, tags, common.ToClientProvidedKeyOptions(options.cpkInfo, options.cpkScopeInfo), azblob.ImmutabilityPolicyOptions{}) + pb := options.containerClient.NewPageBlobClient(b.name) + _, err := pb.Create(ctx, int64(size), + &pageblob.CreateOptions{ + SequenceNumber: to.Ptr(int64(0)), + HTTPHeaders: headers, + Metadata: metadata, + Tags: tags, + CPKInfo: options.cpkInfo, + CPKScopeInfo: options.cpkScopeInfo, + }) c.AssertNoErr(err) - c.Assert(cResp.StatusCode(), equals(), 201) - pbUpResp, err := pb.UploadPages(ctx, 0, reader, azblob.PageBlobAccessConditions{}, nil, common.ToClientProvidedKeyOptions(options.cpkInfo, options.cpkScopeInfo)) + _, err = pb.UploadPages(ctx, reader, blob.HTTPRange{Offset: 0, Count: int64(size)}, + &pageblob.UploadPagesOptions{ + CPKInfo: options.cpkInfo, + CPKScopeInfo: options.cpkScopeInfo, + }) c.AssertNoErr(err) - c.Assert(pbUpResp.StatusCode(), equals(), 201) case common.EBlobType.AppendBlob(): - ab := options.containerURL.NewAppendBlobURL(b.name) - cResp, err := ab.Create(ctx, headers, ad.toMetadata(), azblob.BlobAccessConditions{}, tags, common.ToClientProvidedKeyOptions(options.cpkInfo, options.cpkScopeInfo), azblob.ImmutabilityPolicyOptions{}) + ab := options.containerClient.NewAppendBlobClient(b.name) + _, err := ab.Create(ctx, + &appendblob.CreateOptions{ + HTTPHeaders: headers, + Metadata: metadata, + Tags: tags, + CPKInfo: options.cpkInfo, + CPKScopeInfo: options.cpkScopeInfo, + }) c.AssertNoErr(err) - c.Assert(cResp.StatusCode(), equals(), 201) - abUpResp, err := ab.AppendBlock(ctx, reader, azblob.AppendBlobAccessConditions{}, nil, 
common.ToClientProvidedKeyOptions(options.cpkInfo, options.cpkScopeInfo)) + _, err = ab.AppendBlock(ctx, reader, + &appendblob.AppendBlockOptions{ + CPKInfo: options.cpkInfo, + CPKScopeInfo: options.cpkScopeInfo, + }) c.AssertNoErr(err) - c.Assert(abUpResp.StatusCode(), equals(), 201) } if b.creationProperties.adlsPermissionsACL != nil { @@ -574,11 +600,12 @@ func (scenarioHelper) generateBlobsFromList(c asserter, options *generateBlobFro time.Sleep(time.Millisecond * 1050) } -func (s scenarioHelper) enumerateContainerBlobProperties(a asserter, containerURL azblob.ContainerURL) map[string]*objectProperties { +func (s scenarioHelper) enumerateContainerBlobProperties(a asserter, containerClient *container.Client) map[string]*objectProperties { result := make(map[string]*objectProperties) - for marker := (azblob.Marker{}); marker.NotDone(); { - listBlob, err := containerURL.ListBlobsFlatSegment(context.TODO(), marker, azblob.ListBlobsSegmentOptions{Details: azblob.BlobListingDetails{Metadata: true, Tags: true}}) + pager := containerClient.NewListBlobsFlatPager(&container.ListBlobsFlatOptions{Include: container.ListBlobsInclude{Metadata: true, Tags: true}}) + for pager.More() { + listBlob, err := pager.NextPage(context.TODO()) a.AssertNoErr(err) for _, blobInfo := range listBlob.Segment.BlobItems { @@ -594,7 +621,7 @@ func (s scenarioHelper) enumerateContainerBlobProperties(a asserter, containerUR contentType: bp.ContentType, contentMD5: bp.ContentMD5, } - md := map[string]string(blobInfo.Metadata) + md := blobInfo.Metadata props := objectProperties{ entityType: common.EEntityType.File(), // todo: posix properties includes folders @@ -602,118 +629,101 @@ func (s scenarioHelper) enumerateContainerBlobProperties(a asserter, containerUR contentHeaders: &h, nameValueMetadata: md, creationTime: bp.CreationTime, - lastWriteTime: &bp.LastModified, - cpkInfo: &common.CpkInfo{EncryptionKeySha256: bp.CustomerProvidedKeySha256}, - cpkScopeInfo: &common.CpkScopeInfo{EncryptionScope: bp.EncryptionScope}, - adlsPermissionsACL: bp.ACL, + lastWriteTime: bp.LastModified, + cpkInfo: &blob.CPKInfo{EncryptionKeySHA256: bp.CustomerProvidedKeySHA256}, + cpkScopeInfo: &blob.CPKScopeInfo{EncryptionScope: bp.EncryptionScope}, + // TODO : Return ACL in list + //adlsPermissionsACL: bp.ACL, // smbAttributes and smbPermissions don't exist in blob } if blobInfo.BlobTags != nil { blobTagsMap := common.BlobTags{} for _, blobTag := range blobInfo.BlobTags.BlobTagSet { - blobTagsMap[url.QueryEscape(blobTag.Key)] = url.QueryEscape(blobTag.Value) + blobTagsMap[url.QueryEscape(*blobTag.Key)] = url.QueryEscape(*blobTag.Value) } props.blobTags = blobTagsMap } - props.blobType = common.FromAzBlobType(blobInfo.Properties.BlobType) + switch *blobInfo.Properties.BlobType { + case blob.BlobTypeBlockBlob: + props.blobType = common.EBlobType.BlockBlob() + case blob.BlobTypePageBlob: + props.blobType = common.EBlobType.PageBlob() + case blob.BlobTypeAppendBlob: + props.blobType = common.EBlobType.AppendBlob() + default: + props.blobType = common.EBlobType.Detect() + } - result[relativePath] = &props + result[*relativePath] = &props } - - marker = listBlob.NextMarker } return result } func (s scenarioHelper) downloadBlobContent(a asserter, options downloadContentOptions) []byte { - blobURL := options.containerURL.NewBlobURL(options.resourceRelPath) - cpk := common.ToClientProvidedKeyOptions(options.cpkInfo, options.cpkScopeInfo) - downloadResp, err := blobURL.Download(ctx, 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false, cpk) + 
blobClient := options.containerClient.NewBlobClient(options.resourceRelPath) + downloadResp, err := blobClient.DownloadStream(ctx, &blob.DownloadStreamOptions{CPKInfo: options.cpkInfo, CPKScopeInfo: options.cpkScopeInfo}) a.AssertNoErr(err) - retryReader := downloadResp.Body(azblob.RetryReaderOptions{}) - defer retryReader.Close() - - destData, err := io.ReadAll(retryReader) + destData, err := io.ReadAll(downloadResp.Body) + defer downloadResp.Body.Close() a.AssertNoErr(err) - return destData[:] + return destData } -func (scenarioHelper) generatePageBlobsFromList(c asserter, containerURL azblob.ContainerURL, blobList []string, data string) { +func (scenarioHelper) generatePageBlobsFromList(c asserter, containerClient *container.Client, blobList []string, data string) { for _, blobName := range blobList { // Create the blob (PUT blob) - blob := containerURL.NewPageBlobURL(blobName) - cResp, err := blob.Create(ctx, + bc := containerClient.NewPageBlobClient(blobName) + _, err := bc.Create(ctx, int64(len(data)), - 0, - azblob.BlobHTTPHeaders{ - ContentType: "text/random", - }, - azblob.Metadata{}, - azblob.BlobAccessConditions{}, - azblob.DefaultPremiumBlobAccessTier, - nil, - azblob.ClientProvidedKeyOptions{}, - azblob.ImmutabilityPolicyOptions{}, - ) + &pageblob.CreateOptions{ + SequenceNumber: to.Ptr(int64(0)), + HTTPHeaders: &blob.HTTPHeaders{BlobContentType: to.Ptr("text/random")}, + }) c.AssertNoErr(err) - c.Assert(cResp.StatusCode(), equals(), 201) // Create the page (PUT page) - uResp, err := blob.UploadPages(ctx, - 0, - strings.NewReader(data), - azblob.PageBlobAccessConditions{}, - nil, - azblob.ClientProvidedKeyOptions{}, - ) + _, err = bc.UploadPages(ctx, + streaming.NopCloser(strings.NewReader(data)), + blob.HTTPRange{Offset: 0, Count: int64(len(data))}, + nil) c.AssertNoErr(err) - c.Assert(uResp.StatusCode(), equals(), 201) } // sleep a bit so that the blobs' lmts are guaranteed to be in the past time.Sleep(time.Millisecond * 1050) } -func (scenarioHelper) generateAppendBlobsFromList(c asserter, containerURL azblob.ContainerURL, blobList []string, data string) { +func (scenarioHelper) generateAppendBlobsFromList(c asserter, containerClient *container.Client, blobList []string, data string) { for _, blobName := range blobList { // Create the blob (PUT blob) - blob := containerURL.NewAppendBlobURL(blobName) - cResp, err := blob.Create(ctx, - azblob.BlobHTTPHeaders{ - ContentType: "text/random", - }, - azblob.Metadata{}, - azblob.BlobAccessConditions{}, - nil, - azblob.ClientProvidedKeyOptions{}, - azblob.ImmutabilityPolicyOptions{}, - ) + bc := containerClient.NewAppendBlobClient(blobName) + _, err := bc.Create(ctx, + &appendblob.CreateOptions{ + HTTPHeaders: &blob.HTTPHeaders{BlobContentType: to.Ptr("text/random")}, + }) c.AssertNoErr(err) - c.Assert(cResp.StatusCode(), equals(), 201) // Append a block (PUT block) - uResp, err := blob.AppendBlock(ctx, - strings.NewReader(data), - azblob.AppendBlobAccessConditions{}, - nil, azblob.ClientProvidedKeyOptions{}) + _, err = bc.AppendBlock(ctx, streaming.NopCloser(strings.NewReader(data)), nil) c.AssertNoErr(err) - c.Assert(uResp.StatusCode(), equals(), 201) } // sleep a bit so that the blobs' lmts are guaranteed to be in the past time.Sleep(time.Millisecond * 1050) } -func (scenarioHelper) generateBlockBlobWithAccessTier(c asserter, containerURL azblob.ContainerURL, blobName string, accessTier azblob.AccessTierType) { - blob := containerURL.NewBlockBlobURL(blobName) - cResp, err := blob.Upload(ctx, 
strings.NewReader(blockBlobDefaultData), azblob.BlobHTTPHeaders{}, - nil, azblob.BlobAccessConditions{}, accessTier, nil, azblob.ClientProvidedKeyOptions{}, azblob.ImmutabilityPolicyOptions{}) +func (scenarioHelper) generateBlockBlobWithAccessTier(c asserter, containerClient *container.Client, blobName string, accessTier *blob.AccessTier) { + bc := containerClient.NewBlockBlobClient(blobName) + _, err := bc.Upload(ctx, streaming.NopCloser(strings.NewReader(blockBlobDefaultData)), + &blockblob.UploadOptions{ + Tier: accessTier, + }) c.AssertNoErr(err) - c.Assert(cResp.StatusCode(), equals(), 201) } // create the demanded objects @@ -727,10 +737,10 @@ func (scenarioHelper) generateObjects(c asserter, client *minio.Client, bucketNa } // create the demanded files -func (scenarioHelper) generateFlatFiles(c asserter, shareURL azfile.ShareURL, fileList []string) { +func (scenarioHelper) generateFlatFiles(c asserter, shareClient *share.Client, fileList []string) { for _, fileName := range fileList { - file := shareURL.NewRootDirectoryURL().NewFileURL(fileName) - err := azfile.UploadBufferToAzureFile(ctx, []byte(fileDefaultData), file, azfile.UploadToAzureFileOptions{}) + fileClient := shareClient.NewRootDirectoryClient().NewFileClient(fileName) + err := fileClient.UploadBuffer(ctx, []byte(fileDefaultData), nil) c.AssertNoErr(err) } @@ -776,7 +786,7 @@ func (scenarioHelper) generateCommonRemoteScenarioForS3(c asserter, client *mini } type generateAzureFilesFromListOptions struct { - shareURL azfile.ShareURL + shareClient *share.Client fileList []*testObject defaultSize string } @@ -787,29 +797,32 @@ func (scenarioHelper) generateAzureFilesFromList(c asserter, options *generateAz ad := filesResourceAdapter{f} if f.isFolder() { // make sure the dir exists - file := options.shareURL.NewRootDirectoryURL().NewFileURL(path.Join(f.name, "dummyChild")) + file := options.shareClient.NewRootDirectoryClient().NewFileClient(path.Join(f.name, "dummyChild")) generateParentsForAzureFile(c, file) - dir := options.shareURL.NewRootDirectoryURL().NewDirectoryURL(f.name) + dir := options.shareClient.NewRootDirectoryClient().NewSubdirectoryClient(f.name) // set its metadata if any if f.creationProperties.nameValueMetadata != nil { - _, err := dir.SetMetadata(context.TODO(), ad.toMetadata()) + _, err := dir.SetMetadata(context.TODO(), &directory.SetMetadataOptions{Metadata: ad.obj.creationProperties.nameValueMetadata}) c.AssertNoErr(err) } if f.creationProperties.smbPermissionsSddl != nil || f.creationProperties.smbAttributes != nil || f.creationProperties.lastWriteTime != nil { - _, err := dir.SetProperties(ctx, ad.toHeaders(c, options.shareURL).SMBProperties) + _, err := dir.SetProperties(ctx, &directory.SetPropertiesOptions{ + FileSMBProperties: ad.toSMBProperties(c), + FilePermissions: ad.toPermissions(c, options.shareClient), + }) c.AssertNoErr(err) if f.creationProperties.smbPermissionsSddl != nil { - prop, err := dir.GetProperties(ctx) + prop, err := dir.GetProperties(ctx, nil) c.AssertNoErr(err) - perm, err := options.shareURL.GetPermission(ctx, prop.FilePermissionKey()) + perm, err := options.shareClient.GetPermission(ctx, *prop.FilePermissionKey, nil) c.AssertNoErr(err) - dest, _ := sddl.ParseSDDL(perm.Permission) + dest, _ := sddl.ParseSDDL(*perm.Permission) source, _ := sddl.ParseSDDL(*f.creationProperties.smbPermissionsSddl) c.Assert(dest.Compare(source), equals(), true) @@ -826,22 +839,23 @@ func (scenarioHelper) generateAzureFilesFromList(c asserter, options *generateAz // TODO: I'm pretty sure we don't 
prserve lastWritetime or contentProperties (headers) for folders, so the above if statement doesn't test those // Is that the correct decision? } else if f.creationProperties.entityType == common.EEntityType.File() { - file := options.shareURL.NewRootDirectoryURL().NewFileURL(f.name) + fileClient := options.shareClient.NewRootDirectoryClient().NewFileClient(f.name) // create parents first - generateParentsForAzureFile(c, file) + generateParentsForAzureFile(c, fileClient) // create the file itself fileSize := int64(f.creationProperties.sizeBytes(c, options.defaultSize)) - var contentR *bytes.Reader + var contentR io.ReadSeekCloser var contentD []byte if f.body != nil { - contentR = bytes.NewReader(f.body) + contentR = streaming.NopCloser(bytes.NewReader(f.body)) contentD = f.body - fileSize = contentR.Size() + fileSize = int64(len(f.body)) } else { contentR, contentD = getRandomDataAndReader(int(fileSize)) f.body = contentD + fileSize = int64(len(f.body)) } if f.creationProperties.contentHeaders == nil { f.creationProperties.contentHeaders = &contentHeaders{} @@ -851,13 +865,15 @@ func (scenarioHelper) generateAzureFilesFromList(c asserter, options *generateAz f.creationProperties.contentHeaders.contentMD5 = contentMD5[:] } - headers := ad.toHeaders(c, options.shareURL) - - cResp, err := file.Create(ctx, fileSize, headers, ad.toMetadata()) + _, err := fileClient.Create(ctx, fileSize, &sharefile.CreateOptions{ + SMBProperties: ad.toSMBProperties(c), + Permissions: ad.toPermissions(c, options.shareClient), + HTTPHeaders: ad.toHeaders(), + Metadata: ad.obj.creationProperties.nameValueMetadata, + }) c.AssertNoErr(err) - c.Assert(cResp.StatusCode(), equals(), 201) - _, err = file.UploadRange(context.Background(), 0, contentR, nil) + _, err = fileClient.UploadRange(context.Background(), 0, contentR, nil) if err == nil { c.Failed() } @@ -875,17 +891,21 @@ func (scenarioHelper) generateAzureFilesFromList(c asserter, options *generateAz */ - _, err := file.SetHTTPHeaders(ctx, headers) + _, err := fileClient.SetHTTPHeaders(ctx, &sharefile.SetHTTPHeadersOptions{ + HTTPHeaders: ad.toHeaders(), + SMBProperties: ad.toSMBProperties(c), + Permissions: ad.toPermissions(c, options.shareClient), + }) c.AssertNoErr(err) if f.creationProperties.smbPermissionsSddl != nil { - prop, err := file.GetProperties(ctx) + prop, err := fileClient.GetProperties(ctx, nil) c.AssertNoErr(err) - perm, err := options.shareURL.GetPermission(ctx, prop.FilePermissionKey()) + perm, err := options.shareClient.GetPermission(ctx, *prop.FilePermissionKey, nil) c.AssertNoErr(err) - dest, _ := sddl.ParseSDDL(perm.Permission) + dest, _ := sddl.ParseSDDL(*perm.Permission) source, _ := sddl.ParseSDDL(*f.creationProperties.smbPermissionsSddl) c.Assert(dest.Compare(source), equals(), true) @@ -902,126 +922,117 @@ func (scenarioHelper) generateAzureFilesFromList(c asserter, options *generateAz time.Sleep(time.Millisecond * 1050) } -func (s scenarioHelper) enumerateShareFileProperties(a asserter, shareURL azfile.ShareURL) map[string]*objectProperties { - var dirQ []azfile.DirectoryURL +func (s scenarioHelper) enumerateShareFileProperties(a asserter, sc *share.Client) map[string]*objectProperties { + var dirQ []*directory.Client result := make(map[string]*objectProperties) - root := shareURL.NewRootDirectoryURL() - rootProps, err := root.GetProperties(ctx) + root := sc.NewRootDirectoryClient() + rootProps, err := root.GetProperties(ctx, nil) + a.AssertNoErr(err) + rootAttr, err := sharefile.ParseNTFSFileAttributes(rootProps.FileAttributes) 
a.AssertNoErr(err) - rootAttr := uint32(azfile.ParseFileAttributeFlagsString(rootProps.FileAttributes())) var rootPerm *string - if permKey := rootProps.FilePermissionKey(); permKey != "" { - sharePerm, err := shareURL.GetPermission(ctx, permKey) + if permKey := rootProps.FilePermissionKey; permKey != nil { + sharePerm, err := sc.GetPermission(ctx, *permKey, nil) a.AssertNoErr(err, "Failed to get permissions from key") - rootPerm = &sharePerm.Permission + rootPerm = sharePerm.Permission } result[""] = &objectProperties{ entityType: common.EEntityType.Folder(), smbPermissionsSddl: rootPerm, - smbAttributes: &rootAttr, + smbAttributes: to.Ptr(ste.FileAttributesToUint32(*rootAttr)), } dirQ = append(dirQ, root) for i := 0; i < len(dirQ); i++ { currentDirURL := dirQ[i] - for marker := (azfile.Marker{}); marker.NotDone(); { - lResp, err := currentDirURL.ListFilesAndDirectoriesSegment(context.TODO(), marker, azfile.ListFilesAndDirectoriesOptions{}) + pager := currentDirURL.NewListFilesAndDirectoriesPager(nil) + for pager.More() { + lResp, err := pager.NextPage(context.TODO()) a.AssertNoErr(err) // Process the files and folders we listed - for _, fileInfo := range lResp.FileItems { - fileURL := currentDirURL.NewFileURL(fileInfo.Name) - fProps, err := fileURL.GetProperties(context.TODO()) + for _, fileInfo := range lResp.Segment.Files { + fileURL := currentDirURL.NewFileClient(*fileInfo.Name) + fProps, err := fileURL.GetProperties(context.TODO(), nil) a.AssertNoErr(err) // Construct the properties object - fileSize := fProps.ContentLength() - creationTime, err := time.Parse(azfile.ISO8601, fProps.FileCreationTime()) - a.AssertNoErr(err) - lastWriteTime, err := time.Parse(azfile.ISO8601, fProps.FileLastWriteTime()) - a.AssertNoErr(err) - contentHeader := fProps.NewHTTPHeaders() h := contentHeaders{ - cacheControl: &contentHeader.CacheControl, - contentDisposition: &contentHeader.ContentDisposition, - contentEncoding: &contentHeader.ContentEncoding, - contentLanguage: &contentHeader.ContentLanguage, - contentType: &contentHeader.ContentType, - contentMD5: contentHeader.ContentMD5, + cacheControl: fProps.CacheControl, + contentDisposition: fProps.ContentDisposition, + contentEncoding: fProps.ContentEncoding, + contentLanguage: fProps.ContentLanguage, + contentType: fProps.ContentType, + contentMD5: fProps.ContentMD5, } - fileAttrs := uint32(azfile.ParseFileAttributeFlagsString(fProps.FileAttributes())) - permissionKey := fProps.FilePermissionKey() + attr, err := sharefile.ParseNTFSFileAttributes(fProps.FileAttributes) + a.AssertNoErr(err) + fileAttrs := ste.FileAttributesToUint32(*attr) + permissionKey := fProps.FilePermissionKey var perm string - if permissionKey != "" { - sharePerm, err := shareURL.GetPermission(ctx, permissionKey) + if permissionKey != nil { + sharePerm, err := sc.GetPermission(ctx, *permissionKey, nil) a.AssertNoErr(err, "Failed to get permissions from key") - perm = sharePerm.Permission + perm = *sharePerm.Permission } props := objectProperties{ entityType: common.EEntityType.File(), // only enumerating files in list call - size: &fileSize, - nameValueMetadata: fProps.NewMetadata(), + size: fProps.ContentLength, + nameValueMetadata: fProps.Metadata, contentHeaders: &h, - creationTime: &creationTime, - lastWriteTime: &lastWriteTime, + creationTime: fProps.FileCreationTime, + lastWriteTime: fProps.FileLastWriteTime, smbAttributes: &fileAttrs, smbPermissionsSddl: &perm, } - relativePath := lResp.DirectoryPath + "/" + relativePath := *lResp.DirectoryPath + "/" if relativePath == "/" 
{ relativePath = "" } - result[relativePath+fileInfo.Name] = &props + result[relativePath+*fileInfo.Name] = &props } - for _, dirInfo := range lResp.DirectoryItems { - dirURL := currentDirURL.NewDirectoryURL(dirInfo.Name) - dProps, err := dirURL.GetProperties(context.TODO()) + for _, dirInfo := range lResp.Segment.Directories { + dirURL := currentDirURL.NewSubdirectoryClient(*dirInfo.Name) + dProps, err := dirURL.GetProperties(context.TODO(), nil) a.AssertNoErr(err) // Construct the properties object - creationTime, err := time.Parse(azfile.ISO8601, dProps.FileCreationTime()) - a.AssertNoErr(err) - lastWriteTime, err := time.Parse(azfile.ISO8601, dProps.FileLastWriteTime()) - a.AssertNoErr(err) - // Grab the permissions - permKey := dProps.FilePermissionKey() + permissionKey := dProps.FilePermissionKey var perm string - if permKey != "" { - permResp, err := shareURL.GetPermission(ctx, permKey) + if permissionKey != nil { + sharePerm, err := sc.GetPermission(ctx, *permissionKey, nil) a.AssertNoErr(err, "Failed to get permissions from key") - perm = permResp.Permission + perm = *sharePerm.Permission } // Set up properties props := objectProperties{ entityType: common.EEntityType.Folder(), // Only enumerating directories in list call - nameValueMetadata: dProps.NewMetadata(), - creationTime: &creationTime, - lastWriteTime: &lastWriteTime, + nameValueMetadata: dProps.Metadata, + creationTime: dProps.FileCreationTime, + lastWriteTime: dProps.FileLastWriteTime, smbPermissionsSddl: &perm, } // get the directory name properly - relativePath := lResp.DirectoryPath + "/" + relativePath := *lResp.DirectoryPath + "/" if relativePath == "/" { relativePath = "" } - result[relativePath+dirInfo.Name] = &props + result[relativePath+*dirInfo.Name] = &props dirQ = append(dirQ, dirURL) } - - marker = lResp.NextMarker } } @@ -1029,16 +1040,13 @@ func (s scenarioHelper) enumerateShareFileProperties(a asserter, shareURL azfile } func (s scenarioHelper) downloadFileContent(a asserter, options downloadContentOptions) []byte { - fileURL := options.shareURL.NewRootDirectoryURL().NewFileURL(options.resourceRelPath) - downloadResp, err := fileURL.Download(ctx, 0, azfile.CountToEnd, false) + fileURL := options.shareClient.NewRootDirectoryClient().NewFileClient(options.resourceRelPath) + downloadResp, err := fileURL.DownloadStream(ctx, nil) a.AssertNoErr(err) - retryReader := downloadResp.Body(azfile.RetryReaderOptions{}) - defer retryReader.Close() // The client must close the response body when finished with it - - destData, err := io.ReadAll(retryReader) + destData, err := io.ReadAll(downloadResp.Body) + defer downloadResp.Body.Close() a.AssertNoErr(err) - downloadResp.Body(azfile.RetryReaderOptions{}) return destData } @@ -1089,37 +1097,37 @@ func (scenarioHelper) addPrefix(list []string, prefix string) []string { return modifiedList } -func (scenarioHelper) getRawContainerURLWithSAS(c asserter, containerName string) url.URL { +func (scenarioHelper) getRawContainerURLWithSAS(c asserter, containerName string) string { accountName, accountKey := GlobalInputManager{}.GetAccountAndKey(EAccountType.Standard()) - credential, err := azblob.NewSharedKeyCredential(accountName, accountKey) + credential, err := blob.NewSharedKeyCredential(accountName, accountKey) c.AssertNoErr(err) - containerURLWithSAS := getContainerURLWithSAS(c, *credential, containerName) + containerURLWithSAS := getContainerURLWithSAS(c, credential, containerName) return containerURLWithSAS.URL() } -func (scenarioHelper) getRawBlobURLWithSAS(c asserter, 
containerName string, blobName string) url.URL { +func (scenarioHelper) getRawBlobURLWithSAS(c asserter, containerName string, blobName string) string { accountName, accountKey := GlobalInputManager{}.GetAccountAndKey(EAccountType.Standard()) - credential, err := azblob.NewSharedKeyCredential(accountName, accountKey) + credential, err := blob.NewSharedKeyCredential(accountName, accountKey) c.AssertNoErr(err) - containerURLWithSAS := getContainerURLWithSAS(c, *credential, containerName) - blobURLWithSAS := containerURLWithSAS.NewBlockBlobURL(blobName) + containerURLWithSAS := getContainerURLWithSAS(c, credential, containerName) + blobURLWithSAS := containerURLWithSAS.NewBlockBlobClient(blobName) return blobURLWithSAS.URL() } -func (scenarioHelper) getRawBlobServiceURLWithSAS(c asserter) url.URL { +func (scenarioHelper) getRawBlobServiceURLWithSAS(c asserter) string { accountName, accountKey := GlobalInputManager{}.GetAccountAndKey(EAccountType.Standard()) - credential, err := azblob.NewSharedKeyCredential(accountName, accountKey) + credential, err := blob.NewSharedKeyCredential(accountName, accountKey) c.AssertNoErr(err) - return getBlobServiceURLWithSAS(c, *credential).URL() + return getBlobServiceURLWithSAS(c, credential).URL() } -func (scenarioHelper) getRawFileServiceURLWithSAS(c asserter) url.URL { +func (scenarioHelper) getRawFileServiceURLWithSAS(c asserter) string { accountName, accountKey := GlobalInputManager{}.GetAccountAndKey(EAccountType.Standard()) - credential, err := azfile.NewSharedKeyCredential(accountName, accountKey) + credential, err := sharefile.NewSharedKeyCredential(accountName, accountKey) c.AssertNoErr(err) - return getFileServiceURLWithSAS(c, *credential).URL() + return getFileServiceURLWithSAS(c, credential).URL() } func (scenarioHelper) getRawAdlsServiceURLWithSAS(c asserter) azbfs.ServiceURL { @@ -1129,28 +1137,27 @@ func (scenarioHelper) getRawAdlsServiceURLWithSAS(c asserter) azbfs.ServiceURL { return getAdlsServiceURLWithSAS(c, *credential) } -func (scenarioHelper) getBlobServiceURL(c asserter) azblob.ServiceURL { +func (scenarioHelper) getBlobServiceURL(c asserter) *blobservice.Client { accountName, accountKey := GlobalInputManager{}.GetAccountAndKey(EAccountType.Standard()) - credential, err := azblob.NewSharedKeyCredential(accountName, accountKey) + credential, err := blob.NewSharedKeyCredential(accountName, accountKey) c.AssertNoErr(err) rawURL := fmt.Sprintf("https://%s.blob.core.windows.net", credential.AccountName()) - // convert the raw url and validate it was parsed successfully - fullURL, err := url.Parse(rawURL) + client, err := blobservice.NewClientWithSharedKeyCredential(rawURL, credential, nil) c.AssertNoErr(err) - return azblob.NewServiceURL(*fullURL, azblob.NewPipeline(credential, azblob.PipelineOptions{})) + return client } -func (s scenarioHelper) getContainerURL(c asserter, containerName string) azblob.ContainerURL { +func (s scenarioHelper) getContainerURL(c asserter, containerName string) *container.Client { serviceURL := s.getBlobServiceURL(c) - containerURL := serviceURL.NewContainerURL(containerName) + containerURL := serviceURL.NewContainerClient(containerName) return containerURL } func (scenarioHelper) getRawS3AccountURL(c asserter, region string) url.URL { - rawURL := fmt.Sprintf("https://s3%s.amazonaws.com", common.IffString(region == "", "", "-"+region)) + rawURL := fmt.Sprintf("https://s3%s.amazonaws.com", common.Iff(region == "", "", "-"+region)) fullURL, err := url.Parse(rawURL) c.AssertNoErr(err) @@ -1160,7 +1167,7 @@ func 
(scenarioHelper) getRawS3AccountURL(c asserter, region string) url.URL { // TODO: Possibly add virtual-hosted-style and dual stack support. Currently use path style for testing. func (scenarioHelper) getRawS3BucketURL(c asserter, region string, bucketName string) url.URL { - rawURL := fmt.Sprintf("https://s3%s.amazonaws.com/%s", common.IffString(region == "", "", "-"+region), bucketName) + rawURL := fmt.Sprintf("https://s3%s.amazonaws.com/%s", common.Iff(region == "", "", "-"+region), bucketName) fullURL, err := url.Parse(rawURL) c.AssertNoErr(err) @@ -1169,26 +1176,10 @@ func (scenarioHelper) getRawS3BucketURL(c asserter, region string, bucketName st } func (scenarioHelper) getRawS3ObjectURL(c asserter, region string, bucketName string, objectName string) url.URL { - rawURL := fmt.Sprintf("https://s3%s.amazonaws.com/%s/%s", common.IffString(region == "", "", "-"+region), bucketName, objectName) + rawURL := fmt.Sprintf("https://s3%s.amazonaws.com/%s/%s", common.Iff(region == "", "", "-"+region), bucketName, objectName) fullURL, err := url.Parse(rawURL) c.AssertNoErr(err) return *fullURL } - -func (scenarioHelper) getRawFileURLWithSAS(c asserter, shareName string, fileName string) url.URL { - credential, err := getGenericCredentialForFile("") - c.AssertNoErr(err) - shareURLWithSAS := getShareURLWithSAS(c, *credential, shareName) - fileURLWithSAS := shareURLWithSAS.NewRootDirectoryURL().NewFileURL(fileName) - return fileURLWithSAS.URL() -} - -func (scenarioHelper) getRawShareURLWithSAS(c asserter, shareName string) url.URL { - accountName, accountKey := GlobalInputManager{}.GetAccountAndKey(EAccountType.Standard()) - credential, err := azfile.NewSharedKeyCredential(accountName, accountKey) - c.AssertNoErr(err) - shareURLWithSAS := getShareURLWithSAS(c, *credential, shareName) - return shareURLWithSAS.URL() -} diff --git a/e2etest/zt_basic_copy_sync_remove_test.go b/e2etest/zt_basic_copy_sync_remove_test.go index cbcb01add..20e888dba 100644 --- a/e2etest/zt_basic_copy_sync_remove_test.go +++ b/e2etest/zt_basic_copy_sync_remove_test.go @@ -24,9 +24,10 @@ import ( "crypto/md5" "errors" "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" "github.com/Azure/azure-storage-azcopy/v10/azbfs" "github.com/Azure/azure-storage-azcopy/v10/common" - "github.com/Azure/azure-storage-blob-go/azblob" "os" "path/filepath" "runtime" @@ -387,10 +388,11 @@ func TestBasic_CopyRemoveContainerHNS(t *testing.T) { a := h.GetAsserter() s := h.(*scenario) r := s.state.source.(*resourceBlobContainer) - urlParts := azblob.NewBlobURLParts(r.containerURL.URL()) + urlParts, err := blob.ParseURL(r.containerClient.URL()) + a.Assert(err, equals(), nil) fsURL := TestResourceFactory{}.GetDatalakeServiceURL(r.accountType).NewFileSystemURL(urlParts.ContainerName).NewDirectoryURL("") - _, err := fsURL.GetAccessControl(ctx) + _, err = fsURL.GetAccessControl(ctx) a.Assert(err, notEquals(), nil) stgErr, ok := err.(azbfs.StorageError) a.Assert(ok, equals(), true) @@ -840,7 +842,7 @@ func TestBasic_SyncLMTSwitch_PreferServiceLMT(t *testing.T) { anonymousAuthOnly, anonymousAuthOnly, params{ - preserveSMBInfo: BoolPointer(false), + preserveSMBInfo: to.Ptr(false), }, &hooks{ beforeRunJob: func(h hookHelper) { @@ -888,7 +890,7 @@ func TestBasic_SyncLMTSwitch_PreferSMBLMT(t *testing.T) { anonymousAuthOnly, params{ // enforce for Linux/MacOS tests - preserveSMBInfo: BoolPointer(true), + preserveSMBInfo: to.Ptr(true), }, &hooks{ beforeRunJob: func(h hookHelper) { diff --git 
a/e2etest/zt_client_provided_key_test.go b/e2etest/zt_client_provided_key_test.go index a353b6aac..43dfdcba9 100644 --- a/e2etest/zt_client_provided_key_test.go +++ b/e2etest/zt_client_provided_key_test.go @@ -60,6 +60,20 @@ func TestClient_ProvidedScopeUpload(t *testing.T) { }, EAccountType.Standard(), EAccountType.Standard(), "") } +func TestClient_ProvidedScopeUploadSingleFile(t *testing.T) { + cpkByName := "blobgokeytestscope" + RunScenarios(t, eOperation.Copy(), eTestFromTo.Other(common.EFromTo.LocalBlob()), eValidate.AutoPlusContent(), anonymousAuthOnly, anonymousAuthOnly, params{ + recursive: true, + cpkByName: cpkByName, + }, nil, testFiles{ + defaultSize: "100K", + shouldTransfer: []interface{}{ + f("file1", verifyOnly{with{cpkByName: cpkByName}}), + }, + objectTarget: "file1", + }, EAccountType.Standard(), EAccountType.Standard(), "") +} + func TestClient_ProvidedScopeS2S(t *testing.T) { cpkByName := "blobgokeytestscope" verifyOnlyProps := verifyOnly{with{cpkByName: cpkByName}} @@ -96,6 +110,20 @@ func TestClient_ProvidedScopeDownload(t *testing.T) { }, EAccountType.Standard(), EAccountType.Standard(), "") } +func TestClient_ProvidedScopeDownloadSingleFile(t *testing.T) { + cpkByName := "blobgokeytestscope" + RunScenarios(t, eOperation.Copy(), eTestFromTo.Other(common.EFromTo.BlobLocal()), eValidate.Auto(), anonymousAuthOnly, anonymousAuthOnly, params{ + recursive: true, + cpkByName: cpkByName, + }, nil, testFiles{ + defaultSize: "100K", + shouldTransfer: []interface{}{ + f("file1", with{cpkByName: cpkByName}), + }, + objectTarget: "file1", + }, EAccountType.Standard(), EAccountType.Standard(), "") +} + func TestClient_ProvidedScopeDelete(t *testing.T) { blobRemove := TestFromTo{ desc: "BlobRemove", @@ -185,6 +213,19 @@ func TestClient_ProvidedKeyUpload(t *testing.T) { }, EAccountType.Standard(), EAccountType.Standard(), "") } +func TestClient_ProvidedKeyUploadSingleFile(t *testing.T) { + RunScenarios(t, eOperation.Copy(), eTestFromTo.Other(common.EFromTo.LocalBlob()), eValidate.AutoPlusContent(), anonymousAuthOnly, anonymousAuthOnly, params{ + recursive: true, + cpkByValue: true, + }, nil, testFiles{ + defaultSize: "100K", + shouldTransfer: []interface{}{ + f("file1", verifyOnly{with{cpkByValue: true}}), + }, + objectTarget: "file1", + }, EAccountType.Standard(), EAccountType.Standard(), "") +} + func TestClient_ProvidedKeyS2S(t *testing.T) { verifyOnlyProps := verifyOnly{with{cpkByValue: true}} RunScenarios(t, eOperation.CopyAndSync(), eTestFromTo.Other(common.EFromTo.FileBlob()), eValidate.Auto(), anonymousAuthOnly, anonymousAuthOnly, params{ @@ -220,6 +261,19 @@ func TestClient_ProvidedKeyDownload(t *testing.T) { }, EAccountType.Standard(), EAccountType.Standard(), "") } +func TestClient_ProvidedKeyDownloadSingleFile(t *testing.T) { + RunScenarios(t, eOperation.Copy(), eTestFromTo.Other(common.EFromTo.BlobLocal()), eValidate.Auto(), anonymousAuthOnly, anonymousAuthOnly, params{ + recursive: true, + cpkByValue: true, + }, nil, testFiles{ + defaultSize: "100K", + shouldTransfer: []interface{}{ + f("file1", with{cpkByValue: true}), + }, + objectTarget: "file1", + }, EAccountType.Standard(), EAccountType.Standard(), "") +} + func TestClient_ProvidedKeyDelete(t *testing.T) { blobRemove := TestFromTo{ desc: "BlobRemove", diff --git a/e2etest/zt_copy_file_smb_test.go b/e2etest/zt_copy_file_smb_test.go index b6b90b724..cdb0ec2dd 100644 --- a/e2etest/zt_copy_file_smb_test.go +++ b/e2etest/zt_copy_file_smb_test.go @@ -1,6 +1,7 @@ package e2etest import ( + 
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-storage-azcopy/v10/common" "runtime" "strings" @@ -13,7 +14,7 @@ func TestSMB_FromShareSnapshot(t *testing.T) { preserveSMBPermissions: true, // default, but present for clarity - //preserveSMBInfo: BoolPointer(true), + //preserveSMBInfo: to.Ptr(true), }, &hooks{ // create a snapshot for the source share beforeRunJob: func(h hookHelper) { @@ -41,7 +42,7 @@ func TestSMB_ToDevNull(t *testing.T) { params{ recursive: true, preserveSMBPermissions: isWindows, - preserveSMBInfo: BoolPointer(isWindows), + preserveSMBInfo: to.Ptr(isWindows), checkMd5: common.EHashValidationOption.FailIfDifferent(), destNull: true, }, diff --git a/e2etest/zt_preserve_access_tier_test.go b/e2etest/zt_preserve_access_tier_test.go index f0f96c24a..44dcda3bc 100644 --- a/e2etest/zt_preserve_access_tier_test.go +++ b/e2etest/zt_preserve_access_tier_test.go @@ -21,8 +21,9 @@ package e2etest import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" "github.com/Azure/azure-storage-azcopy/v10/common" - "github.com/Azure/azure-storage-blob-go/azblob" "testing" ) @@ -44,7 +45,7 @@ func TestTier_V2ToClassicAccount(t *testing.T) { RunScenarios(t, eOperation.Copy(), eTestFromTo.Other(common.EFromTo.BlobBlob()), eValidate.AutoPlusContent(), anonymousAuthOnly, anonymousAuthOnly, params{ recursive: true, s2sPreserveAccessTier: true, - accessTier: azblob.AccessTierHot, + accessTier: to.Ptr(blob.AccessTierHot), }, nil, testFiles{ defaultSize: "4M", shouldTransfer: []interface{}{ @@ -59,7 +60,7 @@ func TestTier_V2ToClassicAccountNoPreserve(t *testing.T) { RunScenarios(t, eOperation.Copy(), eTestFromTo.Other(common.EFromTo.BlobBlob()), eValidate.AutoPlusContent(), anonymousAuthOnly, anonymousAuthOnly, params{ recursive: true, s2sPreserveAccessTier: false, - accessTier: azblob.AccessTierHot, + accessTier: to.Ptr(blob.AccessTierHot), }, nil, testFiles{ defaultSize: "4M", shouldTransfer: []interface{}{ @@ -74,7 +75,7 @@ func TestTier_V2ToClassicAccountCool(t *testing.T) { RunScenarios(t, eOperation.Copy(), eTestFromTo.Other(common.EFromTo.BlobBlob()), eValidate.AutoPlusContent(), anonymousAuthOnly, anonymousAuthOnly, params{ recursive: true, s2sPreserveAccessTier: true, - accessTier: azblob.AccessTierCool, + accessTier: to.Ptr(blob.AccessTierCool), }, nil, testFiles{ defaultSize: "4M", shouldTransfer: []interface{}{ @@ -89,7 +90,7 @@ func TestTier_V2ToClassicAccountNoPreserveCool(t *testing.T) { RunScenarios(t, eOperation.Copy(), eTestFromTo.Other(common.EFromTo.BlobBlob()), eValidate.AutoPlusContent(), anonymousAuthOnly, anonymousAuthOnly, params{ recursive: true, s2sPreserveAccessTier: false, - accessTier: azblob.AccessTierCool, + accessTier: to.Ptr(blob.AccessTierCool), }, nil, testFiles{ defaultSize: "4M", shouldTransfer: []interface{}{ diff --git a/e2etest/zt_preserve_properties_test.go b/e2etest/zt_preserve_properties_test.go index a37c66014..b5301d3d2 100644 --- a/e2etest/zt_preserve_properties_test.go +++ b/e2etest/zt_preserve_properties_test.go @@ -21,6 +21,7 @@ package e2etest import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "testing" "github.com/Azure/azure-storage-azcopy/v10/common" @@ -35,14 +36,14 @@ func TestProperties_NameValueMetadataIsPreservedS2S(t *testing.T) { }, nil, testFiles{ defaultSize: "1K", shouldTransfer: []interface{}{ - f("filea", with{nameValueMetadata: map[string]string{"foo": "abc", "bar": "def"}}), - folder("fold1", with{nameValueMetadata: 
map[string]string{"other": "xyz"}}), + f("filea", with{nameValueMetadata: map[string]*string{"foo": to.Ptr("abc"), "bar": to.Ptr("def")}}), + folder("fold1", with{nameValueMetadata: map[string]*string{"other": to.Ptr("xyz")}}), }, }, EAccountType.Standard(), EAccountType.Standard(), "") } func TestProperties_NameValueMetadataCanBeUploaded(t *testing.T) { - expectedMap := map[string]string{"foo": "abc", "bar": "def", "baz": "state=a;b"} + expectedMap := map[string]*string{"foo": to.Ptr("abc"), "bar": to.Ptr("def"), "baz": to.Ptr("state=a;b")} RunScenarios(t, eOperation.Copy(), eTestFromTo.AllUploads(), eValidate.Auto(), anonymousAuthOnly, anonymousAuthOnly, params{ recursive: true, metadata: "foo=abc;bar=def;baz=state=a\\;b", diff --git a/e2etest/zt_preserve_smb_properties_test.go b/e2etest/zt_preserve_smb_properties_test.go index 54510d0e3..861cb177d 100644 --- a/e2etest/zt_preserve_smb_properties_test.go +++ b/e2etest/zt_preserve_smb_properties_test.go @@ -4,12 +4,13 @@ package e2etest import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-storage-azcopy/v10/cmd" "strings" "testing" "time" "github.com/Azure/azure-storage-azcopy/v10/common" - "github.com/Azure/azure-storage-file-go/azfile" "golang.org/x/sys/windows" ) @@ -71,7 +72,7 @@ func TestProperties_SMBPermissionsSDDLPreserved(t *testing.T) { preserveSMBPermissions: true, // default, but present for clarity - //preserveSMBInfo: BoolPointer(true), + //preserveSMBInfo: to.Ptr(true), }, nil, testFiles{ defaultSize: "1K", shouldTransfer: []interface{}{ @@ -92,7 +93,7 @@ func TestProperties_SMBDates(t *testing.T) { recursive: true, // default, but present for clarity - //preserveSMBInfo: BoolPointer(true), + //preserveSMBInfo: to.Ptr(true), }, &hooks{ beforeRunJob: func(h hookHelper) { // Pause then re-write all the files, so that their LastWriteTime is different from their creation time @@ -124,7 +125,7 @@ func TestProperties_SMBFlags(t *testing.T) { recursive: true, // default, but present for clarity - //preserveSMBInfo: BoolPointer(true), + //preserveSMBInfo: to.Ptr(true), }, nil, testFiles{ defaultSize: "1K", shouldTransfer: []interface{}{ @@ -151,13 +152,13 @@ func TestProperties_SMBPermsAndFlagsWithIncludeAfter(t *testing.T) { recursive: true, // default, but present for clarity - //preserveSMBInfo: BoolPointer(true), + //preserveSMBInfo: to.Ptr(true), // includeAfter: SET LATER }, &hooks{ beforeRunJob: func(h hookHelper) { // Pause for a includeAfter time time.Sleep(5 * time.Second) - h.GetModifiableParameters().includeAfter = time.Now().Format(azfile.ISO8601) + h.GetModifiableParameters().includeAfter = time.Now().Format(cmd.ISO8601) // Pause then re-write all the files, so that their LastWriteTime is different from their creation time // So that when validating, our validation can be sure that the right datetime has ended up in the right // field @@ -200,7 +201,7 @@ func TestProperties_SMBPermsAndFlagsWithSync(t *testing.T) { recursive: true, // default, but present for clarity - //preserveSMBInfo: BoolPointer(true), + //preserveSMBInfo: to.Ptr(true), }, &hooks{ beforeRunJob: func(h hookHelper) { // Pause then re-write all the files, so that their LastWriteTime is different from their creation time @@ -256,7 +257,7 @@ func TestProperties_SMBWithCopyWithShareRoot(t *testing.T) { preserveSMBPermissions: true, // default, but present for clarity - //preserveSMBInfo: BoolPointer(true), + //preserveSMBInfo: to.Ptr(true), }, nil, testFiles{ @@ -290,7 +291,7 @@ func TestProperties_SMBTimes(t *testing.T) 
{ recursive: true, // default, but present for clarity - //preserveSMBInfo: BoolPointer(true), + //preserveSMBInfo: to.Ptr(true), }, nil, testFiles{ @@ -317,7 +318,7 @@ func TestProperties_EnsureContainerBehavior(t *testing.T) { anonymousAuthOnly, params{ recursive: true, - preserveSMBInfo: BoolPointer(true), + preserveSMBInfo: to.Ptr(true), preserveSMBPermissions: true, }, nil, diff --git a/e2etest/zt_remove_test.go b/e2etest/zt_remove_test.go index 7003442ed..ed3c8880c 100644 --- a/e2etest/zt_remove_test.go +++ b/e2etest/zt_remove_test.go @@ -21,7 +21,10 @@ package e2etest import ( - "github.com/Azure/azure-storage-file-go/azfile" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-storage-azcopy/v10/cmd" + "github.com/Azure/azure-storage-azcopy/v10/common" "testing" "time" ) @@ -42,7 +45,7 @@ func TestRemove_IncludeAfter(t *testing.T) { beforeRunJob: func(h hookHelper) { // Pause for a includeAfter time time.Sleep(5 * time.Second) - h.GetModifiableParameters().includeAfter = time.Now().Format(azfile.ISO8601) + h.GetModifiableParameters().includeAfter = time.Now().Format(cmd.ISO8601) // Pause then re-write all the files, so that their LastWriteTime is different from their creation time // So that when validating, our validation can be sure that the right datetime has ended up in the right // field @@ -65,4 +68,41 @@ func TestRemove_IncludeAfter(t *testing.T) { shouldTransfer: recreateFiles, shouldIgnore: skippedFiles, }, EAccountType.Standard(), EAccountType.Standard(), "") -} \ No newline at end of file +} + +func TestRemove_WithSnapshotsBlob(t *testing.T) { + blobRemove := TestFromTo{ + desc: "AllRemove", + useAllTos: true, + froms: []common.Location{ + common.ELocation.Blob(), + }, + tos: []common.Location{ + common.ELocation.Unknown(), + }, + } + RunScenarios(t, eOperation.Remove(), blobRemove, eValidate.Auto(), anonymousAuthOnly, anonymousAuthOnly, params{ + recursive: true, + }, &hooks{ + beforeRunJob: func(h hookHelper) { + blobClient := h.GetSource().(*resourceBlobContainer).containerClient.NewBlobClient("filea") + _, err := blobClient.CreateSnapshot(ctx, nil) + if err != nil { + t.Errorf("error creating snapshot %s", err) + } + }, + afterValidation: func(h hookHelper) { + blobClient := h.GetSource().(*resourceBlobContainer).containerClient.NewBlobClient("filea") + _, err := blobClient.Delete(ctx, &blob.DeleteOptions{DeleteSnapshots: to.Ptr(blob.DeleteSnapshotsOptionTypeInclude)}) + if err != nil { + t.Errorf("error deleting blob %s", err) + } + }, + }, testFiles{ + defaultSize: "1K", + shouldSkip: []interface{}{ + f("filea"), + }, + objectTarget: "filea", + }, EAccountType.Standard(), EAccountType.Standard(), "") +} diff --git a/e2etest/zt_resume_test.go b/e2etest/zt_resume_test.go index bd5ed75d2..afb248a0a 100644 --- a/e2etest/zt_resume_test.go +++ b/e2etest/zt_resume_test.go @@ -1,10 +1,11 @@ package e2etest import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" "testing" "github.com/Azure/azure-storage-azcopy/v10/common" - "github.com/Azure/azure-storage-blob-go/azblob" "github.com/google/uuid" ) @@ -87,7 +88,7 @@ func TestResume_PublicSource_BlobTarget(t *testing.T) { nil, testFiles{ defaultSize: "1K", - sourcePublic: azblob.PublicAccessBlob, + sourcePublic: to.Ptr(container.PublicAccessTypeBlob), objectTarget: "a.txt", shouldTransfer: []interface{}{ @@ -113,7 +114,7 @@ func TestResume_PublicSource_ContainerTarget(t 
*testing.T) { nil, testFiles{ defaultSize: "1K", - sourcePublic: azblob.PublicAccessContainer, + sourcePublic: to.Ptr(container.PublicAccessTypeContainer), shouldTransfer: []interface{}{ f("a.txt"), diff --git a/e2etest/zt_resume_windows_test.go b/e2etest/zt_resume_windows_test.go index 611d75763..1011fa165 100644 --- a/e2etest/zt_resume_windows_test.go +++ b/e2etest/zt_resume_windows_test.go @@ -15,7 +15,7 @@ func TestResume_FolderState(t *testing.T) { }, // default, but present for clarity - //preserveSMBInfo: BoolPointer(true), + //preserveSMBInfo: to.Ptr(true), }, nil, testFiles{ defaultSize: "1K", @@ -37,7 +37,7 @@ func TestResume_NoCreateFolder(t *testing.T) { }, // default, but present for clarity - //preserveSMBInfo: BoolPointer(true), + //preserveSMBInfo: to.Ptr(true), }, &hooks{ beforeResumeHook: func(h hookHelper) { // Create the folder in the middle of the transfer diff --git a/e2etest/zt_trailingdot_test.go b/e2etest/zt_trailingdot_test.go index dbe83b422..2302435f6 100644 --- a/e2etest/zt_trailingdot_test.go +++ b/e2etest/zt_trailingdot_test.go @@ -23,7 +23,6 @@ package e2etest import ( "context" "github.com/Azure/azure-storage-azcopy/v10/common" - "github.com/Azure/azure-storage-file-go/azfile" "runtime" "testing" ) @@ -101,12 +100,12 @@ func TestTrailingDot_Disabled(t *testing.T) { trailingDot: common.ETrailingDotOption.Disable(), }, &hooks{ afterValidation: func(h hookHelper) { - shareURL := h.GetDestination().(*resourceAzureFileShare).shareURL - l, err := shareURL.NewRootDirectoryURL().ListFilesAndDirectoriesSegment(context.Background(), azfile.Marker{}, azfile.ListFilesAndDirectoriesOptions{}) + shareURL := h.GetDestination().(*resourceAzureFileShare).shareClient + l, err := shareURL.NewRootDirectoryClient().NewListFilesAndDirectoriesPager(nil).NextPage(context.Background()) if err != nil { panic(err) } - if len(l.FileItems) != 1 { + if len(l.Segment.Files) != 1 { panic("expected 1 file named `file`") } }, diff --git a/go.mod b/go.mod index 83f17b7c6..4202288fb 100644 --- a/go.mod +++ b/go.mod @@ -3,8 +3,8 @@ module github.com/Azure/azure-storage-azcopy/v10 require ( cloud.google.com/go/storage v1.29.0 github.com/Azure/azure-pipeline-go v0.2.4-0.20220425205405-09e6f201e1e4 - github.com/Azure/azure-storage-blob-go v0.15.0 - github.com/Azure/azure-storage-file-go v0.6.1-0.20201111053559-3c1754dc00a5 + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.1.0 + github.com/Azure/azure-sdk-for-go/sdk/storage/azfile v1.0.0 github.com/Azure/go-autorest/autorest/adal v0.9.18 github.com/JeffreyRichter/enum v0.0.0-20180725232043-2567042f9cda github.com/danieljoos/wincred v1.1.2 @@ -19,29 +19,36 @@ require ( github.com/spf13/cobra v1.4.0 github.com/wastore/keychain v0.0.0-20180920053336-f2c902a3d807 github.com/wastore/keyctl v0.3.1 - golang.org/x/crypto v0.0.0-20220314234724-5d542ad81a58 + golang.org/x/crypto v0.9.0 // indirect golang.org/x/oauth2 v0.4.0 golang.org/x/sync v0.1.0 - golang.org/x/sys v0.5.0 + golang.org/x/sys v0.8.0 google.golang.org/api v0.106.0 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c ) require github.com/stretchr/testify v1.8.1 +require ( + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 + github.com/Azure/go-autorest/autorest/date v0.3.0 +) + require ( cloud.google.com/go v0.107.0 // indirect cloud.google.com/go/compute v1.15.1 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect cloud.google.com/go/iam v0.8.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal 
v1.3.0 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect - github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/go-ini/ini v1.66.4 // indirect - github.com/golang-jwt/jwt/v4 v4.3.0 // indirect + github.com/golang-jwt/jwt/v4 v4.5.0 // indirect github.com/golang/protobuf v1.5.2 // indirect github.com/google/go-cmp v0.5.9 // indirect github.com/googleapis/enterprise-certificate-proxy v0.2.1 // indirect @@ -49,13 +56,15 @@ require ( github.com/inconshreveable/mousetrap v1.0.0 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/kr/text v0.2.0 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect go.opencensus.io v0.24.0 // indirect - golang.org/x/net v0.7.0 // indirect - golang.org/x/text v0.7.0 // indirect + golang.org/x/net v0.10.0 // indirect + golang.org/x/text v0.9.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect diff --git a/go.sum b/go.sum index 16f30a253..b36aea565 100644 --- a/go.sum +++ b/go.sum @@ -10,16 +10,24 @@ cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGE cloud.google.com/go/longrunning v0.3.0 h1:NjljC+FYPV3uh5/OwWT6pVU+doBqMg2x/rZlE+CamDs= cloud.google.com/go/storage v1.29.0 h1:6weCgzRvMg7lzuUurI4697AqIRPU1SvzHhynwpW31jI= cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4= -github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= +github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= github.com/Azure/azure-pipeline-go v0.2.4-0.20220425205405-09e6f201e1e4 h1:hDJImUzpTAeIw/UasFUUDB/+UsZm5Q/6x2/jKKvEUiw= github.com/Azure/azure-pipeline-go v0.2.4-0.20220425205405-09e6f201e1e4/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= -github.com/Azure/azure-storage-blob-go v0.15.0 h1:rXtgp8tN1p29GvpGgfJetavIG0V7OgcSXPpwp3tx6qk= -github.com/Azure/azure-storage-blob-go v0.15.0/go.mod h1:vbjsVbX0dlxnRc4FFMPsS9BsJWPcne7GB7onqlPvz58= -github.com/Azure/azure-storage-file-go v0.6.1-0.20201111053559-3c1754dc00a5 h1:aHEvBM4oXIWSTOVdL55nCYXO0Cl7ie3Ui5xMQhLVez8= -github.com/Azure/azure-storage-file-go v0.6.1-0.20201111053559-3c1754dc00a5/go.mod h1:++L7GP2pRyUNuastZ7m02vYV69JHmqlWXfCaGoL0v4s= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 h1:8kDqDngH+DmVBiCtIjCFTGa7MBnsIOkF9IccInFEbjk= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 h1:vcYCAze6p19qBW7MhZybIsqD8sMV8js0NyQM8JDnVtg= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0/go.mod h1:OQeznEEkTZ9OrhHJoDD8ZDq51FHgXjqtP9z6bEwBq9U= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod 
h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.2.0 h1:Ma67P/GGprNwsslzEH6+Kb8nybI8jpDTm4Wmzu2ReK8= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.1.0 h1:nVocQV40OQne5613EeLayJiRAJuKlBGy+m22qWG+WRg= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.1.0/go.mod h1:7QJP7dr2wznCMeqIrhMgWGf7XpAQnVrJqDm9nvV3Cu4= +github.com/Azure/azure-sdk-for-go/sdk/storage/azfile v1.0.0 h1:iqXx16jKhIkx1FLPA4tsaXLc6zIrj/kMesoutWDv6MI= +github.com/Azure/azure-sdk-for-go/sdk/storage/azfile v1.0.0/go.mod h1:AjDdvSU6d92BGS2JfdsKi+H/c2vQY3OFp4qhxzsUH8g= +github.com/Azure/azure-storage-file-go v0.8.0 h1:OX8DGsleWLUE6Mw4R/OeWEZMvsTIpwN94J59zqKQnTI= +github.com/Azure/azure-storage-file-go v0.8.0/go.mod h1:3w3mufGcMjcOJ3w+4Gs+5wsSgkT7xDwWWqMMIrXtW4c= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= github.com/Azure/go-autorest/autorest/adal v0.9.18 h1:kLnPsRjzZZUF3K5REu/Kc+qMQrvuza2bwSnNdhmzLfQ= github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= @@ -30,6 +38,8 @@ github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+Z github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 h1:OBhqkivkhkMqLPymWEppkm7vgPQY2XsHoEkaMQ0AdZY= +github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/JeffreyRichter/enum v0.0.0-20180725232043-2567042f9cda h1:NOo6+gM9NNPJ3W56nxOKb4164LEw094U0C8zYQM8mQU= github.com/JeffreyRichter/enum v0.0.0-20180725232043-2567042f9cda/go.mod h1:2CaSFTh2ph9ymS6goiOKIBdfhwWUVsX4nQ5QjIYFHHs= @@ -46,16 +56,16 @@ github.com/danieljoos/wincred v1.1.2/go.mod h1:GijpziifJoIBfYh+S7BbkdUTU4LfM+QnG github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/go-ini/ini v1.66.4 h1:dKjMqkcbkzfddhIhyglTPgMoJnkvmG+bSLrU9cTHc5M= github.com/go-ini/ini v1.66.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= 
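The dependency churn above is the heart of the migration: as direct dependencies, the Track 1 packages github.com/Azure/azure-storage-blob-go and github.com/Azure/azure-storage-file-go give way to the Track 2 modules azblob v1.1.0 and azfile v1.0.0 (plus azcore, azidentity, and their transitive dependencies). For reviewers unfamiliar with the Track 2 surface, a minimal blob download against the new module looks roughly like the sketch below; the service URL, container, and blob names are placeholders, and AzCopy itself constructs its clients through the new common.CreateBlobClient / common.CreateShareFileClient helpers rather than this direct form.

package main

import (
	"context"
	"fmt"
	"io"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
)

func main() {
	// Placeholder service URL; anonymous access or an appended SAS token is assumed here.
	client, err := azblob.NewClientWithNoCredential("https://myaccount.blob.core.windows.net/", nil)
	if err != nil {
		panic(err)
	}
	// DownloadStream replaces the Track 1 blobURL.Download call.
	resp, err := client.DownloadStream(context.Background(), "mycontainer", "myblob", nil)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Printf("downloaded %d bytes\n", len(body))
}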
github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= -github.com/golang-jwt/jwt/v4 v4.3.0 h1:kHL1vqdqWNfATmA0FNMdmZNMyZI1U6O31X4rlIPoBog= -github.com/golang-jwt/jwt/v4 v4.3.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= @@ -85,7 +95,6 @@ github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/martian/v3 v3.2.1 h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.2.1 h1:RY7tHKZcRlk788d5WSo/e83gOyyy742E8GSs771ySpg= @@ -101,9 +110,13 @@ github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfn github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= github.com/mattn/go-ieproxy v0.0.11 h1:MQ/5BuGSgDAHZOJe6YY80IF2UVCfGkwfo6AeD7HtHYo= github.com/mattn/go-ieproxy v0.0.11/go.mod h1:/NsJd+kxZBmjMc5hrJCKMbP57B84rvq9BiDRbtO9AS0= @@ -111,7 +124,8 @@ github.com/minio/minio-go v6.0.14+incompatible h1:fnV+GD28LeqdN6vT2XdGKW8Qe/IfjJ github.com/minio/minio-go v6.0.14+incompatible/go.mod h1:7guKYtitv8dktvNUGrhzmNlA5wrAABTQXCoesZdFQO8= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= 
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -146,11 +160,9 @@ go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220314234724-5d542ad81a58 h1:L8CkJyVoa0/NslN3RUMLgasK5+KatNvyRGQ9QyCYAfc= -golang.org/x/crypto v0.0.0-20220314234724-5d542ad81a58/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g= +golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -166,10 +178,9 @@ golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M= golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= @@ -181,23 +192,24 @@ golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201101102859-da207088b7d1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210819135213-f52c844e1c1c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -239,7 +251,6 @@ google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175 google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= diff --git a/jobsAdmin/JobsAdmin.go b/jobsAdmin/JobsAdmin.go index 00b4d3445..e5fbb249f 100755 --- a/jobsAdmin/JobsAdmin.go +++ b/jobsAdmin/JobsAdmin.go @@ -24,7 +24,6 @@ import ( "context" "encoding/json" "fmt" - "github.com/Azure/azure-storage-blob-go/azblob" "os" "path/filepath" "runtime" @@ -75,7 +74,7 @@ var JobsAdmin interface { // JobMgr returns the specified JobID's JobMgr JobMgr(jobID common.JobID) (ste.IJobMgr, bool) - JobMgrEnsureExists(jobID common.JobID, level common.LogLevel, commandString string, sourceBlobToken azblob.Credential) ste.IJobMgr + JobMgrEnsureExists(jobID common.JobID, level common.LogLevel, 
commandString string) ste.IJobMgr // AddJobPartMgr associates the specified JobPartMgr with the Jobs Administrator //AddJobPartMgr(appContext context.Context, planFile JobPartPlanFileName) IJobPartMgr @@ -293,12 +292,12 @@ func (ja *jobsAdmin) AppPathFolder() string { // JobMgrEnsureExists returns the specified JobID's IJobMgr if it exists or creates it if it doesn't already exit // If it does exist, then the appCtx argument is ignored. func (ja *jobsAdmin) JobMgrEnsureExists(jobID common.JobID, - level common.LogLevel, commandString string, sourceBlobToken azblob.Credential) ste.IJobMgr { + level common.LogLevel, commandString string) ste.IJobMgr { return ja.jobIDToJobMgr.EnsureExists(jobID, func() ste.IJobMgr { // Return existing or new IJobMgr to caller - return ste.NewJobMgr(ja.concurrency, jobID, ja.appCtx, ja.cpuMonitor, level, commandString, ja.logDir, ja.concurrencyTuner, ja.pacer, ja.slicePool, ja.cacheLimiter, ja.fileCountLimiter, ja.jobLogger, false, sourceBlobToken) + return ste.NewJobMgr(ja.concurrency, jobID, ja.appCtx, ja.cpuMonitor, level, commandString, ja.logDir, ja.concurrencyTuner, ja.pacer, ja.slicePool, ja.cacheLimiter, ja.fileCountLimiter, ja.jobLogger, false) }) } @@ -387,7 +386,7 @@ func (ja *jobsAdmin) ResurrectJob(jobId common.JobID, sourceSAS string, destinat continue } mmf := planFile.Map() - jm := ja.JobMgrEnsureExists(jobID, mmf.Plan().LogLevel, "", nil) + jm := ja.JobMgrEnsureExists(jobID, mmf.Plan().LogLevel, "") jm.AddJobPart(partNum, planFile, mmf, sourceSAS, destinationSAS, false, nil) } @@ -421,7 +420,7 @@ func (ja *jobsAdmin) ResurrectJobParts() { } mmf := planFile.Map() //todo : call the compute transfer function here for each job. - jm := ja.JobMgrEnsureExists(jobID, mmf.Plan().LogLevel, "", nil) + jm := ja.JobMgrEnsureExists(jobID, mmf.Plan().LogLevel, "") jm.AddJobPart(partNum, planFile, mmf, EMPTY_SAS_STRING, EMPTY_SAS_STRING, false, nil) } } diff --git a/jobsAdmin/init.go b/jobsAdmin/init.go index 11a0a5da9..5d8698364 100755 --- a/jobsAdmin/init.go +++ b/jobsAdmin/init.go @@ -165,7 +165,7 @@ func ExecuteNewCopyJobPartOrder(order common.CopyJobPartOrderRequest) common.Cop // Get the file name for this Job Part's Plan jppfn := JobsAdmin.NewJobPartPlanFileName(order.JobID, order.PartNum) jppfn.Create(order) // Convert the order to a plan file - jm := JobsAdmin.JobMgrEnsureExists(order.JobID, order.LogLevel, order.CommandString, order.CredentialInfo.SourceBlobToken) // Get a this job part's job manager (create it if it doesn't exist) + jm := JobsAdmin.JobMgrEnsureExists(order.JobID, order.LogLevel, order.CommandString) // Get a this job part's job manager (create it if it doesn't exist) if len(order.Transfers.List) == 0 && order.IsFinalPart { /* @@ -260,7 +260,7 @@ func CancelPauseJobOrder(jobID common.JobID, desiredJobStatus common.JobStatus) case common.EJobStatus.Paused(): // Logically, It's OK to pause an already-paused job jpp0.SetJobStatus(desiredJobStatus) msg := fmt.Sprintf("JobID=%v %s", jobID, - common.IffString(desiredJobStatus == common.EJobStatus.Paused(), "paused", "canceled")) + common.Iff(desiredJobStatus == common.EJobStatus.Paused(), "paused", "canceled")) if jm.ShouldLog(pipeline.LogInfo) { jm.Log(pipeline.LogInfo, msg) diff --git a/perf-test.yaml b/perf-test.yaml index 6c935d9f7..3a00e6f48 100644 --- a/perf-test.yaml +++ b/perf-test.yaml @@ -38,8 +38,8 @@ stages: displayName: 'Build Azcopy' - script: | - time azcopy copy $(Blob2BlobLargeFilesSrc) $(Blob2BlobLargeFilesDst) --recursive --block-size-mb=128 --log-level=ERROR 
--cap-mbps=40000 - displayName: 'Blob2Blob - Large Files' + time azcopy copy $(Blob2BlobSmallFilesSrc) $(Blob2BlobSmallFilesDst) --recursive --check-length=false --log-level=ERROR + displayName: 'Blob2Blob - Small Files' condition: always() env: AZCOPY_AUTO_LOGIN_TYPE: $(AZCOPY_AUTO_LOGIN_TYPE) @@ -60,8 +60,8 @@ stages: AZCOPY_SHOW_PERF_STATES: "1" - script: | - time azcopy copy $(Blob2BlobSmallFilesSrc) $(Blob2BlobSmallFilesDst) --recursive --check-length=false --log-level=ERROR - displayName: 'Blob2Blob - Small Files' + time azcopy copy $(Blob2BlobLargeFilesSrc) $(Blob2BlobLargeFilesDst) --recursive --block-size-mb=128 --log-level=ERROR --cap-mbps=40000 + displayName: 'Blob2Blob - Large Files' condition: always() env: AZCOPY_AUTO_LOGIN_TYPE: $(AZCOPY_AUTO_LOGIN_TYPE) @@ -71,9 +71,9 @@ stages: AZCOPY_SHOW_PERF_STATES: "1" - script: | - sudo mkdir -m 777 $(localPath)/largeFiles/ - time azcopy copy $(Blob2BlobLargeFilesSrc) /dev/null --recursive --log-level=ERROR - displayName: 'Download - Large files' + sudo mkdir -m 777 $(localPath)/smallFiles/ + time azcopy copy $(Blob2BlobSmallFilesSrc) /dev/null --recursive --check-length=false --log-level=ERROR + displayName: 'Download - Small Files' condition: always() env: AZCOPY_AUTO_LOGIN_TYPE: $(AZCOPY_AUTO_LOGIN_TYPE) @@ -82,9 +82,9 @@ stages: AZCOPY_LOG_LOCATION: $(Build.ArtifactStagingDirectory)/logs - script: | - time azcopy bench $(Blob2BlobLargeFilesDst) --log-level=ERROR --size-per-file=50G --file-count=50 --put-md5=false --delete-test-data=false + time azcopy bench $(Blob2BlobSmallFilesDst) --size-per-file=5k --file-count=8000000 --check-length=false --log-level=ERROR --delete-test-data=false sudo rm -rf $(localPath)/* - displayName: 'Upload - Large files' + displayName: 'Upload - Small Files' condition: always() env: AZCOPY_AUTO_LOGIN_TYPE: $(AZCOPY_AUTO_LOGIN_TYPE) @@ -115,9 +115,9 @@ stages: AZCOPY_LOG_LOCATION: $(Build.ArtifactStagingDirectory)/logs - script: | - sudo mkdir -m 777 $(localPath)/smallFiles/ - time azcopy copy $(Blob2BlobSmallFilesSrc) /dev/null --recursive --check-length=false --log-level=ERROR - displayName: 'Download - Small Files' + sudo mkdir -m 777 $(localPath)/largeFiles/ + time azcopy copy $(Blob2BlobLargeFilesSrc) /dev/null --recursive --log-level=ERROR + displayName: 'Download - Large files' condition: always() env: AZCOPY_AUTO_LOGIN_TYPE: $(AZCOPY_AUTO_LOGIN_TYPE) @@ -126,9 +126,9 @@ stages: AZCOPY_LOG_LOCATION: $(Build.ArtifactStagingDirectory)/logs - script: | - time azcopy bench $(Blob2BlobSmallFilesDst) --size-per-file=5k --file-count=8000000 --check-length=false --log-level=ERROR --delete-test-data=false + time azcopy bench $(Blob2BlobLargeFilesDst) --log-level=ERROR --size-per-file=50G --file-count=50 --put-md5=false --delete-test-data=false sudo rm -rf $(localPath)/* - displayName: 'Upload - Small Files' + displayName: 'Upload - Large files' condition: always() env: AZCOPY_AUTO_LOGIN_TYPE: $(AZCOPY_AUTO_LOGIN_TYPE) diff --git a/ste/ErrorExt.go b/ste/ErrorExt.go index 9a6244a8e..309753302 100644 --- a/ste/ErrorExt.go +++ b/ste/ErrorExt.go @@ -1,11 +1,11 @@ package ste import ( + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" "net/http" "github.com/Azure/azure-storage-azcopy/v10/azbfs" - "github.com/Azure/azure-storage-blob-go/azblob" - "github.com/Azure/azure-storage-file-go/azfile" ) type ErrorEx struct { @@ -14,11 +14,11 @@ type ErrorEx struct { // TODO: consider rolling MSRequestID into this, so that all places that use this can pick up, and log, the request ID too func (errex ErrorEx) 
ErrorCodeAndString() (string, int, string) { + var respErr *azcore.ResponseError + if errors.As(errex.error, &respErr) { + return respErr.ErrorCode, respErr.StatusCode, respErr.RawResponse.Status + } switch e := interface{}(errex.error).(type) { - case azblob.StorageError: - return string(e.ServiceCode()), e.Response().StatusCode, e.Response().Status - case azfile.StorageError: - return string(e.ServiceCode()), e.Response().StatusCode, e.Response().Status case azbfs.StorageError: return string(e.ServiceCode()), e.Response().StatusCode, e.Response().Status default: @@ -33,6 +33,10 @@ type hasResponse interface { // MSRequestID gets the request ID guid associated with the failed request. // Returns "" if there isn't one (either no request, or there is a request but it doesn't have the header) func (errex ErrorEx) MSRequestID() string { + var respErr *azcore.ResponseError + if errors.As(errex.error, &respErr) { + return respErr.RawResponse.Header.Get("x-ms-request-id") + } if respErr, ok := errex.error.(hasResponse); ok { r := respErr.Response() if r != nil { diff --git a/ste/JobPartPlan.go b/ste/JobPartPlan.go index 84e35dbca..1fd04a7e7 100644 --- a/ste/JobPartPlan.go +++ b/ste/JobPartPlan.go @@ -2,12 +2,12 @@ package ste import ( "errors" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" "reflect" "sync/atomic" "unsafe" "github.com/Azure/azure-storage-azcopy/v10/common" - "github.com/Azure/azure-storage-blob-go/azblob" ) // dataSchemaVersion defines the data schema version of JobPart order files supported by @@ -208,7 +208,7 @@ func (jpph *JobPartPlanHeader) getString(offset int64, length int16) string { // TransferSrcPropertiesAndMetadata returns the SrcHTTPHeaders, properties and metadata for a transfer at given transferIndex in JobPartOrder // TODO: Refactor return type to an object -func (jpph *JobPartPlanHeader) TransferSrcPropertiesAndMetadata(transferIndex uint32) (h common.ResourceHTTPHeaders, metadata common.Metadata, blobType azblob.BlobType, blobTier azblob.AccessTierType, +func (jpph *JobPartPlanHeader) TransferSrcPropertiesAndMetadata(transferIndex uint32) (h common.ResourceHTTPHeaders, metadata common.Metadata, blobType blob.BlobType, blobTier blob.AccessTier, s2sGetPropertiesInBackend bool, DestLengthValidation bool, s2sSourceChangeValidation bool, s2sInvalidMetadataHandleOption common.InvalidMetadataHandleOption, entityType common.EntityType, blobVersionID string, blobSnapshotID string, blobTags common.BlobTags) { var err error t := jpph.Transfer(transferIndex) @@ -254,12 +254,12 @@ func (jpph *JobPartPlanHeader) TransferSrcPropertiesAndMetadata(transferIndex ui } if t.SrcBlobTypeLength != 0 { tmpBlobTypeStr := []byte(jpph.getString(offset, t.SrcBlobTypeLength)) - blobType = azblob.BlobType(tmpBlobTypeStr) + blobType = blob.BlobType(tmpBlobTypeStr) offset += int64(t.SrcBlobTypeLength) } if t.SrcBlobTierLength != 0 { tmpBlobTierStr := []byte(jpph.getString(offset, t.SrcBlobTierLength)) - blobTier = azblob.AccessTierType(tmpBlobTierStr) + blobTier = blob.AccessTier(tmpBlobTierStr) offset += int64(t.SrcBlobTierLength) } if t.SrcBlobVersionIDLength != 0 { @@ -426,7 +426,7 @@ func (jppt *JobPartPlanTransfer) SetTransferStatus(status common.TransferStatus, common.AtomicMorphInt32((*int32)(&jppt.atomicTransferStatus), func(startVal int32) (val int32, morphResult interface{}) { // If current transfer status has some completed value, then it will not be changed. 
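A small recurring change throughout this patch: the type-specific conditional helpers (common.IffString, common.Iffint32) are collapsed into a single generic common.Iff, as the surrounding SetTransferStatus and SetErrorCode hunks show. Functionally it is just a generic ternary along these lines — an illustrative sketch, not the exact definition in common/iff.go:

// Iff returns trueVal when condition holds, falseVal otherwise.
func Iff[T any](condition bool, trueVal, falseVal T) T {
	if condition {
		return trueVal
	}
	return falseVal
}

Call sites elsewhere in the patch, such as common.Iff(region == "", "", "-"+region) and common.Iff(startErrorCode != 0, startErrorCode, errorCode), rely on the compiler inferring T from the two value arguments.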
- return common.Iffint32(common.TransferStatus(startVal).StatusLocked(), startVal, int32(status)), nil + return common.Iff(common.TransferStatus(startVal).StatusLocked(), startVal, int32(status)), nil }) } else { (&jppt.atomicTransferStatus).AtomicStore(status) @@ -447,7 +447,7 @@ func (jppt *JobPartPlanTransfer) SetErrorCode(errorCode int32, overwrite bool) { func(startErrorCode int32) (val int32, morphResult interface{}) { // startErrorCode != 0 means that error code is already set. // If current error code is already set to some error code, then it will not be changed. - return common.Iffint32(startErrorCode != 0, startErrorCode, errorCode), nil + return common.Iff(startErrorCode != 0, startErrorCode, errorCode), nil }) } else { atomic.StoreInt32(&jppt.atomicErrorCode, errorCode) diff --git a/ste/downloader-azureFiles.go b/ste/downloader-azureFiles.go index bc91feca9..e663d5bfe 100644 --- a/ste/downloader-azureFiles.go +++ b/ste/downloader-azureFiles.go @@ -22,10 +22,10 @@ package ste import ( "errors" - "net/url" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" + "time" "github.com/Azure/azure-pipeline-go/pipeline" - "github.com/Azure/azure-storage-file-go/azfile" "github.com/Azure/azure-storage-azcopy/v10/common" ) @@ -117,32 +117,32 @@ func (bd *azureFilesDownloader) GenerateDownloadFunc(jptm IJobPartTransferMgr, s return createDownloadChunkFunc(jptm, id, func() { // step 1: Downloading the file from range startIndex till (startIndex + adjustedChunkSize) - info := jptm.Info() - u, _ := url.Parse(info.Source) - srcFileURL := azfile.NewFileURL(*u, srcPipeline) + source := jptm.Info().Source + fileClient := common.CreateShareFileClient(source, jptm.CredentialInfo(), jptm.CredentialOpOptions(), jptm.ClientOptions()) // At this point we create an HTTP(S) request for the desired portion of the file, and // wait until we get the headers back... but we have not yet read its whole body. // The Download method encapsulates any retries that may be necessary to get to the point of receiving response headers. jptm.LogChunkStatus(id, common.EWaitReason.HeaderResponse()) - get, err := srcFileURL.Download(jptm.Context(), id.OffsetInFile(), length, false) + // TODO : Why no enriched context here? 
enrichedContext := withRetryNotification(jptm.Context(), bd.filePacer) + get, err := fileClient.DownloadStream(jptm.Context(), &file.DownloadStreamOptions{Range: file.HTTPRange{Offset: id.OffsetInFile(), Count: length}}) if err != nil { jptm.FailActiveDownload("Downloading response body", err) // cancel entire transfer because this chunk has failed return } // Verify that the file has not been changed via a client side LMT check - getLocation := get.LastModified().Location() - if !get.LastModified().Equal(jptm.LastModifiedTime().In(getLocation)) { + getLMT := get.LastModified.In(time.FixedZone("GMT", 0)) + if !getLMT.Equal(jptm.LastModifiedTime().In(time.FixedZone("GMT", 0))) { jptm.FailActiveDownload("Azure File modified during transfer", - errors.New("Azure File modified during transfer")) + errors.New("azure File modified during transfer")) } // step 2: Enqueue the response body to be written out to disk // The retryReader encapsulates any retries that may be necessary while downloading the body jptm.LogChunkStatus(id, common.EWaitReason.Body()) - retryReader := get.Body(azfile.RetryReaderOptions{ - MaxRetryRequests: MaxRetryPerDownloadBody, - NotifyFailedRead: common.NewReadLogFunc(jptm, u), + retryReader := get.NewRetryReader(jptm.Context(), &file.RetryReaderOptions{ + MaxRetries: MaxRetryPerDownloadBody, + OnFailedRead: common.NewFileReadLogFunc(jptm, source), }) defer retryReader.Close() err = destWriter.EnqueueChunk(jptm.Context(), id, length, newPacedResponseBody(jptm.Context(), retryReader, pacer), true) diff --git a/ste/downloader-azureFiles_linux.go b/ste/downloader-azureFiles_linux.go index 6847fde83..e4077490a 100644 --- a/ste/downloader-azureFiles_linux.go +++ b/ste/downloader-azureFiles_linux.go @@ -6,14 +6,13 @@ package ste import ( "encoding/binary" "fmt" - "net/url" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" "path/filepath" "strings" "sync" "github.com/Azure/azure-storage-azcopy/v10/common" "github.com/Azure/azure-storage-azcopy/v10/sddl" - "github.com/Azure/azure-storage-file-go/azfile" "github.com/pkg/xattr" "golang.org/x/sys/unix" @@ -30,16 +29,19 @@ func (*azureFilesDownloader) PutSMBProperties(sip ISMBPropertyBearingSourceInfoP // Set 32-bit FileAttributes for the file. setAttributes := func() error { + attribs, err := propHolder.FileAttributes() + if err != nil { + return fmt.Errorf("attempted to read SMB properties: %w", err) + } // This is a safe conversion. 
- attribs := uint32(propHolder.FileAttributes()) - + attr := FileAttributesToUint32(*attribs) xattrbuf := make([]byte, 4) - binary.LittleEndian.PutUint32(xattrbuf, uint32(attribs)) + binary.LittleEndian.PutUint32(xattrbuf, attr) - err := xattr.Set(txInfo.Destination, common.CIFS_XATTR_ATTRIB, xattrbuf) + err = xattr.Set(txInfo.Destination, common.CIFS_XATTR_ATTRIB, xattrbuf) if err != nil { return fmt.Errorf("xattr.Set(%s, %s, 0x%x) failed: %w", - txInfo.Destination, common.CIFS_XATTR_ATTRIB, attribs, err) + txInfo.Destination, common.CIFS_XATTR_ATTRIB, attr, err) } return nil @@ -219,12 +221,11 @@ func (a *azureFilesDownloader) PutSDDL(sip ISMBPropertyBearingSourceInfoProvider // TODO: this method may become obsolete if/when we are able to get permissions from the share root func (a *azureFilesDownloader) parentIsShareRoot(source string) bool { - u, err := url.Parse(source) + fileURLParts, err := file.ParseURL(source) if err != nil { return false } - f := azfile.NewFileURLParts(*u) - path := f.DirectoryOrFilePath + path := fileURLParts.DirectoryOrFilePath sep := common.DeterminePathSeparator(path) splitPath := strings.Split(strings.Trim(path, sep), sep) return path != "" && len(splitPath) == 1 diff --git a/ste/downloader-azureFiles_windows.go b/ste/downloader-azureFiles_windows.go index 04b5dd6da..25a0fb1cd 100644 --- a/ste/downloader-azureFiles_windows.go +++ b/ste/downloader-azureFiles_windows.go @@ -5,7 +5,7 @@ package ste import ( "fmt" - "net/url" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" "path/filepath" "strings" "sync" @@ -14,7 +14,6 @@ import ( "github.com/Azure/azure-storage-azcopy/v10/common" "github.com/Azure/azure-storage-azcopy/v10/sddl" - "github.com/Azure/azure-storage-file-go/azfile" "github.com/hillu/go-ntdll" "golang.org/x/sys/windows" @@ -42,11 +41,9 @@ func (bd *azureFilesDownloader) PutSMBProperties(sip ISMBPropertyBearingSourceIn if fromTo.From() == common.ELocation.File() { // Files SDK can panic when the service hands it something unexpected! defer func() { // recover from potential panics and output raw properties for debug purposes; will cover the return call to setAttributes if panicerr := recover(); panicerr != nil { - pAdapt := propHolder.(*azfile.SMBPropertyAdapter) - - attr := pAdapt.PropertySource.FileAttributes() - lwt := pAdapt.PropertySource.FileLastWriteTime() - fct := pAdapt.PropertySource.FileCreationTime() + attr, _ := propHolder.FileAttributes() + lwt := propHolder.FileLastWriteTime() + fct := propHolder.FileCreationTime() err = fmt.Errorf("failed to read SMB properties (%w)! Raw data: attr: `%s` lwt: `%s`, fct: `%s`", err, attr, lwt, fct) } @@ -54,9 +51,9 @@ func (bd *azureFilesDownloader) PutSMBProperties(sip ISMBPropertyBearingSourceIn } setAttributes := func() error { - attribs := propHolder.FileAttributes() + attribs, _ := propHolder.FileAttributes() // This is a safe conversion. 
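In Track 2 the Files SDK reports attributes as a *file.NTFSFileAttributes struct of booleans rather than a raw bitmask, so the downloaders now round-trip through the FileAttributesToUint32 / FileAttributesFromUint32 helpers introduced later in this patch (ste/fileAttributesHelper.go); the replacement just below feeds that mask to windows.SetFileAttributes. A minimal round-trip, assuming the ste package from this repository is imported:

package main

import (
	"fmt"

	"github.com/Azure/azure-storage-azcopy/v10/ste"
)

func main() {
	// Bitmask -> SDK struct: read-only + archive.
	attrs, err := ste.FileAttributesFromUint32(ste.FileAttributeReadonly | ste.FileAttributeArchive)
	if err != nil {
		panic(err)
	}
	fmt.Println(attrs.ReadOnly, attrs.Archive) // true true

	// SDK struct -> bitmask, as passed to windows.SetFileAttributes.
	fmt.Printf("0x%x\n", ste.FileAttributesToUint32(*attrs)) // 0x21
}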
- err := windows.SetFileAttributes(destPtr, uint32(attribs)) + err = windows.SetFileAttributes(destPtr, FileAttributesToUint32(*attribs)) if err != nil { return fmt.Errorf("attempted file set attributes: %w", err) } @@ -252,12 +249,11 @@ func (a *azureFilesDownloader) PutSDDL(sip ISMBPropertyBearingSourceInfoProvider // TODO: this method may become obsolete if/when we are able to get permissions from the share root func (a *azureFilesDownloader) parentIsShareRoot(source string) bool { - u, err := url.Parse(source) + fileURLParts, err := file.ParseURL(source) if err != nil { return false } - f := azfile.NewFileURLParts(*u) - path := f.DirectoryOrFilePath + path := fileURLParts.DirectoryOrFilePath sep := common.DeterminePathSeparator(path) splitPath := strings.Split(strings.Trim(path, sep), sep) return path != "" && len(splitPath) == 1 diff --git a/ste/downloader-blob.go b/ste/downloader-blob.go index 5f25fade9..6efd953b4 100644 --- a/ste/downloader-blob.go +++ b/ste/downloader-blob.go @@ -21,12 +21,14 @@ package ste import ( - "net/url" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob" "os" + "time" "github.com/Azure/azure-pipeline-go/pipeline" "github.com/Azure/azure-storage-azcopy/v10/common" - "github.com/Azure/azure-storage-blob-go/azblob" ) type blobDownloader struct { @@ -67,13 +69,14 @@ func (bd *blobDownloader) Prologue(jptm IJobPartTransferMgr, srcPipeline pipelin bd.txInfo = jptm.Info() bd.jptm = jptm - if jptm.Info().SrcBlobType == azblob.BlobPageBlob { + if jptm.Info().SrcBlobType == blob.BlobTypePageBlob { // page blobs need a file-specific pacer // See comments in uploader-pageBlob for the reasons, since the same reasons apply are are explained there bd.filePacer = newPageBlobAutoPacer(pageBlobInitialBytesPerSecond, jptm.Info().BlockSize, false, jptm.(common.ILogger)) - u, _ := url.Parse(jptm.Info().Source) - bd.pageRangeOptimizer = newPageRangeOptimizer(azblob.NewPageBlobURL(*u, srcPipeline), jptm.Context()) + srcPagBlobClient := common.CreatePageBlobClient(jptm.Info().Source, jptm.CredentialInfo(), jptm.CredentialOpOptions(), jptm.ClientOptions()) + + bd.pageRangeOptimizer = newPageRangeOptimizer(srcPagBlobClient, jptm.Context()) bd.pageRangeOptimizer.fetchPages() } } @@ -108,8 +111,8 @@ func (bd *blobDownloader) GenerateDownloadFunc(jptm IJobPartTransferMgr, srcPipe return createDownloadChunkFunc(jptm, id, func() { // If the range does not contain any data, write out empty data to disk without performing download - if bd.pageRangeOptimizer != nil && !bd.pageRangeOptimizer.doesRangeContainData( - azblob.PageRange{Start: id.OffsetInFile(), End: id.OffsetInFile() + length - 1}) { + pageRange := pageblob.PageRange{Start: to.Ptr(id.OffsetInFile()), End: to.Ptr(id.OffsetInFile() + length - 1)} + if bd.pageRangeOptimizer != nil && !bd.pageRangeOptimizer.doesRangeContainData(pageRange) { // queue an empty chunk err := destWriter.EnqueueChunk(jptm.Context(), id, length, dummyReader{}, false) @@ -133,23 +136,18 @@ func (bd *blobDownloader) GenerateDownloadFunc(jptm IJobPartTransferMgr, srcPipe } // download blob from start Index till startIndex + adjustedChunkSize - info := jptm.Info() - u, _ := url.Parse(info.Source) - srcBlobURL := azblob.NewBlobURL(*u, srcPipeline) + source := jptm.Info().Source + blobClient := common.CreateBlobClient(source, jptm.CredentialInfo(), jptm.CredentialOpOptions(), jptm.ClientOptions()) + // TODO (gapra) : This can be removed 
after Access Conditions fix is released. // set access conditions, to protect against inconsistencies from changes-while-being-read - accessConditions := azblob.BlobAccessConditions{ModifiedAccessConditions: azblob.ModifiedAccessConditions{IfUnmodifiedSince: jptm.LastModifiedTime()}} - if isInManagedDiskImportExportAccount(*u) { + lmt := jptm.LastModifiedTime().In(time.FixedZone("GMT", 0)) + accessConditions := &blob.AccessConditions{ModifiedAccessConditions: &blob.ModifiedAccessConditions{IfUnmodifiedSince: &lmt}} + if isInManagedDiskImportExportAccount(source) { // no access conditions (and therefore no if-modified checks) are supported on managed disk import/export (md-impexp) // They are also unsupported on old "md-" style export URLs on the new (2019) large size disks. // And if fact you can't have an md- URL in existence if the blob is mounted as a disk, so it won't be getting changed anyway, so we just treat all md-disks the same - accessConditions = azblob.BlobAccessConditions{} - } - - // Once track2 goes live, we'll not need to do this conversion/casting and can directly use CpkInfo & CpkScopeInfo - clientProvidedKey := azblob.ClientProvidedKeyOptions{} - if jptm.IsSourceEncrypted() { - clientProvidedKey = common.ToClientProvidedKeyOptions(jptm.CpkInfo(), jptm.CpkScopeInfo()) + accessConditions = nil } // At this point we create an HTTP(S) request for the desired portion of the blob, and @@ -157,7 +155,12 @@ func (bd *blobDownloader) GenerateDownloadFunc(jptm IJobPartTransferMgr, srcPipe // The Download method encapsulates any retries that may be necessary to get to the point of receiving response headers. jptm.LogChunkStatus(id, common.EWaitReason.HeaderResponse()) enrichedContext := withRetryNotification(jptm.Context(), bd.filePacer) - get, err := srcBlobURL.Download(enrichedContext, id.OffsetInFile(), length, accessConditions, false, clientProvidedKey) + get, err := blobClient.DownloadStream(enrichedContext, &blob.DownloadStreamOptions{ + Range: blob.HTTPRange{Offset: id.OffsetInFile(), Count: length}, + AccessConditions: accessConditions, + CPKInfo: jptm.CpkInfo(), + CPKScopeInfo: jptm.CpkScopeInfo(), + }) if err != nil { jptm.FailActiveDownload("Downloading response body", err) // cancel entire transfer because this chunk has failed return @@ -166,10 +169,9 @@ func (bd *blobDownloader) GenerateDownloadFunc(jptm IJobPartTransferMgr, srcPipe // Enqueue the response body to be written out to disk // The retryReader encapsulates any retries that may be necessary while downloading the body jptm.LogChunkStatus(id, common.EWaitReason.Body()) - retryReader := get.Body(azblob.RetryReaderOptions{ - MaxRetryRequests: destWriter.MaxRetryPerDownloadBody(), - NotifyFailedRead: common.NewReadLogFunc(jptm, u), - ClientProvidedKeyOptions: clientProvidedKey, + retryReader := get.NewRetryReader(enrichedContext, &blob.RetryReaderOptions{ + MaxRetries: int32(destWriter.MaxRetryPerDownloadBody()), + OnFailedRead: common.NewBlobReadLogFunc(jptm, source), }) defer retryReader.Close() err = destWriter.EnqueueChunk(jptm.Context(), id, length, newPacedResponseBody(jptm.Context(), retryReader, pacer), true) diff --git a/ste/downloader-blobFS.go b/ste/downloader-blobFS.go index 265abf282..08b856248 100644 --- a/ste/downloader-blobFS.go +++ b/ste/downloader-blobFS.go @@ -103,7 +103,7 @@ func (bd *blobFSDownloader) GenerateDownloadFunc(jptm IJobPartTransferMgr, srcPi jptm.LogChunkStatus(id, common.EWaitReason.Body()) retryReader := get.Body(azbfs.RetryReaderOptions{ MaxRetryRequests: 
MaxRetryPerDownloadBody, - NotifyFailedRead: common.NewReadLogFunc(jptm, u), + NotifyFailedRead: common.NewV1ReadLogFunc(jptm, u), }) defer retryReader.Close() err = destWriter.EnqueueChunk(jptm.Context(), id, length, newPacedResponseBody(jptm.Context(), retryReader, pacer), true) diff --git a/ste/fileAttributesHelper.go b/ste/fileAttributesHelper.go new file mode 100644 index 000000000..d54120227 --- /dev/null +++ b/ste/fileAttributesHelper.go @@ -0,0 +1,102 @@ +// Copyright © 2023 Microsoft +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package ste + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" +) + +// This is intended for easy conversion to/from local file attributes +const ( + FileAttributeNone uint32 = 0 + FileAttributeReadonly uint32 = 1 + FileAttributeHidden uint32 = 2 + FileAttributeSystem uint32 = 4 + FileAttributeArchive uint32 = 32 + FileAttributeTemporary uint32 = 256 + FileAttributeOffline uint32 = 4096 + FileAttributeNotContentIndexed uint32 = 8192 + FileAttributeNoScrubData uint32 = 131072 +) + +func FileAttributesFromUint32(attributes uint32) (*file.NTFSFileAttributes, error) { + attr := file.NTFSFileAttributes{} + if attributes&FileAttributeNone != 0 { + attr.None = true + } + if attributes&FileAttributeReadonly != 0 { + attr.ReadOnly = true + } + if attributes&FileAttributeHidden != 0 { + attr.Hidden = true + } + if attributes&FileAttributeSystem != 0 { + attr.System = true + } + if attributes&FileAttributeArchive != 0 { + attr.Archive = true + } + if attributes&FileAttributeTemporary != 0 { + attr.Temporary = true + } + if attributes&FileAttributeOffline != 0 { + attr.Offline = true + } + if attributes&FileAttributeNotContentIndexed != 0 { + attr.NotContentIndexed = true + } + if attributes&FileAttributeNoScrubData != 0 { + attr.NoScrubData = true + } + return &attr, nil +} + +func FileAttributesToUint32(attributes file.NTFSFileAttributes) uint32 { + var attr uint32 + if attributes.None { + attr |= FileAttributeNone + } + if attributes.ReadOnly { + attr |= FileAttributeReadonly + } + if attributes.Hidden { + attr |= FileAttributeHidden + } + if attributes.System { + attr |= FileAttributeSystem + } + if attributes.Archive { + attr |= FileAttributeArchive + } + if attributes.Temporary { + attr |= FileAttributeTemporary + } + if attributes.Offline { + attr |= FileAttributeOffline + } + if attributes.NotContentIndexed { + attr |= FileAttributeNotContentIndexed + } + if attributes.NoScrubData { + attr |= 
FileAttributeNoScrubData + } + return attr +} \ No newline at end of file diff --git a/ste/mgr-JobMgr.go b/ste/mgr-JobMgr.go index b747ecf4e..cda3e6ad6 100755 --- a/ste/mgr-JobMgr.go +++ b/ste/mgr-JobMgr.go @@ -23,7 +23,6 @@ package ste import ( "context" "fmt" - "github.com/Azure/azure-storage-blob-go/azblob" "net/http" "runtime" "strings" @@ -112,7 +111,7 @@ type IJobMgr interface { func NewJobMgr(concurrency ConcurrencySettings, jobID common.JobID, appCtx context.Context, cpuMon common.CPUMonitor, level common.LogLevel, commandString string, logFileFolder string, tuner ConcurrencyTuner, pacer PacerAdmin, slicePool common.ByteSlicePooler, cacheLimiter common.CacheLimiter, fileCountLimiter common.CacheLimiter, - jobLogger common.ILoggerResetable, daemonMode bool, sourceBlobToken azblob.Credential) IJobMgr { + jobLogger common.ILoggerResetable, daemonMode bool) IJobMgr { const channelSize = 100000 // PartsChannelSize defines the number of JobParts which can be placed into the // parts channel. Any JobPart which comes from FE and partChannel is full, @@ -188,7 +187,6 @@ func NewJobMgr(concurrency ConcurrencySettings, jobID common.JobID, appCtx conte cpuMon: cpuMon, jstm: &jstm, isDaemon: daemonMode, - sourceBlobToken: sourceBlobToken, /*Other fields remain zero-value until this job is scheduled */} jm.Reset(appCtx, commandString) // One routine constantly monitors the partsChannel. It takes the JobPartManager from @@ -338,7 +336,6 @@ type jobMgr struct { jstm *jobStatusManager isDaemon bool /* is it running as service */ - sourceBlobToken azblob.Credential } // ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// @@ -467,16 +464,22 @@ func (jm *jobMgr) AddJobOrder(order common.CopyJobPartOrderRequest) IJobPartMgr jppfn := JobPartPlanFileName(fmt.Sprintf(JobPartPlanFileNameFormat, order.JobID.String(), 0, DataSchemaVersion)) jppfn.Create(order) // Convert the order to a plan file + s2sSourceCredInfo := order.CredentialInfo.WithType(order.S2SSourceCredentialType) + if order.S2SSourceCredentialType == common.ECredentialType.OAuthToken() { + s2sSourceCredInfo.OAuthTokenInfo.TokenCredential = s2sSourceCredInfo.S2SSourceTokenCredential + } + jpm := &jobPartMgr{ - jobMgr: jm, - filename: jppfn, - sourceSAS: order.SourceRoot.SAS, - destinationSAS: order.DestinationRoot.SAS, - pacer: jm.pacer, - slicePool: jm.slicePool, - cacheLimiter: jm.cacheLimiter, - fileCountLimiter: jm.fileCountLimiter, - credInfo: order.CredentialInfo, + jobMgr: jm, + filename: jppfn, + sourceSAS: order.SourceRoot.SAS, + destinationSAS: order.DestinationRoot.SAS, + pacer: jm.pacer, + slicePool: jm.slicePool, + cacheLimiter: jm.cacheLimiter, + fileCountLimiter: jm.fileCountLimiter, + credInfo: order.CredentialInfo, + s2sSourceCredInfo: s2sSourceCredInfo, } jpm.planMMF = jpm.filename.Map() jm.jobPartMgrs.Set(order.PartNum, jpm) @@ -503,7 +506,7 @@ func (jm *jobMgr) AddJobOrder(order common.CopyJobPartOrderRequest) IJobPartMgr } func (jm *jobMgr) setFinalPartOrdered(partNum PartNumber, isFinalPart bool) { - newVal := common.Iffint32(isFinalPart, 1, 0) + newVal := int32(common.Iff(isFinalPart, 1, 0)) oldVal := atomic.SwapInt32(&jm.atomicFinalPartOrderedIndicator, newVal) if newVal == 0 && oldVal == 1 { // we just cleared the flag. Sanity check that. 
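For illustration only (not part of the diff): the new ste/fileAttributesHelper.go above converts between the raw FILE_ATTRIBUTE_*-style bitmask used locally and the Track 2 SDK's file.NTFSFileAttributes struct. A minimal Go sketch of the round trip, assuming only the exported names added in that file; note that FileAttributeNone is 0, so the attributes&FileAttributeNone check in FileAttributesFromUint32 can never be true and an empty mask simply yields the zero-value struct.

package main

import (
	"fmt"

	"github.com/Azure/azure-storage-azcopy/v10/ste"
)

func main() {
	// A Windows-style attribute bitmask: read-only (1) | archive (32).
	raw := ste.FileAttributeReadonly | ste.FileAttributeArchive

	// Expand the bitmask into the structured form the azfile Track 2 SDK expects.
	attrs, err := ste.FileAttributesFromUint32(raw)
	if err != nil {
		panic(err)
	}
	fmt.Println(attrs.ReadOnly, attrs.Archive) // true true

	// Collapse it back to the uint32 form passed to windows.SetFileAttributes.
	fmt.Println(ste.FileAttributesToUint32(*attrs) == raw) // true
}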
@@ -971,7 +974,7 @@ func (jm *jobMgr) scheduleJobParts() { go jm.poolSizer() startedPoolSizer = true } - jobPart.ScheduleTransfers(jm.Context(), jm.sourceBlobToken) + jobPart.ScheduleTransfers(jm.Context()) } } } @@ -1066,7 +1069,7 @@ func (jm *jobMgr) SuccessfulBytesInActiveFiles() uint64 { } func (jm *jobMgr) CancelPauseJobOrder(desiredJobStatus common.JobStatus) common.CancelPauseResumeResponse { - verb := common.IffString(desiredJobStatus == common.EJobStatus.Paused(), "pause", "cancel") + verb := common.Iff(desiredJobStatus == common.EJobStatus.Paused(), "pause", "cancel") jobID := jm.jobID // Search for the Part 0 of the Job, since the Part 0 status concludes the actual status of the Job @@ -1110,7 +1113,7 @@ func (jm *jobMgr) CancelPauseJobOrder(desiredJobStatus common.JobStatus) common. case common.EJobStatus.Paused(): // Logically, It's OK to pause an already-paused job jpp0.SetJobStatus(desiredJobStatus) msg := fmt.Sprintf("JobID=%v %s", jobID, - common.IffString(desiredJobStatus == common.EJobStatus.Paused(), "paused", "canceled")) + common.Iff(desiredJobStatus == common.EJobStatus.Paused(), "paused", "canceled")) if jm.ShouldLog(pipeline.LogInfo) { jm.Log(pipeline.LogInfo, msg) diff --git a/ste/mgr-JobPartMgr.go b/ste/mgr-JobPartMgr.go index 7887e5358..7539e97b9 100644 --- a/ste/mgr-JobPartMgr.go +++ b/ste/mgr-JobPartMgr.go @@ -3,6 +3,11 @@ package ste import ( "context" "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + azruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" "mime" "net" "net/http" @@ -16,8 +21,6 @@ import ( "github.com/Azure/azure-pipeline-go/pipeline" "github.com/Azure/azure-storage-azcopy/v10/azbfs" "github.com/Azure/azure-storage-azcopy/v10/common" - "github.com/Azure/azure-storage-blob-go/azblob" - "github.com/Azure/azure-storage-file-go/azfile" "golang.org/x/sync/semaphore" ) @@ -28,7 +31,7 @@ var DebugSkipFiles = make(map[string]bool) type IJobPartMgr interface { Plan() *JobPartPlanHeader - ScheduleTransfers(jobCtx context.Context, sourceBlobToken azblob.Credential) + ScheduleTransfers(jobCtx context.Context) StartJobXfer(jptm IJobPartTransferMgr) ReportTransferDone(status common.TransferStatus) uint32 GetOverwriteOption() common.OverwriteOption @@ -52,61 +55,26 @@ type IJobPartMgr interface { ExclusiveDestinationMap() *common.ExclusiveStringMap ChunkStatusLogger() common.ChunkStatusLogger common.ILogger + + CredentialInfo() common.CredentialInfo + ClientOptions() azcore.ClientOptions + S2SSourceCredentialInfo() common.CredentialInfo + S2SSourceClientOptions() azcore.ClientOptions + CredentialOpOptions() *common.CredentialOpOptions + SourceProviderPipeline() pipeline.Pipeline - SecondarySourceProviderPipeline() pipeline.Pipeline - SourceCredential() pipeline.Factory getOverwritePrompter() *overwritePrompter getFolderCreationTracker() FolderCreationTracker SecurityInfoPersistenceManager() *securityInfoPersistenceManager FolderDeletionManager() common.FolderDeletionManager - CpkInfo() common.CpkInfo - CpkScopeInfo() common.CpkScopeInfo + CpkInfo() *blob.CPKInfo + CpkScopeInfo() *blob.CPKScopeInfo IsSourceEncrypted() bool /* Status Manager Updates */ SendXferDoneMsg(msg xferDoneMsg) PropertiesToTransfer() common.SetPropertiesFlags } -type serviceAPIVersionOverride struct{} - -// ServiceAPIVersionOverride is a global variable in package ste which is a key to Service Api 
Version Value set in the every Job's context. -var ServiceAPIVersionOverride = serviceAPIVersionOverride{} - -// DefaultServiceApiVersion is the default value of service api version that is set as value to the ServiceAPIVersionOverride in every Job's context. -var DefaultServiceApiVersion = common.GetLifecycleMgr().GetEnvironmentVariable(common.EEnvironmentVariable.DefaultServiceApiVersion()) - -// NewVersionPolicy creates a factory that can override the service version -// set in the request header. -// If the context has key overwrite-current-version set to false, then x-ms-version in -// request is not overwritten else it will set x-ms-version to 207-04-17 -func NewVersionPolicyFactory() pipeline.Factory { - return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc { - return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { - // get the service api version value using the ServiceAPIVersionOverride set in the context. - if value := ctx.Value(ServiceAPIVersionOverride); value != nil { - request.Header.Set("x-ms-version", value.(string)) - } - resp, err := next.Do(ctx, request) - return resp, err - } - }) -} - -func NewTrailingDotPolicyFactory(trailingDot common.TrailingDotOption, from common.Location) pipeline.Factory { - return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc { - return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { - if trailingDot == common.ETrailingDotOption.Enable() { - request.Header.Set("x-ms-allow-trailing-dot", "true") - if from == common.ELocation.File() { - request.Header.Set("x-ms-source-allow-trailing-dot", "true") - } - request.Header.Set("x-ms-version", "2022-11-02") - } - return next.Do(ctx, request) - } - }) -} - // NewAzcopyHTTPClient creates a new HTTP client. // We must minimize use of this, and instead maximize re-use of the returned client object. // Why? Because that makes our connection pooling more efficient, and prevents us exhausting the @@ -175,39 +143,26 @@ func newAzcopyHTTPClientFactory(pipelineHTTPClient *http.Client) pipeline.Factor }) } -// NewBlobPipeline creates a Pipeline using the specified credentials and options. -func NewBlobPipeline(c azblob.Credential, o azblob.PipelineOptions, r XferRetryOptions, p pacer, client *http.Client, statsAcc *PipelineNetworkStats) pipeline.Pipeline { - if c == nil { - panic("c can't be nil") - } - // Closest to API goes first; closest to the wire goes last - f := []pipeline.Factory{ - azblob.NewTelemetryPolicyFactory(o.Telemetry), - azblob.NewUniqueRequestIDPolicyFactory(), - NewBlobXferRetryPolicyFactory(r), // actually retry the operation - newRetryNotificationPolicyFactory(), // record that a retry status was returned - c, - pipeline.MethodFactoryMarker(), // indicates at what stage in the pipeline the method factory is invoked - // NewPacerPolicyFactory(p), - NewVersionPolicyFactory(), - // Bump the service version when using the Cold access tier. - pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc { - // TODO: Remove me when bumping the service version is no longer relevant. 
- return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { - if request.Header.Get("x-ms-access-tier") == common.EBlockBlobTier.Cold().String() { - request.Header.Set("x-ms-version", "2021-12-02") - } +func NewClientOptions(retry policy.RetryOptions, telemetry policy.TelemetryOptions, transport policy.Transporter, statsAcc *PipelineNetworkStats, log LogOptions, trailingDot *common.TrailingDotOption, from *common.Location) azcore.ClientOptions { + // Pipeline will look like + // [includeResponsePolicy, newAPIVersionPolicy (ignored), NewTelemetryPolicy, perCall, NewRetryPolicy, perRetry, NewLogPolicy, httpHeaderPolicy, bodyDownloadPolicy] + // TODO (gapra): Does this have to happen here? + log.RequestLogOptions.SyslogDisabled = common.IsForceLoggingDisabled() + perCallPolicies := []policy.Policy{azruntime.NewRequestIDPolicy()} + // TODO: The default logging policy is not equivalent to the old one (HTTP request tracing). + perRetryPolicies := []policy.Policy{newRetryNotificationPolicy(), newVersionPolicy(), newColdTierPolicy(), NewTrailingDotPolicy(trailingDot, from), newLogPolicy(log), newStatsPolicy(statsAcc)} - return next.Do(ctx, request) - } - }), - NewRequestLogPolicyFactory(RequestLogOptions{ - LogWarningIfTryOverThreshold: o.RequestLog.LogWarningIfTryOverThreshold, - SyslogDisabled: common.IsForceLoggingDisabled(), - }), - newXferStatsPolicyFactory(statsAcc), + return azcore.ClientOptions{ + //APIVersion: , + //Cloud: , + //Logging: , + Retry: retry, + Telemetry: telemetry, + //TracingProvider: , + Transport: transport, + PerCallPolicies: perCallPolicies, + PerRetryPolicies: perRetryPolicies, } - return pipeline.NewPipeline(f, pipeline.Options{HTTPSender: newAzcopyHTTPClientFactory(client), Log: o.Log}) } // NewBlobFSPipeline creates a pipeline for transfers to and from BlobFS Service @@ -220,8 +175,8 @@ func NewBlobFSPipeline(c azbfs.Credential, o azbfs.PipelineOptions, r XferRetryO f := []pipeline.Factory{ azbfs.NewTelemetryPolicyFactory(o.Telemetry), azbfs.NewUniqueRequestIDPolicyFactory(), - NewBFSXferRetryPolicyFactory(r), // actually retry the operation - newRetryNotificationPolicyFactory(), // record that a retry status was returned + NewBFSXferRetryPolicyFactory(r), // actually retry the operation + newV1RetryNotificationPolicyFactory(), // record that a retry status was returned } f = append(f, c) @@ -237,30 +192,6 @@ func NewBlobFSPipeline(c azbfs.Credential, o azbfs.PipelineOptions, r XferRetryO return pipeline.NewPipeline(f, pipeline.Options{HTTPSender: newAzcopyHTTPClientFactory(client), Log: o.Log}) } -// NewFilePipeline creates a Pipeline using the specified credentials and options.
-func NewFilePipeline(c azfile.Credential, o azfile.PipelineOptions, r azfile.RetryOptions, p pacer, client *http.Client, statsAcc *PipelineNetworkStats, trailingDot common.TrailingDotOption, from common.Location) pipeline.Pipeline { - if c == nil { - panic("c can't be nil") - } - // Closest to API goes first; closest to the wire goes last - f := []pipeline.Factory{ - azfile.NewTelemetryPolicyFactory(o.Telemetry), - azfile.NewUniqueRequestIDPolicyFactory(), - azfile.NewRetryPolicyFactory(r), // actually retry the operation - newRetryNotificationPolicyFactory(), // record that a retry status was returned - NewVersionPolicyFactory(), - NewTrailingDotPolicyFactory(trailingDot, from), - c, - pipeline.MethodFactoryMarker(), // indicates at what stage in the pipeline the method factory is invoked - NewRequestLogPolicyFactory(RequestLogOptions{ - LogWarningIfTryOverThreshold: o.RequestLog.LogWarningIfTryOverThreshold, - SyslogDisabled: common.IsForceLoggingDisabled(), - }), - newXferStatsPolicyFactory(statsAcc), - } - return pipeline.NewPipeline(f, pipeline.Options{HTTPSender: newAzcopyHTTPClientFactory(client), Log: o.Log}) -} - // ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Holds the status of transfers in this jptm @@ -285,7 +216,11 @@ type jobPartMgr struct { // Since sas is not persisted in JobPartPlan file, it stripped from the destination and stored in memory in JobPart Manager destinationSAS string - credInfo common.CredentialInfo + credInfo common.CredentialInfo + clientOptions azcore.ClientOptions + s2sSourceCredInfo common.CredentialInfo + s2sSourceClientOptions azcore.ClientOptions + credOption *common.CredentialOpOptions // When the part is schedule to run (inprogress), the below fields are used planMMF *JobPartPlanMMF // This Job part plan's MMF @@ -326,7 +261,6 @@ type jobPartMgr struct { // Currently, this only sees use in ADLSG2->ADLSG2 ACL transfers. TODO: Remove it when we can reliably get/set ACLs on blob. secondaryPipeline pipeline.Pipeline - sourceCredential pipeline.Factory // must satisfy azblob.TokenCredential currently sourceProviderPipeline pipeline.Pipeline // TODO: Ditto secondarySourceProviderPipeline pipeline.Pipeline @@ -368,7 +302,7 @@ func (jpm *jobPartMgr) Plan() *JobPartPlanHeader { } // ScheduleTransfers schedules this job part's transfers. 
It is called when a new job part is ordered & is also called to resume a paused Job -func (jpm *jobPartMgr) ScheduleTransfers(jobCtx context.Context, sourceBlobToken azblob.Credential) { +func (jpm *jobPartMgr) ScheduleTransfers(jobCtx context.Context) { jobCtx = context.WithValue(jobCtx, ServiceAPIVersionOverride, DefaultServiceApiVersion) jpm.atomicTransfersDone = 0 // Reset the # of transfers done back to 0 // partplan file is opened and mapped when job part is added @@ -438,7 +372,8 @@ func (jpm *jobPartMgr) ScheduleTransfers(jobCtx context.Context, sourceBlobToken jpm.priority = plan.Priority - jpm.createPipelines(jobCtx, sourceBlobToken) // pipeline is created per job part manager + jpm.clientInfo() + jpm.createPipelines(jobCtx) // pipeline is created per job part manager // *** Schedule this job part's transfers *** for t := uint32(0); t < plan.NumTransfers; t++ { @@ -539,13 +474,84 @@ func (jpm *jobPartMgr) RescheduleTransfer(jptm IJobPartTransferMgr) { jpm.jobMgr.ScheduleTransfer(jpm.priority, jptm) } -func (jpm *jobPartMgr) createPipelines(ctx context.Context, sourceBlobToken azblob.Credential) { +func (jpm *jobPartMgr) clientInfo() { + jobState := jpm.jobMgr.getInMemoryTransitJobState() + + // Destination credential + if jpm.credInfo.CredentialType == common.ECredentialType.Unknown() { + jpm.credInfo = jobState.CredentialInfo + } + fromTo := jpm.planMMF.Plan().FromTo + + // S2S source credential + // Default credential type assumed to be SAS + s2sSourceCredInfo := common.CredentialInfo{CredentialType: common.ECredentialType.Anonymous()} + // For Blob and BlobFS, there are other options for the source credential + if (fromTo.IsS2S() || fromTo.IsDownload()) && (fromTo.From() == common.ELocation.Blob() || fromTo.From() == common.ELocation.BlobFS()) { + if fromTo.To().CanForwardOAuthTokens() && jobState.S2SSourceCredentialType.IsAzureOAuth() { + if jpm.s2sSourceCredInfo.CredentialType == common.ECredentialType.Unknown() { + s2sSourceCredInfo = jobState.CredentialInfo.WithType(jobState.S2SSourceCredentialType) + } + } else if fromTo.IsDownload() && (jobState.CredentialInfo.CredentialType.IsAzureOAuth() || jobState.CredentialInfo.CredentialType == common.ECredentialType.SharedKey()) { + s2sSourceCredInfo = jobState.CredentialInfo + } + } + jpm.s2sSourceCredInfo = s2sSourceCredInfo + + jpm.credOption = &common.CredentialOpOptions{ + LogInfo: func(str string) { jpm.Log(pipeline.LogInfo, str) }, + LogError: func(str string) { jpm.Log(pipeline.LogError, str) }, + Panic: jpm.Panic, + CallerID: fmt.Sprintf("JobID=%v, Part#=%d", jpm.Plan().JobID, jpm.Plan().PartNum), + Cancel: jpm.jobMgr.Cancel, + } + + retryOptions := policy.RetryOptions{ + MaxRetries: UploadMaxTries, + TryTimeout: UploadTryTimeout, + RetryDelay: UploadRetryDelay, + MaxRetryDelay: UploadMaxRetryDelay, + } + + var userAgent string + if fromTo.From() == common.ELocation.S3() { + userAgent = common.S3ImportUserAgent + } else if fromTo.From() == common.ELocation.GCP() { + userAgent = common.GCPImportUserAgent + } else if fromTo.From() == common.ELocation.Benchmark() || fromTo.To() == common.ELocation.Benchmark() { + userAgent = common.BenchmarkUserAgent + } else { + userAgent = common.GetLifecycleMgr().AddUserAgentPrefix(common.UserAgent) + } + telemetryOptions := policy.TelemetryOptions{ApplicationID: userAgent} + + httpClient := jpm.jobMgr.HttpClient() + networkStats := jpm.jobMgr.PipelineNetworkStats() + logOptions := LogOptions{LogOptions: jpm.jobMgr.PipelineLogInfo()} + + var sourceTrailingDot *common.TrailingDotOption + 
var trailingDot *common.TrailingDotOption + var from *common.Location + if (fromTo.IsS2S() || fromTo.IsDownload()) && (fromTo.From() == common.ELocation.File()) { + sourceTrailingDot = &jpm.planMMF.Plan().DstFileData.TrailingDot + } + if fromTo.IsS2S() && fromTo.To() == common.ELocation.File() || + fromTo.IsUpload() && fromTo.To() == common.ELocation.File() || + fromTo.IsDownload() && fromTo.From() == common.ELocation.File() || + fromTo.IsSetProperties() && fromTo.From() == common.ELocation.File() || + fromTo.IsDelete() && fromTo.From() == common.ELocation.File() { + trailingDot = &jpm.planMMF.Plan().DstFileData.TrailingDot + if fromTo.IsS2S() { + from = to.Ptr(fromTo.From()) + } + } + jpm.s2sSourceClientOptions = NewClientOptions(retryOptions, telemetryOptions, httpClient, nil, logOptions, sourceTrailingDot, nil) + jpm.clientOptions = NewClientOptions(retryOptions, telemetryOptions, httpClient, networkStats, logOptions, trailingDot, from)} + +func (jpm *jobPartMgr) createPipelines(ctx context.Context) { if atomic.SwapUint32(&jpm.atomicPipelinesInitedIndicator, 1) != 0 { panic("init client and pipelines for same jobPartMgr twice") } - if jpm.sourceCredential == nil { - jpm.sourceCredential = sourceBlobToken - } fromTo := jpm.planMMF.Plan().FromTo credInfo := jpm.credInfo if jpm.credInfo.CredentialType == common.ECredentialType.Unknown() { @@ -582,38 +588,6 @@ func (jpm *jobPartMgr) createPipelines(ctx context.Context, sourceBlobToken azbl // Create source info provider's pipeline for S2S copy or download (in some cases). // BlobFS and Blob will utilize the Blob source info provider, as they are the "same" resource, but provide different details on both endpoints if (fromTo.IsS2S() || fromTo.IsDownload()) && (fromTo.From() == common.ELocation.Blob() || fromTo.From() == common.ELocation.BlobFS()) { - sourceCred := azblob.NewAnonymousCredential() - jobState := jpm.jobMgr.getInMemoryTransitJobState() - if fromTo.To().CanForwardOAuthTokens() && jobState.S2SSourceCredentialType.IsAzureOAuth() { - if jpm.sourceCredential == nil { - sourceCred = common.CreateBlobCredential(ctx, jobState.CredentialInfo.WithType(jobState.S2SSourceCredentialType), credOption) - jpm.sourceCredential = sourceCred - } - } else if fromTo.IsDownload() && jobState.CredentialInfo.CredentialType.IsAzureOAuth() { - sourceCred = common.CreateBlobCredential(ctx, jobState.CredentialInfo, credOption) - } else if fromTo.IsDownload() && jobState.CredentialInfo.CredentialType == common.ECredentialType.SharedKey() { - lcm := common.GetLifecycleMgr() - var err error - // Convert the shared key credential to a blob credential & re-use it - sourceCred, err = azblob.NewSharedKeyCredential(lcm.GetEnvironmentVariable(common.EEnvironmentVariable.AccountName()), lcm.GetEnvironmentVariable(common.EEnvironmentVariable.AccountKey())) - if err != nil { - jpm.Panic(fmt.Errorf("sanity check: failed to initialize shared key credential: %w", err)) - } - } - - jpm.sourceProviderPipeline = NewBlobPipeline( - sourceCred, - azblob.PipelineOptions{ - Log: jpm.jobMgr.PipelineLogInfo(), - Telemetry: azblob.TelemetryOptions{ - Value: userAgent, - }, - }, - xferRetryOption, - jpm.pacer, - jpm.jobMgr.HttpClient(), - statsAccForSip) - // Prepare to pull dfs properties if we're working with BlobFS if fromTo.From() == common.ELocation.BlobFS() || jpm.Plan().PreservePermissions.IsTruthy() || jpm.Plan().PreservePOSIXProperties { credential := common.CreateBlobFSCredential(ctx, credInfo, credOption) @@ -631,27 +605,6 @@ func (jpm *jobPartMgr) createPipelines(ctx 
context.Context, sourceBlobToken azbl statsAccForSip) } } - // Set up a source pipeline for files if necessary - if (fromTo.IsS2S() || fromTo.IsDownload()) && (fromTo.From() == common.ELocation.File()) { - jpm.sourceProviderPipeline = NewFilePipeline( - azfile.NewAnonymousCredential(), - azfile.PipelineOptions{ - Log: jpm.jobMgr.PipelineLogInfo(), - Telemetry: azfile.TelemetryOptions{ - Value: userAgent, - }, - }, azfile.RetryOptions{ - Policy: azfile.RetryPolicyExponential, - MaxTries: UploadMaxTries, - TryTimeout: UploadTryTimeout, - RetryDelay: UploadRetryDelay, - MaxRetryDelay: UploadMaxRetryDelay, - }, jpm.pacer, - jpm.jobMgr.HttpClient(), - statsAccForSip, - jpm.planMMF.Plan().DstFileData.TrailingDot, - fromTo.From()) - } switch { case fromTo.IsS2S() && (fromTo.To() == common.ELocation.Blob() || fromTo.To() == common.ELocation.BlobFS()), // destination determines pipeline for S2S, blobfs uses blob for S2S @@ -659,20 +612,7 @@ func (jpm *jobPartMgr) createPipelines(ctx context.Context, sourceBlobToken azbl fromTo.IsDownload() && fromTo.From() == common.ELocation.Blob(), // source determines pipeline for download fromTo.IsSetProperties() && (fromTo.From() == common.ELocation.Blob() || fromTo.From() == common.ELocation.BlobFS()), // source determines pipeline for set properties, blobfs uses blob for set properties fromTo.IsDelete() && fromTo.From() == common.ELocation.Blob(): // ditto for delete - credential := common.CreateBlobCredential(ctx, credInfo, credOption) jpm.Log(pipeline.LogInfo, fmt.Sprintf("JobID=%v, credential type: %v", jpm.Plan().JobID, credInfo.CredentialType)) - jpm.pipeline = NewBlobPipeline( - credential, - azblob.PipelineOptions{ - Log: jpm.jobMgr.PipelineLogInfo(), - Telemetry: azblob.TelemetryOptions{ - Value: userAgent, - }, - }, - xferRetryOption, - jpm.pacer, - jpm.jobMgr.HttpClient(), - jpm.jobMgr.PipelineNetworkStats()) // If we need to write specifically to the gen2 endpoint, we should have this available. 
if fromTo.To() == common.ELocation.BlobFS() || jpm.Plan().PreservePermissions.IsTruthy() || jpm.Plan().PreservePOSIXProperties { @@ -708,58 +648,6 @@ func (jpm *jobPartMgr) createPipelines(ctx context.Context, sourceBlobToken azbl jpm.pacer, jpm.jobMgr.HttpClient(), jpm.jobMgr.PipelineNetworkStats()) - - // Just in case we need to talk to blob while we're at it - var blobCred = azblob.NewAnonymousCredential() - if credInfo.CredentialType == common.ECredentialType.SharedKey() { - lcm := common.GetLifecycleMgr() - var err error - // Convert the shared key credential to a blob credential & re-use it - blobCred, err = azblob.NewSharedKeyCredential(lcm.GetEnvironmentVariable(common.EEnvironmentVariable.AccountName()), lcm.GetEnvironmentVariable(common.EEnvironmentVariable.AccountKey())) - if err != nil { - jpm.Panic(fmt.Errorf("sanity check: failed to initialize shared key credential: %w", err)) - } - } else if credInfo.CredentialType != common.ECredentialType.Anonymous() { - blobCred = common.CreateBlobCredential(ctx, credInfo, credOption) - } - - jpm.secondaryPipeline = NewBlobPipeline( - blobCred, - azblob.PipelineOptions{ - Log: jpm.jobMgr.PipelineLogInfo(), - Telemetry: azblob.TelemetryOptions{ - Value: userAgent, - }, - }, - xferRetryOption, - jpm.pacer, - jpm.jobMgr.HttpClient(), - jpm.jobMgr.PipelineNetworkStats()) - case fromTo.IsS2S() && fromTo.To() == common.ELocation.File(), - fromTo.IsUpload() && fromTo.To() == common.ELocation.File(), - fromTo.IsDownload() && fromTo.From() == common.ELocation.File(), - fromTo.IsSetProperties() && fromTo.From() == common.ELocation.File(), - fromTo.IsDelete() && fromTo.From() == common.ELocation.File(): - jpm.pipeline = NewFilePipeline( - azfile.NewAnonymousCredential(), - azfile.PipelineOptions{ - Log: jpm.jobMgr.PipelineLogInfo(), - Telemetry: azfile.TelemetryOptions{ - Value: userAgent, - }, - }, - azfile.RetryOptions{ - Policy: azfile.RetryPolicyExponential, - MaxTries: UploadMaxTries, - TryTimeout: UploadTryTimeout, - RetryDelay: UploadRetryDelay, - MaxRetryDelay: UploadMaxRetryDelay, - }, - jpm.pacer, - jpm.jobMgr.HttpClient(), - jpm.jobMgr.PipelineNetworkStats(), - jpm.planMMF.Plan().DstFileData.TrailingDot, - fromTo.From()) } } @@ -862,11 +750,11 @@ func (jpm *jobPartMgr) BlobTiers() (blockBlobTier common.BlockBlobTier, pageBlob return jpm.blockBlobTier, jpm.pageBlobTier } -func (jpm *jobPartMgr) CpkInfo() common.CpkInfo { +func (jpm *jobPartMgr) CpkInfo() *blob.CPKInfo { return common.GetCpkInfo(jpm.cpkOptions.CpkInfo) } -func (jpm *jobPartMgr) CpkScopeInfo() common.CpkScopeInfo { +func (jpm *jobPartMgr) CpkScopeInfo() *blob.CPKScopeInfo { return common.GetCpkScopeInfo(jpm.cpkOptions.CpkScopeInfo) } @@ -943,7 +831,6 @@ func (jpm *jobPartMgr) ReportTransferDone(status common.TransferStatus) (transfe jpm.Plan().SetJobPartStatus(common.EJobStatus.EnhanceJobStatusInfo(jppi.transfersSkipped > 0, jppi.transfersFailed > 0, jppi.transfersCompleted > 0)) jpm.jobMgr.ReportJobPartDone(jppi) - jpm.Log(pipeline.LogInfo, fmt.Sprintf("JobID=%v, Part#=%d, TransfersDone=%d of %d", jpm.planMMF.Plan().JobID, jpm.planMMF.Plan().PartNum, transfersDone, jpm.planMMF.Plan().NumTransfers)) @@ -995,16 +882,28 @@ func (jpm *jobPartMgr) ChunkStatusLogger() common.ChunkStatusLogger { return jpm.jobMgr.ChunkStatusLogger() } -func (jpm *jobPartMgr) SourceProviderPipeline() pipeline.Pipeline { - return jpm.sourceProviderPipeline +func (jpm *jobPartMgr) CredentialInfo() common.CredentialInfo { + return jpm.credInfo +} + +func (jpm *jobPartMgr) S2SSourceCredentialInfo() 
common.CredentialInfo { + return jpm.s2sSourceCredInfo +} + +func (jpm *jobPartMgr) ClientOptions() azcore.ClientOptions { + return jpm.clientOptions } -func (jpm *jobPartMgr) SecondarySourceProviderPipeline() pipeline.Pipeline { - return jpm.secondarySourceProviderPipeline +func (jpm *jobPartMgr) S2SSourceClientOptions() azcore.ClientOptions { + return jpm.s2sSourceClientOptions } -func (jpm *jobPartMgr) SourceCredential() pipeline.Factory { - return jpm.sourceCredential +func (jpm *jobPartMgr) CredentialOpOptions() *common.CredentialOpOptions { + return jpm.credOption +} + +func (jpm *jobPartMgr) SourceProviderPipeline() pipeline.Pipeline { + return jpm.sourceProviderPipeline } /* Status update messages should not fail */ diff --git a/ste/mgr-JobPartTransferMgr.go b/ste/mgr-JobPartTransferMgr.go index 5223f68a8..42f571ed5 100644 --- a/ste/mgr-JobPartTransferMgr.go +++ b/ste/mgr-JobPartTransferMgr.go @@ -2,7 +2,11 @@ package ste import ( "context" + "errors" "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" "net/http" "strings" "sync/atomic" @@ -13,8 +17,6 @@ import ( "github.com/Azure/azure-pipeline-go/pipeline" "github.com/Azure/azure-storage-azcopy/v10/azbfs" "github.com/Azure/azure-storage-azcopy/v10/common" - "github.com/Azure/azure-storage-blob-go/azblob" - "github.com/Azure/azure-storage-file-go/azfile" ) type IJobPartTransferMgr interface { @@ -59,9 +61,16 @@ type IJobPartTransferMgr interface { OccupyAConnection() // TODO: added for debugging purpose. remove later ReleaseAConnection() + + CredentialInfo() common.CredentialInfo + ClientOptions() azcore.ClientOptions + S2SSourceCredentialInfo() common.CredentialInfo + GetS2SSourceTokenCredential(ctx context.Context) (token *string, err error) + S2SSourceClientOptions() azcore.ClientOptions + CredentialOpOptions() *common.CredentialOpOptions + SourceProviderPipeline() pipeline.Pipeline - SecondarySourceProviderPipeline() pipeline.Pipeline - SourceCredential() pipeline.Factory + FailActiveUpload(where string, err error) FailActiveDownload(where string, err error) FailActiveUploadWithStatus(where string, err error, failureStatus common.TransferStatus) @@ -90,10 +99,9 @@ type IJobPartTransferMgr interface { FolderDeletionManager() common.FolderDeletionManager GetDestinationRoot() string ShouldInferContentType() bool - CpkInfo() common.CpkInfo - CpkScopeInfo() common.CpkScopeInfo + CpkInfo() *blob.CPKInfo + CpkScopeInfo() *blob.CPKScopeInfo IsSourceEncrypted() bool - GetS2SSourceBlobTokenCredential() azblob.TokenCredential PropertiesToTransfer() common.SetPropertiesFlags ResetSourceSize() // sets source size to 0 (made to be used by setProperties command to make number of bytes transferred = 0) SuccessfulBytesTransferred() int64 @@ -122,13 +130,12 @@ type TransferInfo struct { S2SInvalidMetadataHandleOption common.InvalidMetadataHandleOption // Blob - SrcBlobType azblob.BlobType // used for both S2S and for downloads to local from blob - S2SSrcBlobTier azblob.AccessTierType // AccessTierType (string) is used to accommodate service-side support matrix change. + SrcBlobType blob.BlobType // used for both S2S and for downloads to local from blob + S2SSrcBlobTier blob.AccessTier // AccessTierType (string) is used to accommodate service-side support matrix change. 
- RehydratePriority azblob.RehydratePriorityType + RehydratePriority blob.RehydratePriority } - func (i TransferInfo) IsFilePropertiesTransfer() bool { return i.EntityType == common.EEntityType.FileProperties() } @@ -216,20 +223,6 @@ type jobPartTransferMgr struct { chunkChannel chan<- ChunkMsg*/ } -func (jptm *jobPartTransferMgr) GetS2SSourceBlobTokenCredential() azblob.TokenCredential { - cred := jptm.SourceCredential() - - if cred == nil { - return nil - } else { - if tc, ok := cred.(azblob.TokenCredential); ok { - return tc - } else { - return nil - } - } -} - func (jptm *jobPartTransferMgr) GetOverwritePrompter() *overwritePrompter { return jptm.jobPartMgr.getOverwritePrompter() } @@ -366,7 +359,7 @@ func (jptm *jobPartTransferMgr) Info() TransferInfo { } } } - blockSize = common.Iffint64(blockSize > common.MaxBlockBlobBlockSize, common.MaxBlockBlobBlockSize, blockSize) + blockSize = common.Iff(blockSize > common.MaxBlockBlobBlockSize, common.MaxBlockBlobBlockSize, blockSize) var srcBlobTags common.BlobTags if blobTags != nil { @@ -541,11 +534,11 @@ func (jptm *jobPartTransferMgr) BlobTiers() (blockBlobTier common.BlockBlobTier, return jptm.jobPartMgr.BlobTiers() } -func (jptm *jobPartTransferMgr) CpkInfo() common.CpkInfo { +func (jptm *jobPartTransferMgr) CpkInfo() *blob.CPKInfo { return jptm.jobPartMgr.CpkInfo() } -func (jptm *jobPartTransferMgr) CpkScopeInfo() common.CpkScopeInfo { +func (jptm *jobPartTransferMgr) CpkScopeInfo() *blob.CPKScopeInfo { return jptm.jobPartMgr.CpkScopeInfo() } @@ -850,11 +843,11 @@ func (jptm *jobPartTransferMgr) Log(level pipeline.LogLevel, msg string) { } func (jptm *jobPartTransferMgr) ErrorCodeAndString(err error) (int, string) { + var respErr *azcore.ResponseError + if errors.As(err, &respErr) { + return respErr.StatusCode, respErr.RawResponse.Status + } switch e := err.(type) { - case azblob.StorageError: - return e.Response().StatusCode, e.Response().Status - case azfile.StorageError: - return e.Response().StatusCode, e.Response().Status case azbfs.StorageError: return e.Response().StatusCode, e.Response().Status default: @@ -967,16 +960,47 @@ func (jptm *jobPartTransferMgr) ReportTransferDone() uint32 { return jptm.jobPartMgr.ReportTransferDone(jptm.jobPartPlanTransfer.TransferStatus()) } -func (jptm *jobPartTransferMgr) SourceProviderPipeline() pipeline.Pipeline { - return jptm.jobPartMgr.SourceProviderPipeline() +func (jptm *jobPartTransferMgr) CredentialInfo() common.CredentialInfo { + return jptm.jobPartMgr.CredentialInfo() } -func (jptm *jobPartTransferMgr) SecondarySourceProviderPipeline() pipeline.Pipeline { - return jptm.jobPartMgr.SecondarySourceProviderPipeline() +func (jptm *jobPartTransferMgr) ClientOptions() azcore.ClientOptions { + return jptm.jobPartMgr.ClientOptions() } -func (jptm *jobPartTransferMgr) SourceCredential() pipeline.Factory { - return jptm.jobPartMgr.SourceCredential() +func (jptm *jobPartTransferMgr) S2SSourceCredentialInfo() common.CredentialInfo { + return jptm.jobPartMgr.S2SSourceCredentialInfo() +} + +func (jptm *jobPartTransferMgr) GetS2SSourceTokenCredential(ctx context.Context) (*string, error) { + if jptm.S2SSourceCredentialInfo().CredentialType.IsAzureOAuth() { + tokenInfo := jptm.S2SSourceCredentialInfo().OAuthTokenInfo + tc, err := tokenInfo.GetTokenCredential() + if err != nil { + return nil, err + } + scope := []string{common.StorageScope} + if jptm.S2SSourceCredentialInfo().CredentialType == common.ECredentialType.MDOAuthToken() { + scope = []string{common.ManagedDiskScope} + } + + token, err := 
tc.GetToken(ctx, policy.TokenRequestOptions{Scopes: scope}) + t := "Bearer " + token.Token + return &t, err + } + return nil, nil +} + +func (jptm *jobPartTransferMgr) S2SSourceClientOptions() azcore.ClientOptions { + return jptm.jobPartMgr.S2SSourceClientOptions() +} + +func (jptm *jobPartTransferMgr) CredentialOpOptions() *common.CredentialOpOptions { + return jptm.jobPartMgr.CredentialOpOptions() +} + +func (jptm *jobPartTransferMgr) SourceProviderPipeline() pipeline.Pipeline { + return jptm.jobPartMgr.SourceProviderPipeline() } func (jptm *jobPartTransferMgr) SecurityInfoPersistenceManager() *securityInfoPersistenceManager { diff --git a/ste/pacedReadSeeker.go b/ste/pacedReadSeeker.go index d284dec31..37d13ae65 100644 --- a/ste/pacedReadSeeker.go +++ b/ste/pacedReadSeeker.go @@ -38,7 +38,7 @@ type pacedReadSeeker struct { p pacer } -func newPacedRequestBody(ctx context.Context, requestBody io.ReadSeeker, p pacer) io.ReadSeeker { +func newPacedRequestBody(ctx context.Context, requestBody io.ReadSeeker, p pacer) io.ReadSeekCloser { if p == nil { panic("p must not be nil") } diff --git a/ste/remoteObjectExists.go b/ste/remoteObjectExists.go index 2e05de186..e1d3b97be 100644 --- a/ste/remoteObjectExists.go +++ b/ste/remoteObjectExists.go @@ -21,6 +21,11 @@ package ste import ( + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + sharefile "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" + "github.com/Azure/azure-storage-azcopy/v10/common" "net/http" "time" ) @@ -34,14 +39,32 @@ type lastModifiedTimerProvider interface { LastModified() time.Time } +type blobPropertiesResponseAdapter struct { + blob.GetPropertiesResponse +} + +func (a blobPropertiesResponseAdapter) LastModified() time.Time { + return common.IffNotNil(a.GetPropertiesResponse.LastModified, time.Time{}) +} + +type filePropertiesResponseAdapter struct { + sharefile.GetPropertiesResponse +} + +func (a filePropertiesResponseAdapter) LastModified() time.Time { + return common.IffNotNil(a.GetPropertiesResponse.LastModified, time.Time{}) +} + // remoteObjectExists takes the error returned when trying to access a remote object, sees whether is // a "not found" error. If the object exists (i.e. error is nil) it returns (true, nil). If the // error is a "not found" error, it returns (false, nil). Else it returns false and the original error. // The initial, dummy, parameter, is to allow callers to conveniently call it with functions that return a tuple // - even though we only need the error. 
func remoteObjectExists(props lastModifiedTimerProvider, errWhenAccessingRemoteObject error) (bool, time.Time, error) { - - if typedErr, ok := errWhenAccessingRemoteObject.(responseError); ok && typedErr.Response().StatusCode == http.StatusNotFound { + var respErr *azcore.ResponseError + if errors.As(errWhenAccessingRemoteObject, &respErr) && respErr.StatusCode == http.StatusNotFound { + return false, time.Time{}, nil // 404 error, so it does NOT exist + } else if typedErr, ok := errWhenAccessingRemoteObject.(responseError); ok && typedErr.Response().StatusCode == http.StatusNotFound { return false, time.Time{}, nil // 404 error, so it does NOT exist } else if errWhenAccessingRemoteObject != nil { return false, time.Time{}, errWhenAccessingRemoteObject // some other error happened, so we return it diff --git a/ste/s2sCopier-URLToBlob.go b/ste/s2sCopier-URLToBlob.go index a9bcd05e9..54f8cd340 100644 --- a/ste/s2sCopier-URLToBlob.go +++ b/ste/s2sCopier-URLToBlob.go @@ -22,13 +22,13 @@ package ste import ( "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" "net/url" "strings" "sync" "github.com/Azure/azure-pipeline-go/pipeline" "github.com/Azure/azure-storage-azcopy/v10/common" - "github.com/Azure/azure-storage-blob-go/azblob" ) var LogBlobConversionOnce = &sync.Once{} @@ -46,11 +46,13 @@ func newURLToBlobCopier(jptm IJobPartTransferMgr, destination string, p pipeline return nil, err } - bURLParts := azblob.NewBlobURLParts(*u) + bURLParts, err := blob.ParseURL(u.String()) + if err != nil { + return nil, err + } bURLParts.Host = strings.Replace(bURLParts.Host, ".dfs", ".blob", 1) - newDest := bURLParts.URL() - destination = newDest.String() + destination = bURLParts.String() LogBlobConversionOnce.Do(func() { common.GetLifecycleMgr().Info("Switching to blob endpoint to write to destination account. There are some limitations when writing between blob/dfs endpoints. " + @@ -58,12 +60,12 @@ func newURLToBlobCopier(jptm IJobPartTransferMgr, destination string, p pipeline }) } - var targetBlobType azblob.BlobType + var targetBlobType blob.BlobType blobTypeOverride := jptm.BlobTypeOverride() // BlobTypeOverride is copy info specified by user if blobTypeOverride != common.EBlobType.Detect() { // If a blob type is explicitly specified, determine it. 
- targetBlobType = blobTypeOverride.ToAzBlobType() + targetBlobType = blobTypeOverride.ToBlobType() if jptm.ShouldLog(pipeline.LogInfo) { // To save fmt.Sprintf jptm.LogTransferInfo( @@ -85,10 +87,10 @@ func newURLToBlobCopier(jptm IJobPartTransferMgr, destination string, p pipeline fileName := srcURL.Path - targetBlobType = inferBlobType(fileName, azblob.BlobBlockBlob) + targetBlobType = inferBlobType(fileName, blob.BlobTypeBlockBlob) } - if targetBlobType != azblob.BlobBlockBlob { + if targetBlobType != blob.BlobTypeBlockBlob { jptm.LogTransferInfo(pipeline.LogInfo, srcInfoProvider.RawSource(), destination, fmt.Sprintf("Autodetected %s blob type as %s.", jptm.Info().Source, targetBlobType)) } } @@ -102,26 +104,26 @@ func newURLToBlobCopier(jptm IJobPartTransferMgr, destination string, p pipeline } if jptm.Info().IsFolderPropertiesTransfer() { - return newBlobFolderSender(jptm, destination, p, pacer, srcInfoProvider) + return newBlobFolderSender(jptm, destination, srcInfoProvider) } else if jptm.Info().EntityType == common.EEntityType.Symlink() { - return newBlobSymlinkSender(jptm, destination, p, pacer, srcInfoProvider) + return newBlobSymlinkSender(jptm, destination, srcInfoProvider) } switch targetBlobType { - case azblob.BlobBlockBlob: - return newURLToBlockBlobCopier(jptm, destination, p, pacer, srcInfoProvider) - case azblob.BlobAppendBlob: - return newURLToAppendBlobCopier(jptm, destination, p, pacer, srcInfoProvider) - case azblob.BlobPageBlob: - return newURLToPageBlobCopier(jptm, destination, p, pacer, srcInfoProvider) + case blob.BlobTypeBlockBlob: + return newURLToBlockBlobCopier(jptm, destination, pacer, srcInfoProvider) + case blob.BlobTypeAppendBlob: + return newURLToAppendBlobCopier(jptm, destination, pacer, srcInfoProvider) + case blob.BlobTypePageBlob: + return newURLToPageBlobCopier(jptm, destination, pacer, srcInfoProvider) default: if jptm.ShouldLog(pipeline.LogDebug) { // To save fmt.Sprintf jptm.LogTransferInfo( pipeline.LogDebug, srcInfoProvider.RawSource(), destination, - fmt.Sprintf("BlobType %q is used for destination blob by default.", azblob.BlobBlockBlob)) + fmt.Sprintf("BlobType %q is used for destination blob by default.", blob.BlobTypeBlockBlob)) } - return newURLToBlockBlobCopier(jptm, destination, p, pacer, srcInfoProvider) + return newURLToBlockBlobCopier(jptm, destination, pacer, srcInfoProvider) } } diff --git a/ste/securityInfoPersistenceManager.go b/ste/securityInfoPersistenceManager.go index 48d164bfd..7fa77b47a 100644 --- a/ste/securityInfoPersistenceManager.go +++ b/ste/securityInfoPersistenceManager.go @@ -2,11 +2,13 @@ package ste import ( "context" - "github.com/Azure/azure-pipeline-go/pipeline" - "net/url" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" + filesas "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/sas" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/share" + "github.com/Azure/azure-storage-azcopy/v10/common" "sync" - "github.com/Azure/azure-storage-file-go/azfile" "github.com/golang/groupcache/lru" ) @@ -33,12 +35,14 @@ func newSecurityInfoPersistenceManager(ctx context.Context) *securityInfoPersist // Technically, yes, GetSDDLFromID can be used in conjunction with PutSDDL. // Being realistic though, GetSDDLFromID will only be called when downloading, // and PutSDDL will only be called when uploading/doing S2S. 
-func (sipm *securityInfoPersistenceManager) PutSDDL(sddlString string, shareURL azfile.ShareURL) (string, error) { - fileURLParts := azfile.NewFileURLParts(shareURL.URL()) - fileURLParts.SAS = azfile.SASQueryParameters{} // Clear the SAS query params since it's extra unnecessary length. - rawfURL := fileURLParts.URL() +func (sipm *securityInfoPersistenceManager) PutSDDL(sddlString string, shareClient *share.Client) (string, error) { + fileURLParts, err := file.ParseURL(shareClient.URL()) + if err != nil { + return "", err + } + fileURLParts.SAS = filesas.QueryParameters{} // Clear the SAS query params since it's extra unnecessary length. - sddlKey := rawfURL.String() + "|SDDL|" + sddlString + sddlKey := fileURLParts.String() + "|SDDL|" + sddlString // Acquire a read lock. sipm.sipmMu.RLock() @@ -52,13 +56,13 @@ func (sipm *securityInfoPersistenceManager) PutSDDL(sddlString string, shareURL return id.(string), nil } - cResp, err := shareURL.CreatePermission(sipm.ctx, sddlString) + cResp, err := shareClient.CreatePermission(sipm.ctx, sddlString, nil) if err != nil { return "", err } - permKey := cResp.FilePermissionKey() + permKey := *cResp.FilePermissionKey sipm.sipmMu.Lock() sipm.cache.Add(sddlKey, permKey) @@ -67,12 +71,13 @@ func (sipm *securityInfoPersistenceManager) PutSDDL(sddlString string, shareURL return permKey, nil } -func (sipm *securityInfoPersistenceManager) GetSDDLFromID(id string, shareURL url.URL, p pipeline.Pipeline) (string, error) { - fileURLParts := azfile.NewFileURLParts(shareURL) - fileURLParts.SAS = azfile.SASQueryParameters{} // Clear the SAS query params since it's extra unnecessary length. - rawfURL := fileURLParts.URL() - - sddlKey := rawfURL.String() + "|ID|" + id +func (sipm *securityInfoPersistenceManager) GetSDDLFromID(id string, shareURL string, credInfo common.CredentialInfo, credOpOptions *common.CredentialOpOptions, clientOptions azcore.ClientOptions) (string, error) { + fileURLParts, err := filesas.ParseURL(shareURL) + if err != nil { + return "", err + } + fileURLParts.SAS = filesas.QueryParameters{} // Clear the SAS query params since it's extra unnecessary length. + sddlKey := fileURLParts.String() + "|ID|" + id sipm.sipmMu.Lock() // fetch from the cache @@ -84,24 +89,27 @@ func (sipm *securityInfoPersistenceManager) GetSDDLFromID(id string, shareURL ur return perm.(string), nil } - actionableShareURL := azfile.NewShareURL(shareURL, p) + actionableShareURL := common.CreateShareClient(shareURL, credInfo, credOpOptions, clientOptions) // to clarify, the GetPermission call only works against the share root, and not against a share snapshot // if we detect that the source is a snapshot, we simply get rid of the snapshot value if len(fileURLParts.ShareSnapshot) != 0 { - fileURLParts := azfile.NewFileURLParts(shareURL) + fileURLParts, err := filesas.ParseURL(shareURL) + if err != nil { + return "", err + } fileURLParts.ShareSnapshot = "" // clear the snapshot value - actionableShareURL = azfile.NewShareURL(fileURLParts.URL(), p) + actionableShareURL = common.CreateShareClient(fileURLParts.String(), credInfo, credOpOptions, clientOptions) } - si, err := actionableShareURL.GetPermission(sipm.ctx, id) + si, err := actionableShareURL.GetPermission(sipm.ctx, id, nil) if err != nil { return "", err } sipm.sipmMu.Lock() // If we got the permission fine, commit to the cache. 
- sipm.cache.Add(sddlKey, si.Permission) + sipm.cache.Add(sddlKey, *si.Permission) sipm.sipmMu.Unlock() - return si.Permission, nil + return *si.Permission, nil } diff --git a/ste/sender-appendBlob.go b/ste/sender-appendBlob.go index 55c502de8..600b9cb7e 100644 --- a/ste/sender-appendBlob.go +++ b/ste/sender-appendBlob.go @@ -22,30 +22,30 @@ package ste import ( "context" - "net/url" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" "time" "github.com/Azure/azure-pipeline-go/pipeline" - "github.com/Azure/azure-storage-blob-go/azblob" "golang.org/x/sync/semaphore" "github.com/Azure/azure-storage-azcopy/v10/common" ) type appendBlobSenderBase struct { - jptm IJobPartTransferMgr - destAppendBlobURL azblob.AppendBlobURL - chunkSize int64 - numChunks uint32 - pacer pacer + jptm IJobPartTransferMgr + destAppendBlobClient *appendblob.Client + chunkSize int64 + numChunks uint32 + pacer pacer // Headers and other info that we will apply to the destination // object. For S2S, these come from the source service. // When sending local data, they are computed based on // the properties of the local file - headersToApply azblob.BlobHTTPHeaders - metadataToApply azblob.Metadata - blobTagsToApply azblob.BlobTagsMap - cpkToApply azblob.ClientProvidedKeyOptions + headersToApply blob.HTTPHeaders + metadataToApply common.Metadata + blobTagsToApply common.BlobTags sip ISourceInfoProvider @@ -54,14 +54,14 @@ type appendBlobSenderBase struct { type appendBlockFunc = func() -func newAppendBlobSenderBase(jptm IJobPartTransferMgr, destination string, p pipeline.Pipeline, pacer pacer, srcInfoProvider ISourceInfoProvider) (*appendBlobSenderBase, error) { +func newAppendBlobSenderBase(jptm IJobPartTransferMgr, destination string, pacer pacer, srcInfoProvider ISourceInfoProvider) (*appendBlobSenderBase, error) { transferInfo := jptm.Info() // compute chunk count chunkSize := transferInfo.BlockSize // If the given chunk Size for the Job is greater than maximum append blob block size i.e 4 MB, // then set chunkSize as 4 MB. 
- chunkSize = common.Iffint64( + chunkSize = common.Iff( chunkSize > common.MaxAppendBlobBlockSize, common.MaxAppendBlobBlockSize, chunkSize) @@ -69,32 +69,23 @@ func newAppendBlobSenderBase(jptm IJobPartTransferMgr, destination string, p pip srcSize := transferInfo.SourceSize numChunks := getNumChunks(srcSize, chunkSize) - destURL, err := url.Parse(destination) - if err != nil { - return nil, err - } - - destAppendBlobURL := azblob.NewAppendBlobURL(*destURL, p) + destAppendBlobClient := common.CreateAppendBlobClient(destination, jptm.CredentialInfo(), jptm.CredentialOpOptions(), jptm.ClientOptions()) props, err := srcInfoProvider.Properties() if err != nil { return nil, err } - // Once track2 goes live, we'll not need to do this conversion/casting and can directly use CpkInfo & CpkScopeInfo - cpkToApply := common.ToClientProvidedKeyOptions(jptm.CpkInfo(), jptm.CpkScopeInfo()) - return &appendBlobSenderBase{ jptm: jptm, - destAppendBlobURL: destAppendBlobURL, + destAppendBlobClient: destAppendBlobClient, chunkSize: chunkSize, numChunks: numChunks, pacer: pacer, - headersToApply: props.SrcHTTPHeaders.ToAzBlobHTTPHeaders(), - metadataToApply: props.SrcMetadata.ToAzBlobMetadata(), - blobTagsToApply: props.SrcBlobTags.ToAzBlobTagsMap(), + headersToApply: props.SrcHTTPHeaders.ToBlobHTTPHeaders(), + metadataToApply: props.SrcMetadata, + blobTagsToApply: props.SrcBlobTags, sip: srcInfoProvider, - cpkToApply: cpkToApply, soleChunkFuncSemaphore: semaphore.NewWeighted(1)}, nil } @@ -111,7 +102,8 @@ func (s *appendBlobSenderBase) NumChunks() uint32 { } func (s *appendBlobSenderBase) RemoteFileExists() (bool, time.Time, error) { - return remoteObjectExists(s.destAppendBlobURL.GetProperties(s.jptm.Context(), azblob.BlobAccessConditions{}, s.cpkToApply)) + properties, err := s.destAppendBlobClient.GetProperties(s.jptm.Context(), &blob.GetPropertiesOptions{CPKInfo: s.jptm.CpkInfo()}) + return remoteObjectExists(blobPropertiesResponseAdapter{properties}, err) } // Returns a chunk-func for sending append blob to remote @@ -147,22 +139,30 @@ func (s *appendBlobSenderBase) Prologue(ps common.PrologueState) (destinationMod if s.jptm.ShouldInferContentType() { // sometimes, specifically when reading local files, we have more info // about the file type at this time than what we had before - s.headersToApply.ContentType = ps.GetInferredContentType(s.jptm) + s.headersToApply.BlobContentType = ps.GetInferredContentType(s.jptm) } blobTags := s.blobTagsToApply - separateSetTagsRequired := separateSetTagsRequired(blobTags) - if separateSetTagsRequired || len(blobTags) == 0 { + setTags := separateSetTagsRequired(blobTags) + if setTags || len(blobTags) == 0 { blobTags = nil } - if _, err := s.destAppendBlobURL.Create(s.jptm.Context(), s.headersToApply, s.metadataToApply, azblob.BlobAccessConditions{}, blobTags, s.cpkToApply, azblob.ImmutabilityPolicyOptions{}); err != nil { + _, err := s.destAppendBlobClient.Create(s.jptm.Context(), &appendblob.CreateOptions{ + HTTPHeaders: &s.headersToApply, + Metadata: s.metadataToApply, + Tags: blobTags, + CPKInfo: s.jptm.CpkInfo(), + CPKScopeInfo: s.jptm.CpkScopeInfo(), + }) + if err != nil { s.jptm.FailActiveSend("Creating blob", err) return } destinationModified = true - if separateSetTagsRequired { - if _, err := s.destAppendBlobURL.SetTags(s.jptm.Context(), nil, nil, nil, s.blobTagsToApply); err != nil { + if setTags { + _, err = s.destAppendBlobClient.SetTags(s.jptm.Context(), s.blobTagsToApply, nil) + if err != nil { s.jptm.Log(pipeline.LogWarning, err.Error()) } } @@ -184,9 
+184,23 @@ func (s *appendBlobSenderBase) Cleanup() { // to be consistent with other deletionContext, cancelFunc := context.WithTimeout(context.WithValue(context.Background(), ServiceAPIVersionOverride, DefaultServiceApiVersion), 30*time.Second) defer cancelFunc() - _, err := s.destAppendBlobURL.Delete(deletionContext, azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{}) + _, err := s.destAppendBlobClient.Delete(deletionContext, nil) if err != nil { - jptm.LogError(s.destAppendBlobURL.String(), "Delete (incomplete) Append Blob ", err) + jptm.LogError(s.destAppendBlobClient.URL(), "Delete (incomplete) Append Blob ", err) } } } + +// GetDestinationLength gets the destination length. +func (s *appendBlobSenderBase) GetDestinationLength() (int64, error) { + prop, err := s.destAppendBlobClient.GetProperties(s.jptm.Context(), &blob.GetPropertiesOptions{CPKInfo: s.jptm.CpkInfo()}) + + if err != nil { + return -1, err + } + + if prop.ContentLength == nil { + return -1, fmt.Errorf("destination content length not returned") + } + return *prop.ContentLength, nil +} diff --git a/ste/sender-appendBlobFromLocal.go b/ste/sender-appendBlobFromLocal.go index a2c8fb950..cd87feb1f 100644 --- a/ste/sender-appendBlobFromLocal.go +++ b/ste/sender-appendBlobFromLocal.go @@ -21,9 +21,8 @@ package ste import ( - "github.com/Azure/azure-pipeline-go/pipeline" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob" "github.com/Azure/azure-storage-azcopy/v10/common" - "github.com/Azure/azure-storage-blob-go/azblob" ) type appendBlobUploader struct { @@ -37,7 +36,7 @@ func (u *appendBlobUploader) Prologue(ps common.PrologueState) (destinationModif if u.jptm.Info().PreservePOSIXProperties { if unixSIP, ok := u.sip.(IUNIXPropertyBearingSourceInfoProvider); ok { // Clone the metadata before we write to it, we shouldn't be writing to the same metadata as every other blob. 
- u.metadataToApply = common.Metadata(u.metadataToApply).Clone().ToAzBlobMetadata() + u.metadataToApply = u.metadataToApply.Clone() statAdapter, err := unixSIP.GetUNIXProperties() if err != nil { @@ -51,8 +50,8 @@ func (u *appendBlobUploader) Prologue(ps common.PrologueState) (destinationModif return u.appendBlobSenderBase.Prologue(ps) } -func newAppendBlobUploader(jptm IJobPartTransferMgr, destination string, p pipeline.Pipeline, pacer pacer, sip ISourceInfoProvider) (sender, error) { - senderBase, err := newAppendBlobSenderBase(jptm, destination, p, pacer, sip) +func newAppendBlobUploader(jptm IJobPartTransferMgr, destination string, pacer pacer, sip ISourceInfoProvider) (sender, error) { + senderBase, err := newAppendBlobSenderBase(jptm, destination, pacer, sip) if err != nil { return nil, err } @@ -68,10 +67,13 @@ func (u *appendBlobUploader) GenerateUploadFunc(id common.ChunkID, blockIndex in appendBlockFromLocal := func() { u.jptm.LogChunkStatus(id, common.EWaitReason.Body()) body := newPacedRequestBody(u.jptm.Context(), reader, u.pacer) - _, err := u.destAppendBlobURL.AppendBlock(u.jptm.Context(), body, - azblob.AppendBlobAccessConditions{ - AppendPositionAccessConditions: azblob.AppendPositionAccessConditions{IfAppendPositionEqual: id.OffsetInFile()}, - }, nil, u.cpkToApply) + offset := id.OffsetInFile() + _, err := u.destAppendBlobClient.AppendBlock(u.jptm.Context(), body, + &appendblob.AppendBlockOptions{ + AppendPositionAccessConditions: &appendblob.AppendPositionAccessConditions{AppendPosition: &offset}, + CPKInfo: u.jptm.CpkInfo(), + CPKScopeInfo: u.jptm.CpkScopeInfo(), + }) if err != nil { u.jptm.FailActiveUpload("Appending block", err) return @@ -88,21 +90,11 @@ func (u *appendBlobUploader) Epilogue() { if jptm.IsLive() { tryPutMd5Hash(jptm, u.md5Channel, func(md5Hash []byte) error { epilogueHeaders := u.headersToApply - epilogueHeaders.ContentMD5 = md5Hash - _, err := u.destAppendBlobURL.SetHTTPHeaders(jptm.Context(), epilogueHeaders, azblob.BlobAccessConditions{}) + epilogueHeaders.BlobContentMD5 = md5Hash + _, err := u.destAppendBlobClient.SetHTTPHeaders(jptm.Context(), epilogueHeaders, nil) return err }) } u.appendBlobSenderBase.Epilogue() } - -func (u *appendBlobUploader) GetDestinationLength() (int64, error) { - prop, err := u.destAppendBlobURL.GetProperties(u.jptm.Context(), azblob.BlobAccessConditions{}, u.cpkToApply) - - if err != nil { - return -1, err - } - - return prop.ContentLength(), nil -} diff --git a/ste/sender-appendBlobFromURL.go b/ste/sender-appendBlobFromURL.go index 3279779ca..369b62324 100644 --- a/ste/sender-appendBlobFromURL.go +++ b/ste/sender-appendBlobFromURL.go @@ -21,21 +21,19 @@ package ste import ( - "net/url" - - "github.com/Azure/azure-pipeline-go/pipeline" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" "github.com/Azure/azure-storage-azcopy/v10/common" - "github.com/Azure/azure-storage-blob-go/azblob" ) type urlToAppendBlobCopier struct { appendBlobSenderBase - srcURL url.URL + srcURL string } -func newURLToAppendBlobCopier(jptm IJobPartTransferMgr, destination string, p pipeline.Pipeline, pacer pacer, srcInfoProvider IRemoteSourceInfoProvider) (s2sCopier, error) { - senderBase, err := newAppendBlobSenderBase(jptm, destination, p, pacer, srcInfoProvider) +func newURLToAppendBlobCopier(jptm IJobPartTransferMgr, destination string, pacer pacer, srcInfoProvider IRemoteSourceInfoProvider) (s2sCopier, error) { + senderBase, err := newAppendBlobSenderBase(jptm, 
destination, pacer, srcInfoProvider) if err != nil { return nil, err } @@ -47,7 +45,7 @@ func newURLToAppendBlobCopier(jptm IJobPartTransferMgr, destination string, p pi return &urlToAppendBlobCopier{ appendBlobSenderBase: *senderBase, - srcURL: *srcURL}, nil + srcURL: srcURL}, nil } // Returns a chunk-func for blob copies @@ -58,10 +56,20 @@ func (c *urlToAppendBlobCopier) GenerateCopyFunc(id common.ChunkID, blockIndex i if err := c.pacer.RequestTrafficAllocation(c.jptm.Context(), adjustedChunkSize); err != nil { c.jptm.FailActiveUpload("Pacing block", err) } - _, err := c.destAppendBlobURL.AppendBlockFromURL(c.jptm.Context(), c.srcURL, id.OffsetInFile(), adjustedChunkSize, - azblob.AppendBlobAccessConditions{ - AppendPositionAccessConditions: azblob.AppendPositionAccessConditions{IfAppendPositionEqual: id.OffsetInFile()}, - }, azblob.ModifiedAccessConditions{}, nil, c.cpkToApply, c.jptm.GetS2SSourceBlobTokenCredential()) + offset := id.OffsetInFile() + token, err := c.jptm.GetS2SSourceTokenCredential(c.jptm.Context()) + if err != nil { + c.jptm.FailActiveS2SCopy("Getting source token credential", err) + return + } + _, err = c.destAppendBlobClient.AppendBlockFromURL(c.jptm.Context(), c.srcURL, + &appendblob.AppendBlockFromURLOptions{ + Range: blob.HTTPRange{Offset: offset, Count: adjustedChunkSize}, + AppendPositionAccessConditions: &appendblob.AppendPositionAccessConditions{AppendPosition: &offset}, + CPKInfo: c.jptm.CpkInfo(), + CPKScopeInfo: c.jptm.CpkScopeInfo(), + CopySourceAuthorization: token, + }) if err != nil { c.jptm.FailActiveS2SCopy("Appending block from URL", err) return @@ -70,13 +78,3 @@ func (c *urlToAppendBlobCopier) GenerateCopyFunc(id common.ChunkID, blockIndex i return c.generateAppendBlockToRemoteFunc(id, appendBlockFromURL) } - -// GetDestinationLength gets the destination length. -func (c *urlToAppendBlobCopier) GetDestinationLength() (int64, error) { - properties, err := c.destAppendBlobURL.GetProperties(c.jptm.Context(), azblob.BlobAccessConditions{}, c.cpkToApply) - if err != nil { - return -1, err - } - - return properties.ContentLength(), nil -} diff --git a/ste/sender-azureFile.go b/ste/sender-azureFile.go index 7e7b97921..64c6e4b0f 100644 --- a/ste/sender-azureFile.go +++ b/ste/sender-azureFile.go @@ -24,20 +24,25 @@ import ( "context" "errors" "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/directory" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/fileerror" + filesas "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/sas" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/service" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/share" "net/http" "net/url" "strings" "time" "github.com/Azure/azure-pipeline-go/pipeline" - "github.com/Azure/azure-storage-file-go/azfile" "github.com/Azure/azure-storage-azcopy/v10/common" ) type URLHolder interface { - URL() url.URL - String() string + URL() string } // azureFileSenderBase implements both IFolderSender and (most of) IFileSender. @@ -48,11 +53,12 @@ type URLHolder interface { // (The alternative would be to have the likes of newAzureFilesUploader call sip.EntityType and return a different type // if the entity type is folder). 
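In Track 2 the range, the append-position precondition, CPK settings and the source authorization all travel in a single AppendBlockFromURLOptions struct, and the source OAuth token is passed as CopySourceAuthorization rather than as a credential object. A sketch of one chunk copy under those assumptions (copySourceAuth is whatever value azcopy's GetS2SSourceTokenCredential yields, nil when the source is SAS-authorized):

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

// appendChunkFromURL copies one chunk of srcURL into the destination append blob.
// offset doubles as the expected append position, so out-of-order chunks fail fast.
func appendChunkFromURL(ctx context.Context, dst *appendblob.Client, srcURL string, offset, count int64, copySourceAuth *string) error {
	_, err := dst.AppendBlockFromURL(ctx, srcURL, &appendblob.AppendBlockFromURLOptions{
		Range:                          blob.HTTPRange{Offset: offset, Count: count},
		AppendPositionAccessConditions: &appendblob.AppendPositionAccessConditions{AppendPosition: &offset},
		CopySourceAuthorization:        copySourceAuth, // only set for OAuth-authorized sources
	})
	return err
}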
type azureFileSenderBase struct { - jptm IJobPartTransferMgr - fileOrDirURL URLHolder + jptm IJobPartTransferMgr + fileOrDirClient URLHolder + shareClient *share.Client + serviceClient *service.Client chunkSize int64 numChunks uint32 - pipeline pipeline.Pipeline pacer pacer ctx context.Context sip ISourceInfoProvider @@ -60,12 +66,13 @@ type azureFileSenderBase struct { // object. For S2S, these come from the source service. // When sending local data, they are computed based on // the properties of the local file - headersToApply azfile.FileHTTPHeaders - metadataToApply azfile.Metadata + headersToApply file.HTTPHeaders + smbPropertiesToApply file.SMBProperties + permissionsToApply file.Permissions + metadataToApply common.Metadata } -func newAzureFileSenderBase(jptm IJobPartTransferMgr, destination string, p pipeline.Pipeline, pacer pacer, sip ISourceInfoProvider) (*azureFileSenderBase, error) { - +func newAzureFileSenderBase(jptm IJobPartTransferMgr, destination string, pacer pacer, sip ISourceInfoProvider) (*azureFileSenderBase, error) { info := jptm.Info() // compute chunk size (irrelevant but harmless for folders) @@ -83,12 +90,6 @@ func newAzureFileSenderBase(jptm IJobPartTransferMgr, destination string, p pipe // compute num chunks (irrelevant but harmless for folders) numChunks := getNumChunks(info.SourceSize, chunkSize) - // make sure URL is parsable - destURL, err := url.Parse(destination) - if err != nil { - return nil, err - } - // due to the REST parity feature added in 2019-02-02, the File APIs are no longer backward compatible // so we must use the latest SDK version to stay safe // TODO: Should we get rid of this one? @@ -98,33 +99,61 @@ func newAzureFileSenderBase(jptm IJobPartTransferMgr, destination string, p pipe return nil, err } - var h URLHolder + fileURLParts, err := file.ParseURL(destination) + if err != nil { + return nil, err + } + shareName := fileURLParts.ShareName + shareSnapshot := fileURLParts.ShareSnapshot + directoryOrFilePath := fileURLParts.DirectoryOrFilePath + // Strip any non-service related things away + fileURLParts.ShareName = "" + fileURLParts.ShareSnapshot = "" + fileURLParts.DirectoryOrFilePath = "" + serviceClient := common.CreateFileServiceClient(fileURLParts.String(), jptm.CredentialInfo(), jptm.CredentialOpOptions(), jptm.ClientOptions()) + + shareClient := serviceClient.NewShareClient(shareName) + if shareSnapshot != "" { + shareClient, err = shareClient.WithSnapshot(shareSnapshot) + if err != nil { + return nil, err + } + } + + var client URLHolder if info.IsFolderPropertiesTransfer() { - h = azfile.NewDirectoryURL(*destURL, p) + if directoryOrFilePath == "" { + client = shareClient.NewRootDirectoryClient() + } else { + client = shareClient.NewDirectoryClient(directoryOrFilePath) + } } else { - h = azfile.NewFileURL(*destURL, p) + client = shareClient.NewRootDirectoryClient().NewFileClient(directoryOrFilePath) } return &azureFileSenderBase{ jptm: jptm, - fileOrDirURL: h, + serviceClient: serviceClient, + shareClient: shareClient, + fileOrDirClient: client, chunkSize: chunkSize, numChunks: numChunks, - pipeline: p, pacer: pacer, ctx: jptm.Context(), - headersToApply: props.SrcHTTPHeaders.ToAzFileHTTPHeaders(), + headersToApply: props.SrcHTTPHeaders.ToFileHTTPHeaders(), + smbPropertiesToApply: file.SMBProperties{}, + permissionsToApply: file.Permissions{}, sip: sip, - metadataToApply: props.SrcMetadata.ToAzFileMetadata(), + metadataToApply: props.SrcMetadata, }, nil } -func (u *azureFileSenderBase) fileURL() azfile.FileURL { - return 
u.fileOrDirURL.(azfile.FileURL) +func (u *azureFileSenderBase) getFileClient() *file.Client { + return u.fileOrDirClient.(*file.Client) } -func (u *azureFileSenderBase) dirURL() azfile.DirectoryURL { - return u.fileOrDirURL.(azfile.DirectoryURL) +func (u *azureFileSenderBase) getDirectoryClient() *directory.Client { + return u.fileOrDirClient.(*directory.Client) } func (u *azureFileSenderBase) ChunkSize() int64 { @@ -136,7 +165,8 @@ func (u *azureFileSenderBase) NumChunks() uint32 { } func (u *azureFileSenderBase) RemoteFileExists() (bool, time.Time, error) { - return remoteObjectExists(u.fileURL().GetProperties(u.ctx)) + props, err := u.getFileClient().GetProperties(u.ctx, nil) + return remoteObjectExists(filePropertiesResponseAdapter{props}, err) } func (u *azureFileSenderBase) Prologue(state common.PrologueState) (destinationModified bool) { @@ -151,13 +181,13 @@ func (u *azureFileSenderBase) Prologue(state common.PrologueState) (destinationM u.headersToApply.ContentType = state.GetInferredContentType(u.jptm) } - stage, err := u.addPermissionsToHeaders(info, u.fileURL().URL()) + stage, err := u.addPermissionsToHeaders(info, u.getFileClient().URL()) if err != nil { jptm.FailActiveSend(stage, err) return } - stage, err = u.addSMBPropertiesToHeaders(info, u.fileURL().URL()) + stage, err = u.addSMBPropertiesToHeaders(info) if err != nil { jptm.FailActiveSend(stage, err) return @@ -165,23 +195,22 @@ func (u *azureFileSenderBase) Prologue(state common.PrologueState) (destinationM // Turn off readonly at creation time (because if its set at creation time, we won't be // able to upload any data to the file!). We'll set it in epilogue, if necessary. - creationHeaders := u.headersToApply - if creationHeaders.FileAttributes != nil { - revisedAttribs := creationHeaders.FileAttributes.Remove(azfile.FileAttributeReadonly) - creationHeaders.FileAttributes = &revisedAttribs + creationProperties := u.smbPropertiesToApply + if creationProperties.Attributes != nil { + creationProperties.Attributes.ReadOnly = false } err = u.DoWithOverrideReadOnly(u.ctx, func() (interface{}, error) { - return u.fileURL().Create(u.ctx, info.SourceSize, creationHeaders, u.metadataToApply) + return u.getFileClient().Create(u.ctx, info.SourceSize, &file.CreateOptions{HTTPHeaders: &u.headersToApply, Permissions: &u.permissionsToApply, SMBProperties: &creationProperties, Metadata: u.metadataToApply}) }, - u.fileOrDirURL, + u.fileOrDirClient, u.jptm.GetForceIfReadOnly()) - if strErr, ok := err.(azfile.StorageError); ok && strErr.ServiceCode() == azfile.ServiceCodeParentNotFound { + if fileerror.HasCode(err, fileerror.ParentNotFound) { // Create the parent directories of the file. Note share must be existed, as the files are listed from share or directory. 
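The error handling above is the general Track 2 migration pattern: instead of type-asserting azfile.StorageError and comparing ServiceCode, errors are inspected with fileerror.HasCode (itself built on errors.As over *azcore.ResponseError). A small illustration of the same style; the classification strings are purely illustrative:

import (
	"errors"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/fileerror"
)

// classifyCreateError shows how a sender can decide whether to create parent directories.
func classifyCreateError(err error) string {
	switch {
	case err == nil:
		return "ok"
	case fileerror.HasCode(err, fileerror.ParentNotFound):
		return "create the parent directories first"
	case fileerror.HasCode(err, fileerror.ShareNotFound):
		return "share is missing"
	default:
		var respErr *azcore.ResponseError
		if errors.As(err, &respErr) {
			return "service error: " + respErr.ErrorCode
		}
		return "transport or client-side error"
	}
}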
- jptm.Log(pipeline.LogError, fmt.Sprintf("%s: %s \n AzCopy going to create parent directories of the Azure files", strErr.ServiceCode(), strErr.Error())) - err = AzureFileParentDirCreator{}.CreateParentDirToRoot(u.ctx, u.fileURL(), u.pipeline, u.jptm.GetFolderCreationTracker()) + jptm.Log(pipeline.LogError, fmt.Sprintf("%s: %s \n AzCopy going to create parent directories of the Azure files", fileerror.ParentNotFound, err.Error())) + err = AzureFileParentDirCreator{}.CreateParentDirToRoot(u.ctx, u.getFileClient(), u.serviceClient, u.jptm.GetFolderCreationTracker()) if err != nil { u.jptm.FailActiveUpload("Creating parent directory", err) } @@ -189,9 +218,14 @@ func (u *azureFileSenderBase) Prologue(state common.PrologueState) (destinationM // retrying file creation err = u.DoWithOverrideReadOnly(u.ctx, func() (interface{}, error) { - return u.fileURL().Create(u.ctx, info.SourceSize, creationHeaders, u.metadataToApply) + return u.getFileClient().Create(u.ctx, info.SourceSize, &file.CreateOptions{ + HTTPHeaders: &u.headersToApply, + SMBProperties: &creationProperties, + Permissions: &u.permissionsToApply, + Metadata: u.metadataToApply, + }) }, - u.fileOrDirURL, + u.fileOrDirClient, u.jptm.GetForceIfReadOnly()) } @@ -210,11 +244,11 @@ func (u *azureFileSenderBase) DoWithOverrideReadOnly(ctx context.Context, action // try the action _, err := action() - if strErr, ok := err.(azfile.StorageError); ok && (strErr.ServiceCode() == azfile.ServiceCodeParentNotFound || strErr.ServiceCode() == azfile.ServiceCodeShareNotFound) { + if fileerror.HasCode(err, fileerror.ParentNotFound, fileerror.ShareNotFound) { return err } failedAsReadOnly := false - if strErr, ok := err.(azfile.StorageError); ok && strErr.ServiceCode() == azfile.ServiceCodeReadOnlyAttribute { + if fileerror.HasCode(err, fileerror.ReadOnlyAttribute) { failedAsReadOnly = true } if !failedAsReadOnly { @@ -227,15 +261,24 @@ func (u *azureFileSenderBase) DoWithOverrideReadOnly(ctx context.Context, action } // did fail as readonly, and forcing is enabled - none := azfile.FileAttributeNone - if f, ok := targetFileOrDir.(azfile.FileURL); ok { - h := azfile.FileHTTPHeaders{} - h.FileAttributes = &none // clear the attribs - _, err = f.SetHTTPHeaders(ctx, h) - } else if d, ok := targetFileOrDir.(azfile.DirectoryURL); ok { + if f, ok := targetFileOrDir.(*file.Client); ok { + h := file.HTTPHeaders{} + _, err = f.SetHTTPHeaders(ctx, &file.SetHTTPHeadersOptions{ + HTTPHeaders: &h, + SMBProperties: &file.SMBProperties{ + // clear the attributes + Attributes: &file.NTFSFileAttributes{None: true}, + }, + }) + } else if d, ok := targetFileOrDir.(*directory.Client); ok { // this code path probably isn't used, since ReadOnly (in Windows file systems at least) // only applies to the files in a folder, not to the folder itself. But we'll leave the code here, for now. 
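Clearing the read-only attribute also changes shape: Track 1 set FileAttributes to the FileAttributeNone string, while Track 2 expresses it as file.NTFSFileAttributes{None: true} inside the SMBProperties of the set-headers options, as DoWithOverrideReadOnly does above. A minimal sketch, assuming a ready *file.Client:

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file"
)

// clearReadOnly strips all NTFS attributes (including ReadOnly) so a forced overwrite can proceed.
func clearReadOnly(ctx context.Context, f *file.Client) error {
	_, err := f.SetHTTPHeaders(ctx, &file.SetHTTPHeadersOptions{
		HTTPHeaders: &file.HTTPHeaders{},
		SMBProperties: &file.SMBProperties{
			// None:true replaces the Track 1 FileAttributeNone sentinel.
			Attributes: &file.NTFSFileAttributes{None: true},
		},
	})
	return err
}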
- _, err = d.SetProperties(ctx, azfile.SMBProperties{FileAttributes: &none}) + _, err = d.SetProperties(ctx, &directory.SetPropertiesOptions{ + FileSMBProperties: &file.SMBProperties{ + // clear the attributes + Attributes: &file.NTFSFileAttributes{None: true}, + }, + }) } else { err = errors.New("cannot remove read-only attribute from unknown target type") } @@ -248,7 +291,7 @@ func (u *azureFileSenderBase) DoWithOverrideReadOnly(ctx context.Context, action return err } -func (u *azureFileSenderBase) addPermissionsToHeaders(info TransferInfo, destUrl url.URL) (stage string, err error) { +func (u *azureFileSenderBase) addPermissionsToHeaders(info TransferInfo, destURL string) (stage string, err error) { if !info.PreserveSMBPermissions.IsTruthy() { return "", nil } @@ -257,26 +300,26 @@ func (u *azureFileSenderBase) addPermissionsToHeaders(info TransferInfo, destUrl if sddlSIP, ok := u.sip.(ISMBPropertyBearingSourceInfoProvider); ok { // If both sides are Azure Files... if fSIP, ok := sddlSIP.(*fileSourceInfoProvider); ok { - srcURL, err := url.Parse(info.Source) - common.PanicIfErr(err) - srcURLParts := azfile.NewFileURLParts(*srcURL) - dstURLParts := azfile.NewFileURLParts(destUrl) + srcURLParts, err := file.ParseURL(info.Source) + common.PanicIfErr(err) + dstURLParts, err := file.ParseURL(destURL) + common.PanicIfErr(err) // and happen to be the same account and share, we can get away with using the same key and save a trip. if srcURLParts.Host == dstURLParts.Host && srcURLParts.ShareName == dstURLParts.ShareName { - u.headersToApply.PermissionKey = &fSIP.cachedPermissionKey + u.permissionsToApply.PermissionKey = &fSIP.cachedPermissionKey } } // If we didn't do the workaround, then let's get the SDDL and put it later. - if u.headersToApply.PermissionKey == nil || *u.headersToApply.PermissionKey == "" { + if u.permissionsToApply.PermissionKey == nil || *u.permissionsToApply.PermissionKey == "" { pString, err := sddlSIP.GetSDDL() // Sending "" to the service is invalid, but the service will return it sometimes (e.g. on file shares) // Thus, we'll let the files SDK fill in "inherit" for us, so the service is happy. 
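The permission-key reuse above hinges on comparing URL parts: file.ParseURL replaces azfile.NewFileURLParts, and the cached key is only valid when source and destination live in the same account and share. A sketch of that check in isolation (the helper name is illustrative):

import "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file"

// sameAccountAndShare reports whether a cached permission key from srcURL can be reused for dstURL.
func sameAccountAndShare(srcURL, dstURL string) (bool, error) {
	src, err := file.ParseURL(srcURL)
	if err != nil {
		return false, err
	}
	dst, err := file.ParseURL(dstURL)
	if err != nil {
		return false, err
	}
	return src.Host == dst.Host && src.ShareName == dst.ShareName, nil
}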
if pString != "" { - u.headersToApply.PermissionString = &pString + u.permissionsToApply.Permission = &pString } if err != nil { @@ -285,25 +328,21 @@ func (u *azureFileSenderBase) addPermissionsToHeaders(info TransferInfo, destUrl } } - if u.headersToApply.PermissionString != nil && len(*u.headersToApply.PermissionString) > filesServiceMaxSDDLSize { - fURLParts := azfile.NewFileURLParts(destUrl) - fURLParts.DirectoryOrFilePath = "" - shareURL := azfile.NewShareURL(fURLParts.URL(), u.pipeline) - + if u.permissionsToApply.Permission != nil && len(*u.permissionsToApply.Permission) > filesServiceMaxSDDLSize { sipm := u.jptm.SecurityInfoPersistenceManager() - pkey, err := sipm.PutSDDL(*u.headersToApply.PermissionString, shareURL) - u.headersToApply.PermissionKey = &pkey + pkey, err := sipm.PutSDDL(*u.permissionsToApply.Permission, u.shareClient) + u.permissionsToApply.PermissionKey = &pkey if err != nil { return "Putting permissions", err } ePermString := "" - u.headersToApply.PermissionString = &ePermString + u.permissionsToApply.Permission = &ePermString } return "", nil } -func (u *azureFileSenderBase) addSMBPropertiesToHeaders(info TransferInfo, destUrl url.URL) (stage string, err error) { +func (u *azureFileSenderBase) addSMBPropertiesToHeaders(info TransferInfo) (stage string, err error) { if !info.PreserveSMBInfo { return "", nil } @@ -319,27 +358,26 @@ func (u *azureFileSenderBase) addSMBPropertiesToHeaders(info TransferInfo, destU defer func() { // recover from potential panics and output raw properties for debug purposes if panicerr := recover(); panicerr != nil { stage = "Reading SMB properties" - pAdapt := smbProps.(*azfile.SMBPropertyAdapter) - attr := pAdapt.PropertySource.FileAttributes() - lwt := pAdapt.PropertySource.FileLastWriteTime() - fct := pAdapt.PropertySource.FileCreationTime() + attr, _ := smbProps.FileAttributes() + lwt := smbProps.FileLastWriteTime() + fct := smbProps.FileCreationTime() err = fmt.Errorf("failed to read SMB properties (%w)! Raw data: attr: `%s` lwt: `%s`, fct: `%s`", err, attr, lwt, fct) } }() } - attribs := smbProps.FileAttributes() - u.headersToApply.FileAttributes = &attribs + attribs, _ := smbProps.FileAttributes() + u.smbPropertiesToApply.Attributes = attribs if info.ShouldTransferLastWriteTime() { lwTime := smbProps.FileLastWriteTime() - u.headersToApply.FileLastWriteTime = &lwTime + u.smbPropertiesToApply.LastWriteTime = &lwTime } creationTime := smbProps.FileCreationTime() - u.headersToApply.FileCreationTime = &creationTime + u.smbPropertiesToApply.CreationTime = &creationTime } return "", nil } @@ -353,7 +391,11 @@ func (u *azureFileSenderBase) Epilogue() { // So when we uploaded the ranges, we've unintentionally changed the last-write-time. 
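Where Track 1 packed attributes, timestamps and the permission key into FileHTTPHeaders, Track 2 splits them across file.HTTPHeaders, file.SMBProperties and file.Permissions, which is why the sender now carries three separate fields. A sketch of assembling a create call from those pieces; the attribute choice and the input values are placeholders for what the source info provider actually supplies:

import (
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file"
)

// buildCreateOptions groups the three property families the way file.Client.Create expects them.
func buildCreateOptions(contentType, sddl string, created, lastWrite time.Time) *file.CreateOptions {
	return &file.CreateOptions{
		HTTPHeaders: &file.HTTPHeaders{ContentType: &contentType},
		SMBProperties: &file.SMBProperties{
			Attributes:    &file.NTFSFileAttributes{Archive: true}, // placeholder attribute set
			CreationTime:  &created,
			LastWriteTime: &lastWrite,
		},
		Permissions: &file.Permissions{Permission: to.Ptr(sddl)},
	}
}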
if u.jptm.IsLive() && u.jptm.Info().PreserveSMBInfo { // This is an extra round trip, but we can live with that for these relatively rare cases - _, err := u.fileURL().SetHTTPHeaders(u.ctx, u.headersToApply) + _, err := u.getFileClient().SetHTTPHeaders(u.ctx, &file.SetHTTPHeadersOptions{ + HTTPHeaders: &u.headersToApply, + Permissions: &u.permissionsToApply, + SMBProperties: &u.smbPropertiesToApply, + }) if err != nil { u.jptm.FailActiveSend("Applying final attribute settings", err) } @@ -370,69 +412,90 @@ func (u *azureFileSenderBase) Cleanup() { // contents will be at an unknown stage of partial completeness deletionContext, cancelFn := context.WithTimeout(context.WithValue(context.Background(), ServiceAPIVersionOverride, DefaultServiceApiVersion), 2*time.Minute) defer cancelFn() - _, err := u.fileURL().Delete(deletionContext) + _, err := u.getFileClient().Delete(deletionContext, nil) if err != nil { - jptm.Log(pipeline.LogError, fmt.Sprintf("error deleting the (incomplete) file %s. Failed with error %s", u.fileOrDirURL.String(), err.Error())) + jptm.Log(pipeline.LogError, fmt.Sprintf("error deleting the (incomplete) file %s. Failed with error %s", u.fileOrDirClient.URL(), err.Error())) } } } func (u *azureFileSenderBase) GetDestinationLength() (int64, error) { - prop, err := u.fileURL().GetProperties(u.ctx) + prop, err := u.getFileClient().GetProperties(u.ctx, nil) if err != nil { return -1, err } - return prop.ContentLength(), nil + if prop.ContentLength == nil { + return -1, fmt.Errorf("destination content length not returned") + } + return *prop.ContentLength, nil } func (u *azureFileSenderBase) EnsureFolderExists() error { - return AzureFileParentDirCreator{}.CreateDirToRoot(u.ctx, u.dirURL(), u.pipeline, u.jptm.GetFolderCreationTracker()) + return AzureFileParentDirCreator{}.CreateDirToRoot(u.ctx, u.shareClient, u.getDirectoryClient(), u.jptm.GetFolderCreationTracker()) } func (u *azureFileSenderBase) SetFolderProperties() error { info := u.jptm.Info() - _, err := u.addPermissionsToHeaders(info, u.dirURL().URL()) + _, err := u.addPermissionsToHeaders(info, u.getDirectoryClient().URL()) if err != nil { return err } - _, err = u.addSMBPropertiesToHeaders(info, u.dirURL().URL()) + _, err = u.addSMBPropertiesToHeaders(info) if err != nil { return err } - _, err = u.dirURL().SetMetadata(u.ctx, u.metadataToApply) + _, err = u.getDirectoryClient().SetMetadata(u.ctx, &directory.SetMetadataOptions{Metadata: u.metadataToApply}) if err != nil { return err } err = u.DoWithOverrideReadOnly(u.ctx, - func() (interface{}, error) { return u.dirURL().SetProperties(u.ctx, u.headersToApply.SMBProperties) }, - u.fileOrDirURL, + func() (interface{}, error) { + return u.getDirectoryClient().SetProperties(u.ctx, &directory.SetPropertiesOptions{ + FileSMBProperties: &u.smbPropertiesToApply, + FilePermissions: &u.permissionsToApply, + }) }, + u.fileOrDirClient, u.jptm.GetForceIfReadOnly()) return err } func (u *azureFileSenderBase) DirUrlToString() string { - dirUrl := azfile.NewFileURLParts(u.dirURL().URL()).URL() - dirUrl.RawQuery = "" + directoryURL := u.getDirectoryClient().URL() + rawURL, err := url.Parse(directoryURL) + common.PanicIfErr(err) + rawURL.RawQuery = "" // To avoid encoding/decoding - dirUrl.RawPath = "" - return dirUrl.String() + rawURL.RawPath = "" + return rawURL.String() } // namespace for functions related to creating parent directories in Azure File // to avoid free floating global funcs type AzureFileParentDirCreator struct{} -// getParentDirectoryURL gets parent directory URL of an 
Azure FileURL. -func (AzureFileParentDirCreator) getParentDirectoryURL(uh URLHolder, p pipeline.Pipeline) azfile.DirectoryURL { - u := uh.URL() - u.Path = u.Path[:strings.LastIndex(u.Path, "/")] - return azfile.NewDirectoryURL(u, p) +// getParentDirectoryClient gets parent directory client of a path. +func (AzureFileParentDirCreator) getParentDirectoryClient(uh URLHolder, serviceClient *service.Client) (*share.Client, *directory.Client, error) { + rawURL, _ := url.Parse(uh.URL()) + rawURL.Path = rawURL.Path[:strings.LastIndex(rawURL.Path, "/")] + directoryURLParts, err := filesas.ParseURL(rawURL.String()) + if err != nil { + return nil, nil, err + } + directoryOrFilePath := directoryURLParts.DirectoryOrFilePath + shareClient := serviceClient.NewShareClient(directoryURLParts.ShareName) + if directoryURLParts.ShareSnapshot != "" { + shareClient, err = shareClient.WithSnapshot(directoryURLParts.ShareSnapshot) + if err != nil { + return nil, nil, err + } + } + return shareClient, shareClient.NewRootDirectoryClient().NewSubdirectoryClient(directoryOrFilePath), nil } // verifyAndHandleCreateErrors handles create errors, StatusConflict is ignored, as specific level directory could be existing. @@ -440,9 +503,8 @@ func (AzureFileParentDirCreator) getParentDirectoryURL(uh URLHolder, p pipeline. // and there is no permission on directory level, i.e. create directory is a general permission for each level directories for Azure file. func (AzureFileParentDirCreator) verifyAndHandleCreateErrors(err error) error { if err != nil { - sErr, sErrOk := err.(azfile.StorageError) - if sErrOk && sErr.Response() != nil && - (sErr.Response().StatusCode == http.StatusConflict) { // Note the ServiceCode actually be AuthenticationFailure when share failed to be created, if want to create share as well. + var respErr *azcore.ResponseError + if errors.As(err, &respErr) && respErr.StatusCode == http.StatusConflict { // Note the ServiceCode actually be AuthenticationFailure when share failed to be created, if want to create share as well. return nil } return err @@ -459,31 +521,38 @@ func (AzureFileParentDirCreator) splitWithoutToken(str string, token rune) []str } // CreateParentDirToRoot creates parent directories of the Azure file if file's parent directory doesn't exist. 
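Status-code checks that used to go through StorageError.Response() now unwrap *azcore.ResponseError with errors.As, as verifyAndHandleCreateErrors does above for 409 Conflict. The same idea in isolation:

import (
	"errors"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
)

// ignoreConflict treats 409 as success: the directory (or share) already exists.
func ignoreConflict(err error) error {
	var respErr *azcore.ResponseError
	if errors.As(err, &respErr) && respErr.StatusCode == http.StatusConflict {
		return nil
	}
	return err
}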
-func (d AzureFileParentDirCreator) CreateParentDirToRoot(ctx context.Context, fileURL azfile.FileURL, p pipeline.Pipeline, t FolderCreationTracker) error { - dirURL := d.getParentDirectoryURL(fileURL, p) - return d.CreateDirToRoot(ctx, dirURL, p, t) +func (d AzureFileParentDirCreator) CreateParentDirToRoot(ctx context.Context, fileClient *file.Client, serviceClient *service.Client, t FolderCreationTracker) error { + shareClient, directoryClient, err := d.getParentDirectoryClient(fileClient, serviceClient) + if err != nil { + return err + } + return d.CreateDirToRoot(ctx, shareClient, directoryClient, t) } -// CreateDirToRoot Creates the dir (and parents as necessary) if it does not exist -func (d AzureFileParentDirCreator) CreateDirToRoot(ctx context.Context, dirURL azfile.DirectoryURL, p pipeline.Pipeline, t FolderCreationTracker) error { - dirURLExtension := common.FileURLPartsExtension{FileURLParts: azfile.NewFileURLParts(dirURL.URL())} - if _, err := dirURL.GetProperties(ctx); err != nil { - if resp, respOk := err.(pipeline.Response); respOk && resp.Response() != nil && - (resp.Response().StatusCode == http.StatusNotFound || - resp.Response().StatusCode == http.StatusForbidden) { +func (d AzureFileParentDirCreator) CreateDirToRoot(ctx context.Context, shareClient *share.Client, directoryClient *directory.Client, t FolderCreationTracker) error { + fileURLParts, err := file.ParseURL(directoryClient.URL()) + if err != nil { + return err + } + _, err = directoryClient.GetProperties(ctx, nil) + if err != nil { + var respErr *azcore.ResponseError + if errors.As(err, &respErr) && (respErr.StatusCode == http.StatusNotFound || respErr.StatusCode == http.StatusForbidden) { // Either the parent directory does not exist, or we may not have read permissions. // Try to create the parent directories. Split directories as segments. - segments := d.splitWithoutToken(dirURLExtension.DirectoryOrFilePath, '/') - - shareURL := azfile.NewShareURL(dirURLExtension.GetShareURL(), p) - curDirURL := shareURL.NewRootDirectoryURL() // Share directory should already exist, doesn't support creating share + segments := d.splitWithoutToken(fileURLParts.DirectoryOrFilePath, '/') + currentDirectoryClient := shareClient.NewRootDirectoryClient() // Share directory should already exist, doesn't support creating share // Try to create the directories for i := 0; i < len(segments); i++ { - curDirURL = curDirURL.NewDirectoryURL(segments[i]) - recorderURL := curDirURL.URL() + currentDirectoryClient = currentDirectoryClient.NewSubdirectoryClient(segments[i]) + rawURL := currentDirectoryClient.URL() + recorderURL, err := url.Parse(rawURL) + if err != nil { + return err + } recorderURL.RawQuery = "" err = t.CreateFolder(recorderURL.String(), func() error { - _, err := curDirURL.Create(ctx, azfile.Metadata{}, azfile.SMBProperties{}) + _, err := currentDirectoryClient.Create(ctx, nil) return err }) if verifiedErr := d.verifyAndHandleCreateErrors(err); verifiedErr != nil { @@ -494,7 +563,6 @@ func (d AzureFileParentDirCreator) CreateDirToRoot(ctx context.Context, dirURL a return err } } - // Directly return if parent directory exists. 
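CreateDirToRoot above is essentially mkdir -p over Azure Files: split the path into segments, chain NewSubdirectoryClient from the share root, and create each level while forgiving "already exists". A trimmed-down sketch of that walk (the conflict forgiveness and the folder-creation tracker are omitted here for brevity):

import (
	"context"
	"strings"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/share"
)

// ensureDirectoryPath creates every segment of dirPath beneath the share root.
func ensureDirectoryPath(ctx context.Context, shareClient *share.Client, dirPath string) error {
	current := shareClient.NewRootDirectoryClient()
	for _, segment := range strings.Split(strings.Trim(dirPath, "/"), "/") {
		if segment == "" {
			continue
		}
		current = current.NewSubdirectoryClient(segment)
		if _, err := current.Create(ctx, nil); err != nil {
			return err // the real code forgives StatusConflict, see verifyAndHandleCreateErrors
		}
	}
	return nil
}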
return nil -} +} \ No newline at end of file diff --git a/ste/sender-azureFileFromLocal.go b/ste/sender-azureFileFromLocal.go index 1bf523c5c..99e27d349 100644 --- a/ste/sender-azureFileFromLocal.go +++ b/ste/sender-azureFileFromLocal.go @@ -22,6 +22,7 @@ package ste import ( "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" "github.com/Azure/azure-pipeline-go/pipeline" "github.com/Azure/azure-storage-azcopy/v10/common" @@ -32,8 +33,8 @@ type azureFileUploader struct { md5Channel chan []byte } -func newAzureFilesUploader(jptm IJobPartTransferMgr, destination string, p pipeline.Pipeline, pacer pacer, sip ISourceInfoProvider) (sender, error) { - senderBase, err := newAzureFileSenderBase(jptm, destination, p, pacer, sip) +func newAzureFilesUploader(jptm IJobPartTransferMgr, destination string, _ pipeline.Pipeline, pacer pacer, sip ISourceInfoProvider) (sender, error) { + senderBase, err := newAzureFileSenderBase(jptm, destination, pacer, sip) if err != nil { return nil, err } @@ -68,7 +69,7 @@ func (u *azureFileUploader) GenerateUploadFunc(id common.ChunkID, blockIndex int // upload the byte range represented by this chunk jptm.LogChunkStatus(id, common.EWaitReason.Body()) body := newPacedRequestBody(u.ctx, reader, u.pacer) - _, err := u.fileURL().UploadRange(u.ctx, id.OffsetInFile(), body, nil) + _, err := u.getFileClient().UploadRange(u.ctx, id.OffsetInFile(), body, nil) if err != nil { jptm.FailActiveUpload("Uploading range", err) return @@ -89,7 +90,11 @@ func (u *azureFileUploader) Epilogue() { } u.headersToApply.ContentMD5 = md5Hash - _, err := u.fileURL().SetHTTPHeaders(u.ctx, u.headersToApply) + _, err := u.getFileClient().SetHTTPHeaders(u.ctx, &file.SetHTTPHeadersOptions{ + HTTPHeaders: &u.headersToApply, + Permissions: &u.permissionsToApply, + SMBProperties: &u.smbPropertiesToApply, + }) return err }) } diff --git a/ste/sender-azureFileFromURL.go b/ste/sender-azureFileFromURL.go index 0391cd935..298401bb3 100644 --- a/ste/sender-azureFileFromURL.go +++ b/ste/sender-azureFileFromURL.go @@ -21,21 +21,19 @@ package ste import ( - "net/url" - "github.com/Azure/azure-pipeline-go/pipeline" "github.com/Azure/azure-storage-azcopy/v10/common" ) type urlToAzureFileCopier struct { azureFileSenderBase - srcURL url.URL + srcURL string } -func newURLToAzureFileCopier(jptm IJobPartTransferMgr, destination string, p pipeline.Pipeline, pacer pacer, sip ISourceInfoProvider) (sender, error) { +func newURLToAzureFileCopier(jptm IJobPartTransferMgr, destination string, _ pipeline.Pipeline, pacer pacer, sip ISourceInfoProvider) (sender, error) { srcInfoProvider := sip.(IRemoteSourceInfoProvider) // "downcast" to the type we know it really has - senderBase, err := newAzureFileSenderBase(jptm, destination, p, pacer, sip) + senderBase, err := newAzureFileSenderBase(jptm, destination, pacer, sip) if err != nil { return nil, err } @@ -45,7 +43,7 @@ func newURLToAzureFileCopier(jptm IJobPartTransferMgr, destination string, p pip return nil, err } - return &urlToAzureFileCopier{azureFileSenderBase: *senderBase, srcURL: *srcURL}, nil + return &urlToAzureFileCopier{azureFileSenderBase: *senderBase, srcURL: srcURL}, nil } func (u *urlToAzureFileCopier) GenerateCopyFunc(id common.ChunkID, blockIndex int32, adjustedChunkSize int64, chunkIsWholeFile bool) chunkFunc { @@ -64,8 +62,8 @@ func (u *urlToAzureFileCopier) GenerateCopyFunc(id common.ChunkID, blockIndex in if err := u.pacer.RequestTrafficAllocation(u.jptm.Context(), adjustedChunkSize); err != nil { u.jptm.FailActiveUpload("Pacing block 
(global level)", err) } - _, err := u.fileURL().UploadRangeFromURL( - u.ctx, u.srcURL, id.OffsetInFile(), id.OffsetInFile(), adjustedChunkSize) + _, err := u.getFileClient().UploadRangeFromURL( + u.ctx, u.srcURL, id.OffsetInFile(), id.OffsetInFile(), adjustedChunkSize, nil) if err != nil { u.jptm.FailActiveS2SCopy("Uploading range from URL", err) return diff --git a/ste/sender-blobFS.go b/ste/sender-blobFS.go index e25fc8ced..e7025c7ea 100644 --- a/ste/sender-blobFS.go +++ b/ste/sender-blobFS.go @@ -23,7 +23,10 @@ package ste import ( "context" "fmt" - "github.com/Azure/azure-storage-blob-go/azblob" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob" "net/url" "strings" "time" @@ -34,11 +37,16 @@ import ( "github.com/Azure/azure-storage-azcopy/v10/common" ) +type URLHolderV1 interface { + String() string + URL() url.URL +} + type blobFSSenderBase struct { jptm IJobPartTransferMgr - sip ISourceInfoProvider - fileOrDirURL URLHolder - chunkSize int64 + sip ISourceInfoProvider + fileOrDirURL URLHolderV1 + chunkSize int64 numChunks uint32 pipeline pipeline.Pipeline pacer pacer @@ -66,7 +74,7 @@ func newBlobFSSenderBase(jptm IJobPartTransferMgr, destination string, p pipelin } headers := props.SrcHTTPHeaders.ToBlobFSHTTPHeaders() - var h URLHolder + var h URLHolderV1 if info.IsFolderPropertiesTransfer() { h = azbfs.NewDirectoryURL(*destURL, p) } else { @@ -216,12 +224,15 @@ func (u *blobFSSenderBase) doEnsureDirExists(d azbfs.DirectoryURL) error { return err } -func (u *blobFSSenderBase) GetBlobURL() azblob.BlobURL{ - blobPipeline := u.jptm.(*jobPartTransferMgr).jobPartMgr.(*jobPartMgr).secondaryPipeline // pull the secondary (blob) pipeline - bURLParts := azblob.NewBlobURLParts(u.fileOrDirURL.URL()) - bURLParts.Host = strings.ReplaceAll(bURLParts.Host, ".dfs", ".blob") // switch back to blob +func (u *blobFSSenderBase) GetBlobURL() (*blockblob.Client, error) { + blobURLParts, err := blob.ParseURL(u.fileOrDirURL.String()) + if err != nil { + return nil, err + } + blobURLParts.Host = strings.ReplaceAll(blobURLParts.Host, ".dfs", ".blob") // switch back to blob - return azblob.NewBlobURL(bURLParts.URL(), blobPipeline) + client := common.CreateBlockBlobClient(blobURLParts.String(), u.jptm.CredentialInfo(), u.jptm.CredentialOpOptions(), u.jptm.ClientOptions()) + return client, nil } func (u *blobFSSenderBase) GetSourcePOSIXProperties() (common.UnixStatAdapter, error) { @@ -245,11 +256,15 @@ func (u *blobFSSenderBase) SetPOSIXProperties() error { return nil } - meta := azblob.Metadata{} + meta := common.Metadata{} common.AddStatToBlobMetadata(adapter, meta) delete(meta, common.POSIXFolderMeta) // Can't be set on HNS accounts. 
- _, err = u.GetBlobURL().SetMetadata(u.jptm.Context(), meta, azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{}) + client, err := u.GetBlobURL() + if err != nil { + return err + } + _, err = client.SetMetadata(u.jptm.Context(), meta, nil) return err } @@ -267,7 +282,7 @@ func (u *blobFSSenderBase) DirUrlToString() string { } func (u *blobFSSenderBase) SendSymlink(linkData string) error { - meta := azblob.Metadata{} // meta isn't traditionally supported for dfs, but still exists + meta := common.Metadata{} // meta isn't traditionally supported for dfs, but still exists adapter, err := u.GetSourcePOSIXProperties() if err != nil { return fmt.Errorf("when polling for POSIX properties: %w", err) @@ -276,25 +291,25 @@ func (u *blobFSSenderBase) SendSymlink(linkData string) error { } common.AddStatToBlobMetadata(adapter, meta) - meta[common.POSIXSymlinkMeta] = "true" // just in case there isn't any metadata - blobHeaders := azblob.BlobHTTPHeaders{ // translate headers, since those still apply - ContentType: u.creationTimeHeaders.ContentType, - ContentEncoding: u.creationTimeHeaders.ContentEncoding, - ContentLanguage: u.creationTimeHeaders.ContentLanguage, - ContentDisposition: u.creationTimeHeaders.ContentDisposition, - CacheControl: u.creationTimeHeaders.CacheControl, + meta[common.POSIXSymlinkMeta] = to.Ptr("true") // just in case there isn't any metadata + blobHeaders := blob.HTTPHeaders{ // translate headers, since those still apply + BlobContentType: &u.creationTimeHeaders.ContentType, + BlobContentEncoding: &u.creationTimeHeaders.ContentEncoding, + BlobContentLanguage: &u.creationTimeHeaders.ContentLanguage, + BlobContentDisposition: &u.creationTimeHeaders.ContentDisposition, + BlobCacheControl: &u.creationTimeHeaders.CacheControl, } - - _, err = u.GetBlobURL().ToBlockBlobURL().Upload( + client, err := u.GetBlobURL() + if err != nil { + return err + } + _, err = client.Upload( u.jptm.Context(), - strings.NewReader(linkData), - blobHeaders, - meta, - azblob.BlobAccessConditions{}, - azblob.AccessTierNone, // dfs uses default tier - nil, // dfs doesn't support tags - azblob.ClientProvidedKeyOptions{}, // cpk isn't used for dfs - azblob.ImmutabilityPolicyOptions{}) // dfs doesn't support immutability policy + streaming.NopCloser(strings.NewReader(linkData)), + &blockblob.UploadOptions{ + HTTPHeaders: &blobHeaders, + Metadata: meta, + }) return err } diff --git a/ste/sender-blobFolders.go b/ste/sender-blobFolders.go index 044364913..42d4b2d91 100644 --- a/ste/sender-blobFolders.go +++ b/ste/sender-blobFolders.go @@ -1,33 +1,33 @@ package ste import ( + "bytes" "fmt" "github.com/Azure/azure-pipeline-go/pipeline" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob" "github.com/Azure/azure-storage-azcopy/v10/azbfs" "github.com/Azure/azure-storage-azcopy/v10/common" - "github.com/Azure/azure-storage-blob-go/azblob" "net/url" "strings" "time" ) type blobFolderSender struct { - destination azblob.BlockBlobURL // We'll treat all folders as block blobs - jptm IJobPartTransferMgr - sip ISourceInfoProvider - metadataToApply azblob.Metadata - headersToAppply azblob.BlobHTTPHeaders - blobTagsToApply azblob.BlobTagsMap - cpkToApply azblob.ClientProvidedKeyOptions + destinationClient *blockblob.Client // We'll treat all folders as block blobs + 
jptm IJobPartTransferMgr + sip ISourceInfoProvider + metadataToApply common.Metadata + headersToApply blob.HTTPHeaders + blobTagsToApply common.BlobTags } -func newBlobFolderSender(jptm IJobPartTransferMgr, destination string, p pipeline.Pipeline, pacer pacer, sip ISourceInfoProvider) (sender, error) { - destURL, err := url.Parse(destination) - if err != nil { - return nil, err - } +func newBlobFolderSender(jptm IJobPartTransferMgr, destination string, sip ISourceInfoProvider) (sender, error) { + destinationClient := common.CreateBlockBlobClient(destination, jptm.CredentialInfo(), jptm.CredentialOpOptions(), jptm.ClientOptions()) - destBlockBlobURL := azblob.NewBlockBlobURL(*destURL, p) props, err := sip.Properties() if err != nil { @@ -36,13 +36,12 @@ func newBlobFolderSender(jptm IJobPartTransferMgr, destination string, p pipelin var out sender fsend := blobFolderSender{ - jptm: jptm, - sip: sip, - destination: destBlockBlobURL, - metadataToApply: props.SrcMetadata.Clone().ToAzBlobMetadata(), // We're going to modify it, so we should clone it. - headersToAppply: props.SrcHTTPHeaders.ToAzBlobHTTPHeaders(), - blobTagsToApply: props.SrcBlobTags.ToAzBlobTagsMap(), - cpkToApply: common.ToClientProvidedKeyOptions(jptm.CpkInfo(), jptm.CpkScopeInfo()), + jptm: jptm, + sip: sip, + destinationClient: destinationClient, + metadataToApply: props.SrcMetadata.Clone(), // We're going to modify it, so we should clone it. + headersToApply: props.SrcHTTPHeaders.ToBlobHTTPHeaders(), + blobTagsToApply: props.SrcBlobTags, } fromTo := jptm.FromTo() if fromTo.IsUpload() { @@ -55,11 +54,18 @@ func newBlobFolderSender(jptm IJobPartTransferMgr, destination string, p pipelin } func (b *blobFolderSender) setDatalakeACLs() { - bURLParts := azblob.NewBlobURLParts(b.destination.URL()) - bURLParts.BlobName = strings.TrimSuffix(bURLParts.BlobName, "/") // BlobFS does not like when we target a folder with the / - bURLParts.Host = strings.ReplaceAll(bURLParts.Host, ".blob", ".dfs") + blobURLParts, err := blob.ParseURL(b.destinationClient.URL()) + if err != nil { + b.jptm.FailActiveSend("Parsing blob URL", err) + } + blobURLParts.BlobName = strings.TrimSuffix(blobURLParts.BlobName, "/") // BlobFS does not like when we target a folder with the / + blobURLParts.Host = strings.ReplaceAll(blobURLParts.Host, ".blob", ".dfs") + dfsURL, err := url.Parse(blobURLParts.String()) + if err != nil { + b.jptm.FailActiveSend("Parsing datalake URL", err) + } // todo: jank, and violates the principle of interfaces - fileURL := azbfs.NewFileURL(bURLParts.URL(), b.jptm.(*jobPartTransferMgr).jobPartMgr.(*jobPartMgr).secondaryPipeline) + fileURL := azbfs.NewFileURL(*dfsURL, b.jptm.(*jobPartTransferMgr).jobPartMgr.(*jobPartMgr).secondaryPipeline) // We know for a fact our source is a "blob". acl, err := b.sip.(*blobSourceInfoProvider).AccessControl() @@ -83,19 +89,24 @@ func (b *blobFolderSender) overwriteDFSProperties() (string, error) { // do not set folder flag as it's invalid to modify a folder with delete(b.metadataToApply, "hdi_isfolder") + delete(b.metadataToApply, "Hdi_isfolder") + // TODO : Here should we undo delete "Hdi_isfolder" too? // SetMetadata can set CPK if it wasn't specified prior. This is not a "full" overwrite, but a best-effort overwrite. 
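Metadata in the Track 2 SDKs is map[string]*string, and keys that round-trip through the service come back with an upper-cased first letter, which is why the folder flag is deleted (and later set) under both spellings. A small sketch of that dual-key handling; the helper names are illustrative, and azcopy deliberately preserves whichever spelling already exists rather than normalising:

import "github.com/Azure/azure-sdk-for-go/sdk/azcore/to"

// stripFolderFlag removes the HNS folder marker under both observed capitalisations.
func stripFolderFlag(metadata map[string]*string) {
	delete(metadata, "hdi_isfolder")
	delete(metadata, "Hdi_isfolder")
}

// setFolderFlag re-applies the marker using the spelling already present, if any.
func setFolderFlag(metadata map[string]*string) {
	if _, ok := metadata["Hdi_isfolder"]; ok {
		metadata["Hdi_isfolder"] = to.Ptr("true")
		return
	}
	metadata["hdi_isfolder"] = to.Ptr("true")
}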
- _, err = b.destination.SetMetadata(b.jptm.Context(), b.metadataToApply, azblob.BlobAccessConditions{}, b.cpkToApply) + _, err = b.destinationClient.SetMetadata(b.jptm.Context(), b.metadataToApply, + &blob.SetMetadataOptions{ + CPKInfo: b.jptm.CpkInfo(), + CPKScopeInfo: b.jptm.CpkScopeInfo(), + }) if err != nil { return "Set Metadata", fmt.Errorf("A best-effort overwrite was attempted; CPK errors cannot be handled when the blob cannot be deleted.\n%w", err) } - - // blob API not yet supported for HNS account error; re-enable later. - //_, err = b.destination.SetTags(b.jptm.Context(), nil, nil, nil, b.blobTagsToApply) + //// blob API not yet supported for HNS account error; re-enable later. + //_, err = b.destinationClient.SetTags(b.jptm.Context(), b.blobTagsToApply, nil) //if err != nil { // return "Set Blob Tags", err //} - _, err = b.destination.SetHTTPHeaders(b.jptm.Context(), b.headersToAppply, azblob.BlobAccessConditions{}) + _, err = b.destinationClient.SetHTTPHeaders(b.jptm.Context(), b.headersToApply, nil) if err != nil { return "Set HTTP Headers", err } @@ -109,11 +120,18 @@ func (b *blobFolderSender) overwriteDFSProperties() (string, error) { } func (b *blobFolderSender) SetContainerACL() error { - bURLParts := azblob.NewBlobURLParts(b.destination.URL()) - bURLParts.ContainerName += "/" // Container-level ACLs NEED a / - bURLParts.Host = strings.ReplaceAll(bURLParts.Host, ".blob", ".dfs") + blobURLParts, err := blob.ParseURL(b.destinationClient.URL()) + if err != nil { + b.jptm.FailActiveSend("Parsing blob URL", err) + } + blobURLParts.ContainerName += "/" // container level perms MUST have a / + blobURLParts.Host = strings.ReplaceAll(blobURLParts.Host, ".blob", ".dfs") + dfsURL, err := url.Parse(blobURLParts.String()) + if err != nil { + b.jptm.FailActiveSend("Parsing datalake URL", err) + } // todo: jank, and violates the principle of interfaces - rootURL := azbfs.NewFileSystemURL(bURLParts.URL(), b.jptm.(*jobPartTransferMgr).jobPartMgr.(*jobPartMgr).secondaryPipeline) + rootURL := azbfs.NewFileSystemURL(*dfsURL, b.jptm.(*jobPartTransferMgr).jobPartMgr.(*jobPartMgr).secondaryPipeline) // We know for a fact our source is a "blob". acl, err := b.sip.(*blobSourceInfoProvider).AccessControl() @@ -134,13 +152,17 @@ func (b *blobFolderSender) SetContainerACL() error { func (b *blobFolderSender) EnsureFolderExists() error { t := b.jptm.GetFolderCreationTracker() - if azblob.NewBlobURLParts(b.destination.URL()).BlobName == "" { + parsedURL, err := blob.ParseURL(b.destinationClient.URL()) + if err != nil { + return err + } + if parsedURL.BlobName == "" { return b.SetContainerACL() // Can't do much with a container, but it is here. } - _, err := b.destination.GetProperties(b.jptm.Context(), azblob.BlobAccessConditions{}, b.cpkToApply) + _, err = b.destinationClient.GetProperties(b.jptm.Context(), &blob.GetPropertiesOptions{CPKInfo: b.jptm.CpkInfo()}) if err != nil { - if stgErr, ok := err.(azblob.StorageError); !(ok && stgErr.ServiceCode() == azblob.ServiceCodeBlobNotFound) { + if !bloberror.HasCode(err, bloberror.BlobNotFound) { return fmt.Errorf("when checking if blob exists: %w", err) } } else { @@ -152,19 +174,15 @@ func (b *blobFolderSender) EnsureFolderExists() error { If so, we should delete the old blob, and create a new one in it's place with all of our fancy new properties. 
*/ if t.ShouldSetProperties(b.DirUrlToString(), b.jptm.GetOverwriteOption(), b.jptm.GetOverwritePrompter()) { - _, err := b.destination.Delete(b.jptm.Context(), azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{}) + _, err := b.destinationClient.Delete(b.jptm.Context(), nil) if err != nil { - if stgErr, ok := err.(azblob.StorageError); ok { - if stgErr.ServiceCode() == "DirectoryIsNotEmpty" { // this is DFS, and we cannot do a standard replacement on it. Opt to simply overwrite the properties. + if bloberror.HasCode(err, "DirectoryIsNotEmpty") { // this is DFS, and we cannot do a standard replacement on it. Opt to simply overwrite the properties. where, err := b.overwriteDFSProperties() if err != nil { return fmt.Errorf("%w. When %s", err, where) } - return nil - } } - return fmt.Errorf("when deleting existing blob: %w", err) } } else { @@ -177,22 +195,27 @@ func (b *blobFolderSender) EnsureFolderExists() error { } } - b.metadataToApply["hdi_isfolder"] = "true" // Set folder metadata flag + // TODO (gapra): figure out better way to deal with hdi_isfolder metadata key capitalization + if b.metadataToApply["Hdi_isfolder"] != nil { + b.metadataToApply["Hdi_isfolder"] = to.Ptr("true") // Set folder metadata flag + } else { + b.metadataToApply["hdi_isfolder"] = to.Ptr("true") // Set folder metadata flag + } err = b.getExtraProperties() if err != nil { return fmt.Errorf("when getting additional folder properties: %w", err) } err = t.CreateFolder(b.DirUrlToString(), func() error { - _, err := b.destination.Upload(b.jptm.Context(), - strings.NewReader(""), - b.headersToAppply, - b.metadataToApply, - azblob.BlobAccessConditions{}, - azblob.DefaultAccessTier, // It doesn't make sense to use a special access tier, the blob will be 0 bytes. - b.blobTagsToApply, - b.cpkToApply, - azblob.ImmutabilityPolicyOptions{}) + // It doesn't make sense to use a special access tier for a blob folder, the blob will be 0 bytes. + _, err := b.destinationClient.Upload(b.jptm.Context(), streaming.NopCloser(bytes.NewReader(nil)), + &blockblob.UploadOptions{ + HTTPHeaders: &b.headersToApply, + Metadata: b.metadataToApply, + Tags: b.blobTagsToApply, + CPKInfo: b.jptm.CpkInfo(), + CPKScopeInfo: b.jptm.CpkScopeInfo(), + }) return err }) @@ -214,10 +237,14 @@ func (b *blobFolderSender) SetFolderProperties() error { } func (b *blobFolderSender) DirUrlToString() string { - uri, _ := url.Parse(b.jptm.Info().Destination) - uri.RawPath = "" - uri.RawQuery = "" - return uri.String() + rawURL := b.jptm.Info().Destination + parsedURL, err := url.Parse(rawURL) + if err != nil { + return "" + } + parsedURL.RawPath = "" + parsedURL.RawQuery = "" + return parsedURL.String() } // ===== Implement sender so that it can be returned in newBlobUploader. 
===== @@ -259,11 +286,11 @@ type dummyFolderUploader struct { blobFolderSender } -func (d dummyFolderUploader) GenerateUploadFunc(chunkID common.ChunkID, blockIndex int32, reader common.SingleChunkReader, chunkIsWholeFile bool) chunkFunc { +func (d *dummyFolderUploader) GenerateUploadFunc(chunkID common.ChunkID, blockIndex int32, reader common.SingleChunkReader, chunkIsWholeFile bool) chunkFunc { panic("this sender only sends folders.") } -func (d dummyFolderUploader) Md5Channel() chan<- []byte { +func (d *dummyFolderUploader) Md5Channel() chan<- []byte { panic("this sender only sends folders.") } @@ -273,7 +300,7 @@ type dummyFolderS2SCopier struct { blobFolderSender } -func (d dummyFolderS2SCopier) GenerateCopyFunc(chunkID common.ChunkID, blockIndex int32, adjustedChunkSize int64, chunkIsWholeFile bool) chunkFunc { +func (d *dummyFolderS2SCopier) GenerateCopyFunc(chunkID common.ChunkID, blockIndex int32, adjustedChunkSize int64, chunkIsWholeFile bool) chunkFunc { // TODO implement me panic("implement me") } diff --git a/ste/sender-blobFolders_linux.go b/ste/sender-blobFolders_linux.go index 4ad0e941c..af49ef11c 100644 --- a/ste/sender-blobFolders_linux.go +++ b/ste/sender-blobFolders_linux.go @@ -4,7 +4,7 @@ package ste import "github.com/Azure/azure-storage-azcopy/v10/common" -func (b blobFolderSender) getExtraProperties() error { +func (b *blobFolderSender) getExtraProperties() error { if b.jptm.Info().PreservePOSIXProperties { if sip, ok := b.sip.(*localFileSourceInfoProvider); ok { // has UNIX properties for sure; Blob metadata gets handled as expected. statAdapter, err := sip.GetUNIXProperties() diff --git a/ste/sender-blobFolders_other.go b/ste/sender-blobFolders_other.go index ba7115642..127486bba 100644 --- a/ste/sender-blobFolders_other.go +++ b/ste/sender-blobFolders_other.go @@ -2,6 +2,6 @@ package ste -func (b blobFolderSender) getExtraProperties() error { +func (b *blobFolderSender) getExtraProperties() error { return nil } \ No newline at end of file diff --git a/ste/sender-blobSymlinks.go b/ste/sender-blobSymlinks.go index 96f3f35e9..448ed8c66 100644 --- a/ste/sender-blobSymlinks.go +++ b/ste/sender-blobSymlinks.go @@ -2,53 +2,49 @@ package ste import ( "fmt" - "github.com/Azure/azure-pipeline-go/pipeline" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob" "github.com/Azure/azure-storage-azcopy/v10/common" - "github.com/Azure/azure-storage-blob-go/azblob" - "net/url" "strings" "time" ) type blobSymlinkSender struct { - destBlockBlobURL azblob.BlockBlobURL + destinationClient *blockblob.Client jptm IJobPartTransferMgr sip ISourceInfoProvider - headersToApply azblob.BlobHTTPHeaders - metadataToApply azblob.Metadata - destBlobTier azblob.AccessTierType - blobTagsToApply azblob.BlobTagsMap - cpkToApply azblob.ClientProvidedKeyOptions + headersToApply blob.HTTPHeaders + metadataToApply common.Metadata + destBlobTier *blob.AccessTier + blobTagsToApply common.BlobTags } -func newBlobSymlinkSender(jptm IJobPartTransferMgr, destination string, p pipeline.Pipeline, pacer pacer, sip ISourceInfoProvider) (sender, error) { - destURL, err := url.Parse(destination) - if err != nil { - return nil, err - } - - destBlockBlobURL := azblob.NewBlockBlobURL(*destURL, p) +func newBlobSymlinkSender(jptm IJobPartTransferMgr, destination string, sip ISourceInfoProvider) (sender, error) { + destinationClient := 
common.CreateBlockBlobClient(destination, jptm.CredentialInfo(), jptm.CredentialOpOptions(), jptm.ClientOptions()) props, err := sip.Properties() if err != nil { return nil, err } - destBlobTier := azblob.AccessTierNone + var destBlobTier *blob.AccessTier blockBlobTierOverride, _ := jptm.BlobTiers() if blockBlobTierOverride != common.EBlockBlobTier.None() { - destBlobTier = blockBlobTierOverride.ToAccessTierType() + destBlobTier = to.Ptr(blockBlobTierOverride.ToAccessTierType()) + } else { + destBlobTier = nil } var out sender ssend := blobSymlinkSender{ jptm: jptm, sip: sip, - destBlockBlobURL: destBlockBlobURL, - metadataToApply: props.SrcMetadata.Clone().ToAzBlobMetadata(), // We're going to modify it, so we should clone it. - headersToApply: props.SrcHTTPHeaders.ToAzBlobHTTPHeaders(), - blobTagsToApply: props.SrcBlobTags.ToAzBlobTagsMap(), - cpkToApply: common.ToClientProvidedKeyOptions(jptm.CpkInfo(), jptm.CpkScopeInfo()), + destinationClient: destinationClient, + metadataToApply: props.SrcMetadata.Clone(), // We're going to modify it, so we should clone it. + headersToApply: props.SrcHTTPHeaders.ToBlobHTTPHeaders(), + blobTagsToApply: props.SrcBlobTags, destBlobTier: destBlobTier, } fromTo := jptm.FromTo() @@ -66,9 +62,17 @@ func (s *blobSymlinkSender) SendSymlink(linkData string) error { if err != nil { return fmt.Errorf("when getting additional folder properties: %w", err) } - s.metadataToApply["is_symlink"] = "true" - - _, err = s.destBlockBlobURL.Upload(s.jptm.Context(), strings.NewReader(linkData), s.headersToApply, s.metadataToApply, azblob.BlobAccessConditions{}, s.destBlobTier, s.blobTagsToApply, s.cpkToApply, azblob.ImmutabilityPolicyOptions{}) + s.metadataToApply["is_symlink"] = to.Ptr("true") + + _, err = s.destinationClient.Upload(s.jptm.Context(), streaming.NopCloser(strings.NewReader(linkData)), + &blockblob.UploadOptions{ + HTTPHeaders: &s.headersToApply, + Metadata: s.metadataToApply, + Tier: s.destBlobTier, + Tags: s.blobTagsToApply, + CPKInfo: s.jptm.CpkInfo(), + CPKScopeInfo: s.jptm.CpkScopeInfo(), + }) return err } diff --git a/ste/sender-blobSymlinks_linux.go b/ste/sender-blobSymlinks_linux.go index b4e55d4e3..077c7cc4a 100644 --- a/ste/sender-blobSymlinks_linux.go +++ b/ste/sender-blobSymlinks_linux.go @@ -11,7 +11,7 @@ func (s *blobSymlinkSender) getExtraProperties() error { if s.jptm.Info().PreservePOSIXProperties { if unixSIP, ok := s.sip.(IUNIXPropertyBearingSourceInfoProvider); ok { // Clone the metadata before we write to it, we shouldn't be writing to the same metadata as every other blob. 
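The symlink sender's single-shot Upload above shows two more Track 2 idioms: the body must be an io.ReadSeekCloser (hence streaming.NopCloser), and the access tier is an optional *blob.AccessTier, nil meaning "service default" where Track 1 used the AccessTierNone sentinel. A sketch under those assumptions:

import (
	"context"
	"strings"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
)

// uploadSymlinkStub stores the link target as the blob body and marks it with is_symlink.
func uploadSymlinkStub(ctx context.Context, dst *blockblob.Client, linkTarget string, tier *blob.AccessTier) error {
	_, err := dst.Upload(ctx, streaming.NopCloser(strings.NewReader(linkTarget)),
		&blockblob.UploadOptions{
			Metadata: map[string]*string{"is_symlink": to.Ptr("true")},
			Tier:     tier, // nil leaves the tier to the service
		})
	return err
}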
- s.metadataToApply = common.Metadata(s.metadataToApply).Clone().ToAzBlobMetadata() + s.metadataToApply = s.metadataToApply.Clone() statAdapter, err := unixSIP.GetUNIXProperties() if err != nil { diff --git a/ste/sender-blockBlob.go b/ste/sender-blockBlob.go index 30357c308..1913bd8a0 100644 --- a/ste/sender-blockBlob.go +++ b/ste/sender-blockBlob.go @@ -25,6 +25,8 @@ import ( "encoding/base64" "errors" "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob" "net/url" "strconv" "strings" @@ -35,7 +37,6 @@ import ( "github.com/Azure/azure-pipeline-go/pipeline" "github.com/Azure/azure-storage-azcopy/v10/azbfs" - "github.com/Azure/azure-storage-blob-go/azblob" "github.com/Azure/azure-storage-azcopy/v10/common" ) @@ -43,22 +44,21 @@ import ( var lowMemoryLimitAdvice sync.Once type blockBlobSenderBase struct { - jptm IJobPartTransferMgr - sip ISourceInfoProvider - destBlockBlobURL azblob.BlockBlobURL - chunkSize int64 - numChunks uint32 - pacer pacer - blockIDs []string - destBlobTier azblob.AccessTierType + jptm IJobPartTransferMgr + sip ISourceInfoProvider + destBlockBlobClient *blockblob.Client + chunkSize int64 + numChunks uint32 + pacer pacer + blockIDs []string + destBlobTier blob.AccessTier // Headers and other info that we will apply to the destination object. // 1. For S2S, these come from the source service. // 2. When sending local data, they are computed based on the properties of the local file - headersToApply azblob.BlobHTTPHeaders - metadataToApply azblob.Metadata - blobTagsToApply azblob.BlobTagsMap - cpkToApply azblob.ClientProvidedKeyOptions + headersToApply blob.HTTPHeaders + metadataToApply common.Metadata + blobTagsToApply common.BlobTags atomicChunksWritten int32 atomicPutListIndicator int32 @@ -125,19 +125,14 @@ func getBlockNamePrefix(jobID common.JobID, partNum uint32, transferIndex uint32 return fmt.Sprintf("%s%s%05d%05d", placeHolderPrefix, jobIdStr, partNum, transferIndex) } -func newBlockBlobSenderBase(jptm IJobPartTransferMgr, destination string, p pipeline.Pipeline, pacer pacer, srcInfoProvider ISourceInfoProvider, inferredAccessTierType azblob.AccessTierType) (*blockBlobSenderBase, error) { +func newBlockBlobSenderBase(jptm IJobPartTransferMgr, destination string, pacer pacer, srcInfoProvider ISourceInfoProvider, inferredAccessTierType blob.AccessTier) (*blockBlobSenderBase, error) { // compute chunk count chunkSize, numChunks, err := getVerifiedChunkParams(jptm.Info(), jptm.CacheLimiter().Limit(), jptm.CacheLimiter().StrictLimit()) if err != nil { return nil, err } - destURL, err := url.Parse(destination) - if err != nil { - return nil, err - } - - destBlockBlobURL := azblob.NewBlockBlobURL(*destURL, p) + destBlockBlobClient := common.CreateBlockBlobClient(destination, jptm.CredentialInfo(), jptm.CredentialOpOptions(), jptm.ClientOptions()) props, err := srcInfoProvider.Properties() if err != nil { @@ -152,30 +147,27 @@ func newBlockBlobSenderBase(jptm IJobPartTransferMgr, destination string, p pipe destBlobTier = blockBlobTierOverride.ToAccessTierType() } - if props.SrcMetadata["hdi_isfolder"] == "true" { - destBlobTier = azblob.AccessTierNone + if (props.SrcMetadata["hdi_isfolder"] != nil && *props.SrcMetadata["hdi_isfolder"] == "true") || + (props.SrcMetadata["Hdi_isfolder"] != nil && *props.SrcMetadata["Hdi_isfolder"] == "true") { + destBlobTier = "" } - // Once track2 goes live, we'll not need to do this conversion/casting and can directly use CpkInfo & CpkScopeInfo - cpkToApply 
:= common.ToClientProvidedKeyOptions(jptm.CpkInfo(), jptm.CpkScopeInfo()) - partNum, transferIndex := jptm.TransferIndex() return &blockBlobSenderBase{ - jptm: jptm, - sip: srcInfoProvider, - destBlockBlobURL: destBlockBlobURL, - chunkSize: chunkSize, - numChunks: numChunks, - pacer: pacer, - blockIDs: make([]string, numChunks), - headersToApply: props.SrcHTTPHeaders.ToAzBlobHTTPHeaders(), - metadataToApply: props.SrcMetadata.ToAzBlobMetadata(), - blobTagsToApply: props.SrcBlobTags.ToAzBlobTagsMap(), - destBlobTier: destBlobTier, - cpkToApply: cpkToApply, - muBlockIDs: &sync.Mutex{}, - blockNamePrefix: getBlockNamePrefix(jptm.Info().JobID, partNum, transferIndex), + jptm: jptm, + sip: srcInfoProvider, + destBlockBlobClient: destBlockBlobClient, + chunkSize: chunkSize, + numChunks: numChunks, + pacer: pacer, + blockIDs: make([]string, numChunks), + headersToApply: props.SrcHTTPHeaders.ToBlobHTTPHeaders(), + metadataToApply: props.SrcMetadata, + blobTagsToApply: props.SrcBlobTags, + destBlobTier: destBlobTier, + muBlockIDs: &sync.Mutex{}, + blockNamePrefix: getBlockNamePrefix(jptm.Info().JobID, partNum, transferIndex), }, nil } @@ -192,7 +184,8 @@ func (s *blockBlobSenderBase) NumChunks() uint32 { } func (s *blockBlobSenderBase) RemoteFileExists() (bool, time.Time, error) { - return remoteObjectExists(s.destBlockBlobURL.GetProperties(s.jptm.Context(), azblob.BlobAccessConditions{}, s.cpkToApply)) + properties, err := s.destBlockBlobClient.GetProperties(s.jptm.Context(), &blob.GetPropertiesOptions{CPKInfo: s.jptm.CpkInfo()}) + return remoteObjectExists(blobPropertiesResponseAdapter{properties}, err) } func (s *blockBlobSenderBase) Prologue(ps common.PrologueState) (destinationModified bool) { @@ -200,7 +193,7 @@ func (s *blockBlobSenderBase) Prologue(ps common.PrologueState) (destinationModi s.buildCommittedBlockMap() } if s.jptm.ShouldInferContentType() { - s.headersToApply.ContentType = ps.GetInferredContentType(s.jptm) + s.headersToApply.BlobContentType = ps.GetInferredContentType(s.jptm) } return false } @@ -223,29 +216,38 @@ func (s *blockBlobSenderBase) Epilogue() { jptm.Log(pipeline.LogDebug, fmt.Sprintf("Conclude Transfer with BlockList %s", blockIDs)) // commit the blocks. 
- if !ValidateTier(jptm, s.destBlobTier, s.destBlockBlobURL.BlobURL, s.jptm.Context(), false) { - s.destBlobTier = azblob.DefaultAccessTier + if !ValidateTier(jptm, s.destBlobTier, s.destBlockBlobClient, s.jptm.Context(), false) { + s.destBlobTier = "" } blobTags := s.blobTagsToApply - separateSetTagsRequired := separateSetTagsRequired(blobTags) - if separateSetTagsRequired || len(blobTags) == 0 { + setTags := separateSetTagsRequired(blobTags) + if setTags || len(blobTags) == 0 { blobTags = nil } // TODO: Remove this snippet once service starts supporting CPK with blob tier - destBlobTier := s.destBlobTier - if s.cpkToApply.EncryptionScope != nil || (s.cpkToApply.EncryptionKey != nil && s.cpkToApply.EncryptionKeySha256 != nil) { - destBlobTier = azblob.AccessTierNone + destBlobTier := &s.destBlobTier + if s.jptm.IsSourceEncrypted() { + destBlobTier = nil } - if _, err := s.destBlockBlobURL.CommitBlockList(jptm.Context(), blockIDs, s.headersToApply, s.metadataToApply, azblob.BlobAccessConditions{}, destBlobTier, blobTags, s.cpkToApply, azblob.ImmutabilityPolicyOptions{}); err != nil { + _, err := s.destBlockBlobClient.CommitBlockList(jptm.Context(), blockIDs, + &blockblob.CommitBlockListOptions{ + HTTPHeaders: &s.headersToApply, + Metadata: s.metadataToApply, + Tier: destBlobTier, + Tags: blobTags, + CPKInfo: s.jptm.CpkInfo(), + CPKScopeInfo: s.jptm.CpkScopeInfo(), + }) + if err != nil { jptm.FailActiveSend("Committing block list", err) return } - if separateSetTagsRequired { - if _, err := s.destBlockBlobURL.SetTags(jptm.Context(), nil, nil, nil, s.blobTagsToApply); err != nil { + if setTags { + if _, err := s.destBlockBlobClient.SetTags(jptm.Context(), s.blobTagsToApply, nil); err != nil { s.jptm.Log(pipeline.LogWarning, err.Error()) } } @@ -253,11 +255,18 @@ func (s *blockBlobSenderBase) Epilogue() { // Upload ADLS Gen 2 ACLs if jptm.FromTo() == common.EFromTo.BlobBlob() && jptm.Info().PreserveSMBPermissions.IsTruthy() { - bURLParts := azblob.NewBlobURLParts(s.destBlockBlobURL.URL()) - bURLParts.BlobName = strings.TrimSuffix(bURLParts.BlobName, "/") // BlobFS does not like when we target a folder with the / - bURLParts.Host = strings.ReplaceAll(bURLParts.Host, ".blob", ".dfs") + blobURLParts, err := blob.ParseURL(s.destBlockBlobClient.URL()) + if err != nil { + jptm.FailActiveSend("Parsing blob URL", err) + } + blobURLParts.BlobName = strings.TrimSuffix(blobURLParts.BlobName, "/") // BlobFS does not like when we target a folder with the / + blobURLParts.Host = strings.ReplaceAll(blobURLParts.Host, ".blob", ".dfs") + dfsURL, err := url.Parse(blobURLParts.String()) + if err != nil { + jptm.FailActiveSend("Parsing datalake URL", err) + } // todo: jank, and violates the principle of interfaces - fileURL := azbfs.NewFileURL(bURLParts.URL(), s.jptm.(*jobPartTransferMgr).jobPartMgr.(*jobPartMgr).secondaryPipeline) + fileURL := azbfs.NewFileURL(*dfsURL, s.jptm.(*jobPartTransferMgr).jobPartMgr.(*jobPartMgr).secondaryPipeline) // We know for a fact our source is a "blob". acl, err := s.sip.(*blobSourceInfoProvider).AccessControl() @@ -286,17 +295,17 @@ func (s *blockBlobSenderBase) Cleanup() { // This prevents customer paying for their storage for a week until they get garbage collected, and it // also prevents any issues with "too many uncommitted blocks" if user tries to upload the blob again in future. 
// But if there are committed blocks, leave them there (since they still safely represent the state before our job even started) - blockList, err := s.destBlockBlobURL.GetBlockList(deletionContext, azblob.BlockListAll, azblob.LeaseAccessConditions{}) + blockList, err := s.destBlockBlobClient.GetBlockList(deletionContext, blockblob.BlockListTypeAll, nil) hasUncommittedOnly := err == nil && len(blockList.CommittedBlocks) == 0 && len(blockList.UncommittedBlocks) > 0 if hasUncommittedOnly { jptm.LogAtLevelForCurrentTransfer(pipeline.LogDebug, "Deleting uncommitted destination blob due to cancellation") // Delete can delete uncommitted blobs. - _, _ = s.destBlockBlobURL.Delete(deletionContext, azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{}) + _, _ = s.destBlockBlobClient.Delete(deletionContext, nil) } } else { // TODO: review (one last time) should we really do this? Or should we just give better error messages on "too many uncommitted blocks" errors jptm.LogAtLevelForCurrentTransfer(pipeline.LogDebug, "Deleting destination blob due to failure") - _, _ = s.destBlockBlobURL.Delete(deletionContext, azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{}) + _, _ = s.destBlockBlobClient.Delete(deletionContext, nil) } } } @@ -306,7 +315,7 @@ func (s *blockBlobSenderBase) GenerateCopyMetadata(id common.ChunkID) chunkFunc return createChunkFunc(true, s.jptm, id, func() { if unixSIP, ok := s.sip.(IUNIXPropertyBearingSourceInfoProvider); ok { // Clone the metadata before we write to it, we shouldn't be writing to the same metadata as every other blob. - s.metadataToApply = common.Metadata(s.metadataToApply).Clone().ToAzBlobMetadata() + s.metadataToApply = s.metadataToApply.Clone() statAdapter, err := unixSIP.GetUNIXProperties() if err != nil { @@ -315,7 +324,11 @@ func (s *blockBlobSenderBase) GenerateCopyMetadata(id common.ChunkID) chunkFunc common.AddStatToBlobMetadata(statAdapter, s.metadataToApply) } - _, err := s.destBlockBlobURL.SetMetadata(s.jptm.Context(), s.metadataToApply, azblob.BlobAccessConditions{}, s.cpkToApply) + _, err := s.destBlockBlobClient.SetMetadata(s.jptm.Context(), s.metadataToApply, + &blob.SetMetadataOptions{ + CPKInfo: s.jptm.CpkInfo(), + CPKScopeInfo: s.jptm.CpkScopeInfo(), + }) if err != nil { s.jptm.FailActiveSend("Setting Metadata", err) return @@ -345,7 +358,7 @@ func (s *blockBlobSenderBase) buildCommittedBlockMap() { return } - blockList, err := s.destBlockBlobURL.GetBlockList(s.jptm.Context(), azblob.BlockListUncommitted, azblob.LeaseAccessConditions{}) + blockList, err := s.destBlockBlobClient.GetBlockList(s.jptm.Context(), blockblob.BlockListTypeUncommitted, nil) if err != nil { s.jptm.LogAtLevelForCurrentTransfer(pipeline.LogError, "Failed to get blocklist. Restarting whole file.") return @@ -360,12 +373,14 @@ func (s *blockBlobSenderBase) buildCommittedBlockMap() { // 1. We find chunks by a different actor // 2. 
Chunk size differs for _, block := range blockList.UncommittedBlocks { - if len(block.Name) != common.AZCOPY_BLOCKNAME_LENGTH { + name := common.IffNotNil(block.Name, "") + size := common.IffNotNil(block.Size, 0) + if len(name) != common.AZCOPY_BLOCKNAME_LENGTH { s.jptm.LogAtLevelForCurrentTransfer(pipeline.LogDebug, invalidAzCopyBlockNameMsg) return } - tmp, err := base64.StdEncoding.DecodeString(block.Name) + tmp, err := base64.StdEncoding.DecodeString(name) decodedBlockName := string(tmp) if err != nil || !strings.HasPrefix(decodedBlockName, s.blockNamePrefix) { s.jptm.LogAtLevelForCurrentTransfer(pipeline.LogDebug, invalidAzCopyBlockNameMsg) @@ -379,7 +394,7 @@ func (s *blockBlobSenderBase) buildCommittedBlockMap() { } // Last chunk may have different blockSize - if block.Size != s.ChunkSize() && index != int(s.numChunks) { + if size != s.ChunkSize() && index != int(s.numChunks) { s.jptm.LogAtLevelForCurrentTransfer(pipeline.LogDebug, changedChunkSize) return } @@ -398,3 +413,17 @@ func (s *blockBlobSenderBase) ChunkAlreadyTransferred(index int32) bool { _, ok := s.completedBlockList[int(index)] return ok } + +// GetDestinationLength gets the destination length. +func (s *blockBlobSenderBase) GetDestinationLength() (int64, error) { + prop, err := s.destBlockBlobClient.GetProperties(s.jptm.Context(), &blob.GetPropertiesOptions{CPKInfo: s.jptm.CpkInfo()}) + + if err != nil { + return -1, err + } + + if prop.ContentLength == nil { + return -1, fmt.Errorf("destination content length not returned") + } + return *prop.ContentLength, nil +} diff --git a/ste/sender-blockBlobFromLocal.go b/ste/sender-blockBlobFromLocal.go index 9aead673e..6ac438d6e 100644 --- a/ste/sender-blockBlobFromLocal.go +++ b/ste/sender-blockBlobFromLocal.go @@ -23,10 +23,11 @@ package ste import ( "bytes" "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob" "sync/atomic" "github.com/Azure/azure-pipeline-go/pipeline" - "github.com/Azure/azure-storage-blob-go/azblob" "github.com/Azure/azure-storage-azcopy/v10/common" ) @@ -37,8 +38,8 @@ type blockBlobUploader struct { md5Channel chan []byte } -func newBlockBlobUploader(jptm IJobPartTransferMgr, destination string, p pipeline.Pipeline, pacer pacer, sip ISourceInfoProvider) (sender, error) { - senderBase, err := newBlockBlobSenderBase(jptm, destination, p, pacer, sip, azblob.AccessTierNone) +func newBlockBlobUploader(jptm IJobPartTransferMgr, destination string, pacer pacer, sip ISourceInfoProvider) (sender, error) { + senderBase, err := newBlockBlobSenderBase(jptm, destination, pacer, sip, "") if err != nil { return nil, err } @@ -51,7 +52,7 @@ func (s *blockBlobUploader) Prologue(ps common.PrologueState) (destinationModifi if unixSIP, ok := s.sip.(IUNIXPropertyBearingSourceInfoProvider); ok { // Clone the metadata before we write to it, we shouldn't be writing to the same metadata as every other blob. 
- s.metadataToApply = common.Metadata(s.metadataToApply).Clone().ToAzBlobMetadata() + s.metadataToApply = s.metadataToApply.Clone() statAdapter, err := unixSIP.GetUNIXProperties() if err != nil { @@ -102,7 +103,11 @@ func (u *blockBlobUploader) generatePutBlock(id common.ChunkID, blockIndex int32 // step 3: put block to remote u.jptm.LogChunkStatus(id, common.EWaitReason.Body()) body := newPacedRequestBody(u.jptm.Context(), reader, u.pacer) - _, err := u.destBlockBlobURL.StageBlock(u.jptm.Context(), encodedBlockID, body, azblob.LeaseAccessConditions{}, nil, u.cpkToApply) + _, err := u.destBlockBlobClient.StageBlock(u.jptm.Context(), encodedBlockID, body, + &blockblob.StageBlockOptions{ + CPKInfo: u.jptm.CpkInfo(), + CPKScopeInfo: u.jptm.CpkScopeInfo(), + }) if err != nil { u.jptm.FailActiveUpload("Staging block", err) return @@ -121,24 +126,32 @@ func (u *blockBlobUploader) generatePutWholeBlob(id common.ChunkID, blockIndex i // Upload the blob jptm.LogChunkStatus(id, common.EWaitReason.Body()) var err error - if !ValidateTier(jptm, u.destBlobTier, u.destBlockBlobURL.BlobURL, u.jptm.Context(), false) { - u.destBlobTier = azblob.DefaultAccessTier + if !ValidateTier(jptm, u.destBlobTier, u.destBlockBlobClient, u.jptm.Context(), false) { + u.destBlobTier = "" } blobTags := u.blobTagsToApply - separateSetTagsRequired := separateSetTagsRequired(blobTags) - if separateSetTagsRequired || len(blobTags) == 0 { + setTags := separateSetTagsRequired(blobTags) + if setTags || len(blobTags) == 0 { blobTags = nil } // TODO: Remove this snippet once service starts supporting CPK with blob tier - destBlobTier := u.destBlobTier - if u.cpkToApply.EncryptionScope != nil || (u.cpkToApply.EncryptionKey != nil && u.cpkToApply.EncryptionKeySha256 != nil) { - destBlobTier = azblob.AccessTierNone + destBlobTier := &u.destBlobTier + if u.jptm.IsSourceEncrypted() { + destBlobTier = nil } if jptm.Info().SourceSize == 0 { - _, err = u.destBlockBlobURL.Upload(jptm.Context(), bytes.NewReader(nil), u.headersToApply, u.metadataToApply, azblob.BlobAccessConditions{}, destBlobTier, blobTags, u.cpkToApply, azblob.ImmutabilityPolicyOptions{}) + _, err = u.destBlockBlobClient.Upload(jptm.Context(), streaming.NopCloser(bytes.NewReader(nil)), + &blockblob.UploadOptions{ + HTTPHeaders: &u.headersToApply, + Metadata: u.metadataToApply, + Tier: destBlobTier, + Tags: blobTags, + CPKInfo: jptm.CpkInfo(), + CPKScopeInfo: jptm.CpkScopeInfo(), + }) } else { // File with content @@ -148,12 +161,19 @@ func (u *blockBlobUploader) generatePutWholeBlob(id common.ChunkID, blockIndex i jptm.FailActiveUpload("Getting hash", errNoHash) return } - u.headersToApply.ContentMD5 = md5Hash + u.headersToApply.BlobContentMD5 = md5Hash // Upload the file body := newPacedRequestBody(jptm.Context(), reader, u.pacer) - _, err = u.destBlockBlobURL.Upload(jptm.Context(), body, u.headersToApply, u.metadataToApply, - azblob.BlobAccessConditions{}, u.destBlobTier, blobTags, u.cpkToApply, azblob.ImmutabilityPolicyOptions{}) + _, err = u.destBlockBlobClient.Upload(jptm.Context(), body, + &blockblob.UploadOptions{ + HTTPHeaders: &u.headersToApply, + Metadata: u.metadataToApply, + Tier: destBlobTier, + Tags: blobTags, + CPKInfo: jptm.CpkInfo(), + CPKScopeInfo: jptm.CpkScopeInfo(), + }) } // if the put blob is a failure, update the transfer status to failed @@ -164,8 +184,8 @@ func (u *blockBlobUploader) generatePutWholeBlob(id common.ChunkID, blockIndex i atomic.AddInt32(&u.atomicChunksWritten, 1) - if separateSetTagsRequired { - if _, err := 
u.destBlockBlobURL.SetTags(jptm.Context(), nil, nil, nil, u.blobTagsToApply); err != nil { + if setTags { + if _, err := u.destBlockBlobClient.SetTags(jptm.Context(), u.blobTagsToApply, nil); err != nil { u.jptm.Log(pipeline.LogWarning, err.Error()) } } @@ -181,7 +201,7 @@ func (u *blockBlobUploader) Epilogue() { md5Hash, ok := <-u.md5Channel if ok { - u.headersToApply.ContentMD5 = md5Hash + u.headersToApply.BlobContentMD5 = md5Hash } else { jptm.FailActiveSend("Getting hash", errNoHash) return @@ -190,13 +210,3 @@ func (u *blockBlobUploader) Epilogue() { u.blockBlobSenderBase.Epilogue() } - -func (u *blockBlobUploader) GetDestinationLength() (int64, error) { - prop, err := u.destBlockBlobURL.GetProperties(u.jptm.Context(), azblob.BlobAccessConditions{}, u.cpkToApply) - - if err != nil { - return -1, err - } - - return prop.ContentLength(), nil -} diff --git a/ste/sender-blockBlobFromURL.go b/ste/sender-blockBlobFromURL.go index fa99c7535..dc6e47755 100644 --- a/ste/sender-blockBlobFromURL.go +++ b/ste/sender-blockBlobFromURL.go @@ -23,11 +23,12 @@ package ste import ( "bytes" "fmt" - "net/url" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob" "sync/atomic" "github.com/Azure/azure-pipeline-go/pipeline" - "github.com/Azure/azure-storage-blob-go/azblob" "github.com/Azure/azure-storage-azcopy/v10/common" ) @@ -35,20 +36,20 @@ import ( type urlToBlockBlobCopier struct { blockBlobSenderBase - srcURL url.URL + srcURL string } -func newURLToBlockBlobCopier(jptm IJobPartTransferMgr, destination string, p pipeline.Pipeline, pacer pacer, srcInfoProvider IRemoteSourceInfoProvider) (s2sCopier, error) { +func newURLToBlockBlobCopier(jptm IJobPartTransferMgr, destination string, pacer pacer, srcInfoProvider IRemoteSourceInfoProvider) (s2sCopier, error) { // Get blob tier, by default set none. - destBlobTier := azblob.AccessTierNone + var destBlobTier blob.AccessTier // If the source is block blob, preserve source's blob tier. if blobSrcInfoProvider, ok := srcInfoProvider.(IBlobSourceInfoProvider); ok { - if blobSrcInfoProvider.BlobType() == azblob.BlobBlockBlob { + if blobSrcInfoProvider.BlobType() == blob.BlobTypeBlockBlob { destBlobTier = blobSrcInfoProvider.BlobTier() } } - senderBase, err := newBlockBlobSenderBase(jptm, destination, p, pacer, srcInfoProvider, destBlobTier) + senderBase, err := newBlockBlobSenderBase(jptm, destination, pacer, srcInfoProvider, destBlobTier) if err != nil { return nil, err } @@ -60,7 +61,7 @@ func newURLToBlockBlobCopier(jptm IJobPartTransferMgr, destination string, p pip return &urlToBlockBlobCopier{ blockBlobSenderBase: *senderBase, - srcURL: *srcURL}, nil + srcURL: srcURL}, nil } // Returns a chunk-func for blob copies @@ -70,7 +71,7 @@ func (c *urlToBlockBlobCopier) GenerateCopyFunc(id common.ChunkID, blockIndex in return c.generateCreateEmptyBlob(id) } // Small blobs from all sources will be copied over to destination using PutBlobFromUrl - if c.NumChunks() == 1 && adjustedChunkSize <= int64(azblob.BlockBlobMaxUploadBlobBytes) { + if c.NumChunks() == 1 && adjustedChunkSize <= int64(blockblob.MaxUploadBlobBytes) { /* * siminsavani: FYI: For GCP, if the blob is the entirety of the file, GCP still returns * invalid error from service due to PutBlockFromUrl. 
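For reference, the pattern these hunks adopt — replacing Track 1 calls that took long positional argument lists with Track 2 calls that take a single options struct — can be sketched in isolation as follows. This is a minimal, standalone example and not part of the patch: the account, container, SAS placeholder, content type, metadata, and tag values are all hypothetical; only the Upload/UploadOptions shape mirrors what the migrated sender code above uses.

package main

import (
	"bytes"
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
)

func main() {
	// Hypothetical destination; a real SAS token would be required for this to run.
	client, err := blockblob.NewClientWithNoCredential(
		"https://account.blob.core.windows.net/container/blob?<SAS>", nil)
	if err != nil {
		log.Fatal(err)
	}

	// Track 2 groups what were positional arguments in Track 1 (headers, metadata,
	// tier, tags, CPK) into one options struct, as in the CommitBlockList and Upload
	// calls in the hunks above.
	_, err = client.Upload(context.TODO(),
		streaming.NopCloser(bytes.NewReader([]byte("hello"))),
		&blockblob.UploadOptions{
			HTTPHeaders: &blob.HTTPHeaders{BlobContentType: to.Ptr("text/plain")},
			Metadata:    map[string]*string{"origin": to.Ptr("sketch")},
			Tier:        to.Ptr(blob.AccessTierHot),
			Tags:        map[string]string{"sample": "true"},
		})
	if err != nil {
		log.Fatal(err)
	}
}
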
@@ -91,31 +92,40 @@ func (c *urlToBlockBlobCopier) generateCreateEmptyBlob(id common.ChunkID) chunkF jptm.LogChunkStatus(id, common.EWaitReason.S2SCopyOnWire()) // Create blob and finish. - if !ValidateTier(jptm, c.destBlobTier, c.destBlockBlobURL.BlobURL, c.jptm.Context(), false) { - c.destBlobTier = azblob.DefaultAccessTier + if !ValidateTier(jptm, c.destBlobTier, c.destBlockBlobClient, c.jptm.Context(), false) { + c.destBlobTier = "" } blobTags := c.blobTagsToApply - separateSetTagsRequired := separateSetTagsRequired(blobTags) - if separateSetTagsRequired || len(blobTags) == 0 { + setTags := separateSetTagsRequired(blobTags) + if setTags || len(blobTags) == 0 { blobTags = nil } // TODO: Remove this snippet once service starts supporting CPK with blob tier - destBlobTier := c.destBlobTier - if c.cpkToApply.EncryptionScope != nil || (c.cpkToApply.EncryptionKey != nil && c.cpkToApply.EncryptionKeySha256 != nil) { - destBlobTier = azblob.AccessTierNone + destBlobTier := &c.destBlobTier + if c.jptm.IsSourceEncrypted() { + destBlobTier = nil } - if _, err := c.destBlockBlobURL.Upload(c.jptm.Context(), bytes.NewReader(nil), c.headersToApply, c.metadataToApply, azblob.BlobAccessConditions{}, destBlobTier, blobTags, c.cpkToApply, azblob.ImmutabilityPolicyOptions{}); err != nil { + _, err := c.destBlockBlobClient.Upload(c.jptm.Context(), streaming.NopCloser(bytes.NewReader(nil)), + &blockblob.UploadOptions{ + HTTPHeaders: &c.headersToApply, + Metadata: c.metadataToApply, + Tier: destBlobTier, + Tags: blobTags, + CPKInfo: c.jptm.CpkInfo(), + CPKScopeInfo: c.jptm.CpkScopeInfo(), + }) + if err != nil { jptm.FailActiveSend("Creating empty blob", err) return } atomic.AddInt32(&c.atomicChunksWritten, 1) - if separateSetTagsRequired { - if _, err := c.destBlockBlobURL.SetTags(jptm.Context(), nil, nil, nil, c.blobTagsToApply); err != nil { + if setTags { + if _, err := c.destBlockBlobClient.SetTags(jptm.Context(), c.blobTagsToApply, nil); err != nil { c.jptm.Log(pipeline.LogWarning, err.Error()) } } @@ -143,8 +153,18 @@ func (c *urlToBlockBlobCopier) generatePutBlockFromURL(id common.ChunkID, blockI if err := c.pacer.RequestTrafficAllocation(c.jptm.Context(), adjustedChunkSize); err != nil { c.jptm.FailActiveUpload("Pacing block", err) } - _, err := c.destBlockBlobURL.StageBlockFromURL(c.jptm.Context(), encodedBlockID, c.srcURL, - id.OffsetInFile(), adjustedChunkSize, azblob.LeaseAccessConditions{}, azblob.ModifiedAccessConditions{}, c.cpkToApply, c.jptm.GetS2SSourceBlobTokenCredential()) + token, err := c.jptm.GetS2SSourceTokenCredential(c.jptm.Context()) + if err != nil { + c.jptm.FailActiveS2SCopy("Getting source token credential", err) + return + } + _, err = c.destBlockBlobClient.StageBlockFromURL(c.jptm.Context(), encodedBlockID, c.srcURL, + &blockblob.StageBlockFromURLOptions{ + Range: blob.HTTPRange{Offset: id.OffsetInFile(), Count: adjustedChunkSize}, + CPKInfo: c.jptm.CpkInfo(), + CPKScopeInfo: c.jptm.CpkScopeInfo(), + CopySourceAuthorization: token, + }) if err != nil { c.jptm.FailActiveSend("Staging block from URL", err) return @@ -160,29 +180,41 @@ func (c *urlToBlockBlobCopier) generateStartPutBlobFromURL(id common.ChunkID, bl c.jptm.LogChunkStatus(id, common.EWaitReason.S2SCopyOnWire()) // Create blob and finish. 
- if !ValidateTier(c.jptm, c.destBlobTier, c.destBlockBlobURL.BlobURL, c.jptm.Context(), false) { - c.destBlobTier = azblob.DefaultAccessTier + if !ValidateTier(c.jptm, c.destBlobTier, c.destBlockBlobClient, c.jptm.Context(), false) { + c.destBlobTier = "" } blobTags := c.blobTagsToApply - separateSetTagsRequired := separateSetTagsRequired(blobTags) - if separateSetTagsRequired || len(blobTags) == 0 { + setTags := separateSetTagsRequired(blobTags) + if setTags || len(blobTags) == 0 { blobTags = nil } // TODO: Remove this snippet once service starts supporting CPK with blob tier - destBlobTier := c.destBlobTier - if c.cpkToApply.EncryptionScope != nil || (c.cpkToApply.EncryptionKey != nil && c.cpkToApply.EncryptionKeySha256 != nil) { - destBlobTier = azblob.AccessTierNone + destBlobTier := &c.destBlobTier + if c.jptm.IsSourceEncrypted() { + destBlobTier = nil } if err := c.pacer.RequestTrafficAllocation(c.jptm.Context(), adjustedChunkSize); err != nil { c.jptm.FailActiveUpload("Pacing block", err) } + token, err := c.jptm.GetS2SSourceTokenCredential(c.jptm.Context()) + if err != nil { + c.jptm.FailActiveS2SCopy("Getting source token credential", err) + return + } - _, err := c.destBlockBlobURL.PutBlobFromURL(c.jptm.Context(), c.headersToApply, c.srcURL, c.metadataToApply, - azblob.ModifiedAccessConditions{}, azblob.BlobAccessConditions{}, nil, nil, destBlobTier, blobTags, - c.cpkToApply, c.jptm.GetS2SSourceBlobTokenCredential()) + _, err = c.destBlockBlobClient.UploadBlobFromURL(c.jptm.Context(), c.srcURL, + &blockblob.UploadBlobFromURLOptions{ + HTTPHeaders: &c.headersToApply, + Metadata: c.metadataToApply, + Tier: destBlobTier, + Tags: blobTags, + CPKInfo: c.jptm.CpkInfo(), + CPKScopeInfo: c.jptm.CpkScopeInfo(), + CopySourceAuthorization: token, + }) if err != nil { c.jptm.FailActiveSend("Put Blob from URL", err) @@ -191,20 +223,10 @@ func (c *urlToBlockBlobCopier) generateStartPutBlobFromURL(id common.ChunkID, bl atomic.AddInt32(&c.atomicChunksWritten, 1) - if separateSetTagsRequired { - if _, err := c.destBlockBlobURL.SetTags(c.jptm.Context(), nil, nil, nil, c.blobTagsToApply); err != nil { + if setTags { + if _, err := c.destBlockBlobClient.SetTags(c.jptm.Context(), c.blobTagsToApply, nil); err != nil { c.jptm.Log(pipeline.LogWarning, err.Error()) } } }) } - -// GetDestinationLength gets the destination length. 
-func (c *urlToBlockBlobCopier) GetDestinationLength() (int64, error) { - properties, err := c.destBlockBlobURL.GetProperties(c.jptm.Context(), azblob.BlobAccessConditions{}, c.cpkToApply) - if err != nil { - return -1, err - } - - return properties.ContentLength(), nil -} diff --git a/ste/sender-pageBlob.go b/ste/sender-pageBlob.go index 4adcaf3d7..278776f1f 100644 --- a/ste/sender-pageBlob.go +++ b/ste/sender-pageBlob.go @@ -24,35 +24,36 @@ import ( "context" "errors" "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob" "net/url" "regexp" "strings" "time" "github.com/Azure/azure-pipeline-go/pipeline" - "github.com/Azure/azure-storage-blob-go/azblob" "github.com/Azure/azure-storage-azcopy/v10/common" ) type pageBlobSenderBase struct { - jptm IJobPartTransferMgr - destPageBlobURL azblob.PageBlobURL - srcSize int64 - chunkSize int64 - numChunks uint32 - pacer pacer + jptm IJobPartTransferMgr + destPageBlobClient *pageblob.Client + srcSize int64 + chunkSize int64 + numChunks uint32 + pacer pacer // Headers and other info that we will apply to the destination // object. For S2S, these come from the source service. // When sending local data, they are computed based on // the properties of the local file - headersToApply azblob.BlobHTTPHeaders - metadataToApply azblob.Metadata - blobTagsToApply azblob.BlobTagsMap - cpkToApply azblob.ClientProvidedKeyOptions + headersToApply blob.HTTPHeaders + metadataToApply common.Metadata + blobTagsToApply common.BlobTags - destBlobTier azblob.AccessTierType + destBlobTier blob.AccessTier // filePacer is necessary because page blobs have per-blob throughput limits. The limits depend on // what type of page blob it is (e.g. premium) and can be significantly lower than the blob account limit. // Using a automatic pacer here lets us find the right rate for this particular page blob, at which @@ -79,33 +80,28 @@ var ( md5NotSupportedInManagedDiskError = errors.New("the Content-MD5 hash is not supported for managed disk uploads") ) -func newPageBlobSenderBase(jptm IJobPartTransferMgr, destination string, p pipeline.Pipeline, pacer pacer, srcInfoProvider ISourceInfoProvider, inferredAccessTierType azblob.AccessTierType) (*pageBlobSenderBase, error) { +func newPageBlobSenderBase(jptm IJobPartTransferMgr, destination string, pacer pacer, srcInfoProvider ISourceInfoProvider, inferredAccessTierType blob.AccessTier) (*pageBlobSenderBase, error) { transferInfo := jptm.Info() // compute chunk count chunkSize := transferInfo.BlockSize // If the given chunk Size for the Job is invalid for page blob or greater than maximum page size, // then set chunkSize as maximum pageSize. - chunkSize = common.Iffint64( - chunkSize > common.DefaultPageBlobChunkSize || (chunkSize%azblob.PageBlobPageBytes != 0), + chunkSize = common.Iff( + chunkSize > common.DefaultPageBlobChunkSize || (chunkSize%pageblob.PageBytes != 0), common.DefaultPageBlobChunkSize, chunkSize) srcSize := transferInfo.SourceSize numChunks := getNumChunks(srcSize, chunkSize) - destURL, err := url.Parse(destination) - if err != nil { - return nil, err - } - - destPageBlobURL := azblob.NewPageBlobURL(*destURL, p) + destPageBlobClient := common.CreatePageBlobClient(destination, jptm.CredentialInfo(), jptm.CredentialOpOptions(), jptm.ClientOptions()) // This is only necessary if our destination is a managed disk impexp account. // Read the in struct explanation if necessary. 
var destRangeOptimizer *pageRangeOptimizer - if isInManagedDiskImportExportAccount(*destURL) { - destRangeOptimizer = newPageRangeOptimizer(destPageBlobURL, jptm.Context()) + if isInManagedDiskImportExportAccount(destination) { + destRangeOptimizer = newPageRangeOptimizer(destPageBlobClient, jptm.Context()) } props, err := srcInfoProvider.Properties() @@ -121,22 +117,18 @@ func newPageBlobSenderBase(jptm IJobPartTransferMgr, destination string, p pipel destBlobTier = pageBlobTierOverride.ToAccessTierType() } - // Once track2 goes live, we'll not need to do this conversion/casting and can directly use CpkInfo & CpkScopeInfo - cpkToApply := common.ToClientProvidedKeyOptions(jptm.CpkInfo(), jptm.CpkScopeInfo()) - s := &pageBlobSenderBase{ jptm: jptm, - destPageBlobURL: destPageBlobURL, + destPageBlobClient: destPageBlobClient, srcSize: srcSize, chunkSize: chunkSize, numChunks: numChunks, pacer: pacer, - headersToApply: props.SrcHTTPHeaders.ToAzBlobHTTPHeaders(), - metadataToApply: props.SrcMetadata.ToAzBlobMetadata(), - blobTagsToApply: props.SrcBlobTags.ToAzBlobTagsMap(), + headersToApply: props.SrcHTTPHeaders.ToBlobHTTPHeaders(), + metadataToApply: props.SrcMetadata, + blobTagsToApply: props.SrcBlobTags, destBlobTier: destBlobTier, filePacer: NewNullAutoPacer(), // defer creation of real one to Prologue - cpkToApply: cpkToApply, destPageRangeOptimizer: destRangeOptimizer, } @@ -148,12 +140,16 @@ func newPageBlobSenderBase(jptm IJobPartTransferMgr, destination string, p pipel } // these accounts have special restrictions of which APIs operations they support -func isInManagedDiskImportExportAccount(u url.URL) bool { +func isInManagedDiskImportExportAccount(rawURL string) bool { + u, err := url.Parse(rawURL) + if err != nil { + return false + } return strings.HasPrefix(u.Host, managedDiskImportExportAccountPrefix) } func (s *pageBlobSenderBase) isInManagedDiskImportExportAccount() bool { - return isInManagedDiskImportExportAccount(s.destPageBlobURL.URL()) + return isInManagedDiskImportExportAccount(s.destPageBlobClient.URL()) } func (s *pageBlobSenderBase) SendableEntityType() common.EntityType { @@ -169,7 +165,8 @@ func (s *pageBlobSenderBase) NumChunks() uint32 { } func (s *pageBlobSenderBase) RemoteFileExists() (bool, time.Time, error) { - return remoteObjectExists(s.destPageBlobURL.GetProperties(s.jptm.Context(), azblob.BlobAccessConditions{}, s.cpkToApply)) + properties, err := s.destPageBlobClient.GetProperties(s.jptm.Context(), &blob.GetPropertiesOptions{CPKInfo: s.jptm.CpkInfo()}) + return remoteObjectExists(blobPropertiesResponseAdapter{properties}, err) } var premiumPageBlobTierRegex = regexp.MustCompile(`P\d+`) @@ -200,14 +197,18 @@ func (s *pageBlobSenderBase) Prologue(ps common.PrologueState) (destinationModif // FileSize : 1073742336 (equals our s.srcSize, i.e. the size of the disk file) // Size : 1073741824 - p, err := s.destPageBlobURL.GetProperties(s.jptm.Context(), azblob.BlobAccessConditions{}, s.cpkToApply) + p, err := s.destPageBlobClient.GetProperties(s.jptm.Context(), &blob.GetPropertiesOptions{CPKInfo: s.jptm.CpkInfo()}) if err != nil { s.jptm.FailActiveSend("Checking size of managed disk blob", err) return } - if s.srcSize != p.ContentLength() { + if p.ContentLength == nil { + sizeErr := fmt.Errorf("destination content length not returned") + s.jptm.FailActiveSend("Checking size of managed disk blob", sizeErr) + } + if s.srcSize != *p.ContentLength { sizeErr := fmt.Errorf("source file is not same size as the destination page blob. 
Source size is %d bytes but destination size is %d bytes. Re-create the destination with exactly the right size. E.g. see parameter UploadSizeInBytes in PowerShell's New-AzDiskConfig. Ensure the source is a fixed-size VHD", - s.srcSize, p.ContentLength()) + s.srcSize, *p.ContentLength) s.jptm.FailActiveSend("Checking size of managed disk blob", sizeErr) return } @@ -222,44 +223,44 @@ func (s *pageBlobSenderBase) Prologue(ps common.PrologueState) (destinationModif if s.jptm.ShouldInferContentType() { // sometimes, specifically when reading local files, we have more info // about the file type at this time than what we had before - s.headersToApply.ContentType = ps.GetInferredContentType(s.jptm) + s.headersToApply.BlobContentType = ps.GetInferredContentType(s.jptm) } - destBlobTier := azblob.PremiumPageBlobAccessTierType(s.destBlobTier) - if !ValidateTier(s.jptm, s.destBlobTier, s.destPageBlobURL.BlobURL, s.jptm.Context(), false) { - destBlobTier = azblob.DefaultPremiumBlobAccessTier + t := pageblob.PremiumPageBlobAccessTier(s.destBlobTier) + destBlobTier := &t + if !ValidateTier(s.jptm, s.destBlobTier, s.destPageBlobClient, s.jptm.Context(), false) { + destBlobTier = nil + } + // TODO: Remove this snippet once service starts supporting CPK with blob tier + if s.jptm.IsSourceEncrypted() { + destBlobTier = nil } blobTags := s.blobTagsToApply - separateSetTagsRequired := separateSetTagsRequired(blobTags) - if separateSetTagsRequired || len(blobTags) == 0 { + setTags := separateSetTagsRequired(blobTags) + if setTags || len(blobTags) == 0 { blobTags = nil } - // TODO: Remove this snippet once service starts supporting CPK with blob tier - if s.cpkToApply.EncryptionScope != nil || (s.cpkToApply.EncryptionKey != nil && s.cpkToApply.EncryptionKeySha256 != nil) { - destBlobTier = azblob.PremiumPageBlobAccessTierNone - } - - if _, err := s.destPageBlobURL.Create(s.jptm.Context(), - s.srcSize, - 0, - s.headersToApply, - s.metadataToApply, - azblob.BlobAccessConditions{}, - destBlobTier, - blobTags, - s.cpkToApply, - azblob.ImmutabilityPolicyOptions{}, - ); err != nil { + _, err := s.destPageBlobClient.Create(s.jptm.Context(), s.srcSize, + &pageblob.CreateOptions{ + SequenceNumber: to.Ptr(int64(0)), + HTTPHeaders: &s.headersToApply, + Metadata: s.metadataToApply, + Tier: destBlobTier, + Tags: blobTags, + CPKInfo: s.jptm.CpkInfo(), + CPKScopeInfo: s.jptm.CpkScopeInfo(), + }) + if err != nil { s.jptm.FailActiveSend("Creating blob", err) return } destinationModified = true - if separateSetTagsRequired { - if _, err := s.destPageBlobURL.SetTags(s.jptm.Context(), nil, nil, nil, s.blobTagsToApply); err != nil { + if setTags { + if _, err := s.destPageBlobClient.SetTags(s.jptm.Context(), s.blobTagsToApply, nil); err != nil { s.jptm.Log(pipeline.LogWarning, err.Error()) } } @@ -281,10 +282,24 @@ func (s *pageBlobSenderBase) Cleanup() { } else { deletionContext, cancelFunc := context.WithTimeout(context.WithValue(context.Background(), ServiceAPIVersionOverride, DefaultServiceApiVersion), 30*time.Second) defer cancelFunc() - _, err := s.destPageBlobURL.Delete(deletionContext, azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{}) + _, err := s.destPageBlobClient.Delete(deletionContext, nil) if err != nil { - jptm.LogError(s.destPageBlobURL.String(), "Delete (incomplete) Page Blob ", err) + jptm.LogError(s.destPageBlobClient.URL(), "Delete (incomplete) Page Blob ", err) } } } } + +// GetDestinationLength gets the destination length. 
+func (s *pageBlobSenderBase) GetDestinationLength() (int64, error) { + prop, err := s.destPageBlobClient.GetProperties(s.jptm.Context(), &blob.GetPropertiesOptions{CPKInfo: s.jptm.CpkInfo()}) + + if err != nil { + return -1, err + } + + if prop.ContentLength == nil { + return -1, fmt.Errorf("destination content length not returned") + } + return *prop.ContentLength, nil +} diff --git a/ste/sender-pageBlobFromLocal.go b/ste/sender-pageBlobFromLocal.go index bcb7aef05..8836f697f 100644 --- a/ste/sender-pageBlobFromLocal.go +++ b/ste/sender-pageBlobFromLocal.go @@ -22,10 +22,12 @@ package ste import ( "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob" "github.com/Azure/azure-pipeline-go/pipeline" "github.com/Azure/azure-storage-azcopy/v10/common" - "github.com/Azure/azure-storage-blob-go/azblob" ) type pageBlobUploader struct { @@ -35,8 +37,8 @@ type pageBlobUploader struct { sip ISourceInfoProvider } -func newPageBlobUploader(jptm IJobPartTransferMgr, destination string, p pipeline.Pipeline, pacer pacer, sip ISourceInfoProvider) (sender, error) { - senderBase, err := newPageBlobSenderBase(jptm, destination, p, pacer, sip, azblob.AccessTierNone) +func newPageBlobUploader(jptm IJobPartTransferMgr, destination string, pacer pacer, sip ISourceInfoProvider) (sender, error) { + senderBase, err := newPageBlobSenderBase(jptm, destination, pacer, sip, "") if err != nil { return nil, err } @@ -48,7 +50,7 @@ func (u *pageBlobUploader) Prologue(ps common.PrologueState) (destinationModifie if u.jptm.Info().PreservePOSIXProperties { if unixSIP, ok := u.sip.(IUNIXPropertyBearingSourceInfoProvider); ok { // Clone the metadata before we write to it, we shouldn't be writing to the same metadata as every other blob. - u.metadataToApply = common.Metadata(u.metadataToApply).Clone().ToAzBlobMetadata() + u.metadataToApply = u.metadataToApply.Clone() statAdapter, err := unixSIP.GetUNIXProperties() if err != nil { @@ -84,9 +86,9 @@ func (u *pageBlobUploader) GenerateUploadFunc(id common.ChunkID, blockIndex int3 // in the event the page blob uploader is sending to a managed disk. 
if u.destPageRangeOptimizer != nil { destContainsData = u.destPageRangeOptimizer.doesRangeContainData( - azblob.PageRange{ - Start: id.OffsetInFile(), - End: id.OffsetInFile() + reader.Length() - 1, + pageblob.PageRange{ + Start: to.Ptr(id.OffsetInFile()), + End: to.Ptr(id.OffsetInFile() + reader.Length() - 1), }) } @@ -112,7 +114,11 @@ func (u *pageBlobUploader) GenerateUploadFunc(id common.ChunkID, blockIndex int3 jptm.LogChunkStatus(id, common.EWaitReason.Body()) body := newPacedRequestBody(jptm.Context(), reader, u.pacer) enrichedContext := withRetryNotification(jptm.Context(), u.filePacer) - _, err := u.destPageBlobURL.UploadPages(enrichedContext, id.OffsetInFile(), body, azblob.PageBlobAccessConditions{}, nil, u.cpkToApply) + _, err := u.destPageBlobClient.UploadPages(enrichedContext, body, blob.HTTPRange{Offset: id.OffsetInFile(), Count: reader.Length()}, + &pageblob.UploadPagesOptions{ + CPKInfo: u.jptm.CpkInfo(), + CPKScopeInfo: u.jptm.CpkScopeInfo(), + }) if err != nil { jptm.FailActiveUpload("Uploading page", err) return @@ -127,21 +133,11 @@ func (u *pageBlobUploader) Epilogue() { if jptm.IsLive() && !u.isInManagedDiskImportExportAccount() { tryPutMd5Hash(jptm, u.md5Channel, func(md5Hash []byte) error { epilogueHeaders := u.headersToApply - epilogueHeaders.ContentMD5 = md5Hash - _, err := u.destPageBlobURL.SetHTTPHeaders(jptm.Context(), epilogueHeaders, azblob.BlobAccessConditions{}) + epilogueHeaders.BlobContentMD5 = md5Hash + _, err := u.destPageBlobClient.SetHTTPHeaders(jptm.Context(), epilogueHeaders, nil) return err }) } u.pageBlobSenderBase.Epilogue() } - -func (u *pageBlobUploader) GetDestinationLength() (int64, error) { - prop, err := u.destPageBlobURL.GetProperties(u.jptm.Context(), azblob.BlobAccessConditions{}, u.cpkToApply) - - if err != nil { - return -1, err - } - - return prop.ContentLength(), nil -} diff --git a/ste/sender-pageBlobFromURL.go b/ste/sender-pageBlobFromURL.go index adb953098..0b846eca0 100644 --- a/ste/sender-pageBlobFromURL.go +++ b/ste/sender-pageBlobFromURL.go @@ -22,48 +22,48 @@ package ste import ( "context" - "net/url" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob" "strings" - "github.com/Azure/azure-pipeline-go/pipeline" - "github.com/Azure/azure-storage-blob-go/azblob" - "github.com/Azure/azure-storage-azcopy/v10/common" ) type urlToPageBlobCopier struct { pageBlobSenderBase - srcURL url.URL + srcURL string sourcePageRangeOptimizer *pageRangeOptimizer // nil if src is not a page blob } -func newURLToPageBlobCopier(jptm IJobPartTransferMgr, destination string, p pipeline.Pipeline, pacer pacer, srcInfoProvider IRemoteSourceInfoProvider) (s2sCopier, error) { +func newURLToPageBlobCopier(jptm IJobPartTransferMgr, destination string, pacer pacer, srcInfoProvider IRemoteSourceInfoProvider) (s2sCopier, error) { srcURL, err := srcInfoProvider.PreSignedSourceURL() if err != nil { return nil, err } + srcPageBlobClient := common.CreatePageBlobClient(srcURL, jptm.S2SSourceCredentialInfo(), jptm.CredentialOpOptions(), jptm.ClientOptions()) - destBlobTier := azblob.AccessTierNone + var destBlobTier blob.AccessTier var pageRangeOptimizer *pageRangeOptimizer if blobSrcInfoProvider, ok := srcInfoProvider.(IBlobSourceInfoProvider); ok { - if blobSrcInfoProvider.BlobType() == azblob.BlobPageBlob { + if blobSrcInfoProvider.BlobType() == blob.BlobTypePageBlob { // if the source is page blob, preserve source's blob tier. 
destBlobTier = blobSrcInfoProvider.BlobTier() // capture the necessary info so that we can perform optimizations later - pageRangeOptimizer = newPageRangeOptimizer(azblob.NewPageBlobURL(*srcURL, jptm.SourceProviderPipeline()), jptm.Context()) + pageRangeOptimizer = newPageRangeOptimizer(srcPageBlobClient, jptm.Context()) } } - senderBase, err := newPageBlobSenderBase(jptm, destination, p, pacer, srcInfoProvider, destBlobTier) + senderBase, err := newPageBlobSenderBase(jptm, destination, pacer, srcInfoProvider, destBlobTier) if err != nil { return nil, err } return &urlToPageBlobCopier{ pageBlobSenderBase: *senderBase, - srcURL: *srcURL, + srcURL: srcURL, sourcePageRangeOptimizer: pageRangeOptimizer}, nil } @@ -87,7 +87,7 @@ func (c *urlToPageBlobCopier) GenerateCopyFunc(id common.ChunkID, blockIndex int } // if there's no data at the source (and the destination for managed disks), skip this chunk - pageRange := azblob.PageRange{Start: id.OffsetInFile(), End: id.OffsetInFile() + adjustedChunkSize - 1} + pageRange := pageblob.PageRange{Start: to.Ptr(id.OffsetInFile()), End: to.Ptr(id.OffsetInFile() + adjustedChunkSize - 1)} if c.sourcePageRangeOptimizer != nil && !c.sourcePageRangeOptimizer.doesRangeContainData(pageRange) { var destContainsData bool @@ -120,9 +120,17 @@ func (c *urlToPageBlobCopier) GenerateCopyFunc(id common.ChunkID, blockIndex int if err := c.pacer.RequestTrafficAllocation(c.jptm.Context(), adjustedChunkSize); err != nil { c.jptm.FailActiveUpload("Pacing block (global level)", err) } - _, err := c.destPageBlobURL.UploadPagesFromURL( - enrichedContext, c.srcURL, id.OffsetInFile(), id.OffsetInFile(), adjustedChunkSize, nil, - azblob.PageBlobAccessConditions{}, azblob.ModifiedAccessConditions{}, c.cpkToApply, c.jptm.GetS2SSourceBlobTokenCredential()) + token, err := c.jptm.GetS2SSourceTokenCredential(c.jptm.Context()) + if err != nil { + c.jptm.FailActiveS2SCopy("Getting source token credential", err) + return + } + _, err = c.destPageBlobClient.UploadPagesFromURL(enrichedContext, c.srcURL, id.OffsetInFile(), id.OffsetInFile(), adjustedChunkSize, + &pageblob.UploadPagesFromURLOptions{ + CPKInfo: c.jptm.CpkInfo(), + CPKScopeInfo: c.jptm.CpkScopeInfo(), + CopySourceAuthorization: token, + }) if err != nil { c.jptm.FailActiveS2SCopy("Uploading page from URL", err) return @@ -130,28 +138,18 @@ func (c *urlToPageBlobCopier) GenerateCopyFunc(id common.ChunkID, blockIndex int }) } -// GetDestinationLength gets the destination length. -func (c *urlToPageBlobCopier) GetDestinationLength() (int64, error) { - properties, err := c.destPageBlobURL.GetProperties(c.jptm.Context(), azblob.BlobAccessConditions{}, c.cpkToApply) - if err != nil { - return -1, err - } - - return properties.ContentLength(), nil -} - // isolate the logic to fetch page ranges for a page blob, and check whether a given range has data // for two purposes: // 1. capture the necessary info to do so, so that fetchPages can be invoked anywhere // 2. 
open to extending the logic, which could be re-used for both download and s2s scenarios type pageRangeOptimizer struct { - srcPageBlobURL azblob.PageBlobURL + srcPageBlobClient *pageblob.Client ctx context.Context - srcPageList *azblob.PageList // nil if src is not a page blob, or it was not possible to get a response + srcPageList *pageblob.PageList // nil if src is not a page blob, or it was not possible to get a response } -func newPageRangeOptimizer(srcPageBlobURL azblob.PageBlobURL, ctx context.Context) *pageRangeOptimizer { - return &pageRangeOptimizer{srcPageBlobURL: srcPageBlobURL, ctx: ctx} +func newPageRangeOptimizer(srcPageBlobClient *pageblob.Client, ctx context.Context) *pageRangeOptimizer { + return &pageRangeOptimizer{srcPageBlobClient: srcPageBlobClient, ctx: ctx} } func (p *pageRangeOptimizer) fetchPages() { @@ -169,14 +167,24 @@ func (p *pageRangeOptimizer) fetchPages() { // TODO follow up with the service folks to confirm the scale at which the timeouts occur // TODO perhaps we need to add more logic here to optimize for more cases limitedContext := withNoRetryForBlob(p.ctx) // we don't want retries here. If it doesn't work the first time, we don't want to chew up (lots) time retrying - pageList, err := p.srcPageBlobURL.GetPageRanges(limitedContext, 0, 0, azblob.BlobAccessConditions{}) - if err == nil { - p.srcPageList = pageList + pager := p.srcPageBlobClient.NewGetPageRangesPager(nil) + + for pager.More() { + pageList, err := pager.NextPage(limitedContext) + if err == nil { + if p.srcPageList == nil { + p.srcPageList = &pageList.PageList + } else { + p.srcPageList.PageRange = append(p.srcPageList.PageRange, pageList.PageRange...) + p.srcPageList.ClearRange = append(p.srcPageList.ClearRange, pageList.ClearRange...) + p.srcPageList.NextMarker = pageList.NextMarker + } + } } } // check whether a particular given range is worth transferring, i.e. 
whether there's data at the source -func (p *pageRangeOptimizer) doesRangeContainData(givenRange azblob.PageRange) bool { +func (p *pageRangeOptimizer) doesRangeContainData(givenRange pageblob.PageRange) bool { // if we have no page list stored, then assume there's data everywhere // (this is particularly important when we are using this code not just for performance, but also // for correctness - as we do when using on the destination of a managed disk upload) @@ -186,13 +194,13 @@ func (p *pageRangeOptimizer) doesRangeContainData(givenRange azblob.PageRange) b // note that the page list is ordered in increasing order (in terms of position) for _, srcRange := range p.srcPageList.PageRange { - if givenRange.End < srcRange.Start { + if *givenRange.End < *srcRange.Start { // case 1: due to the nature of the list (it's sorted), if we've reached such a srcRange // we've checked all the appropriate srcRange already and haven't found any overlapping srcRange // given range: | | // source range: | | return false - } else if srcRange.End < givenRange.Start { + } else if *srcRange.End < *givenRange.Start { // case 2: the givenRange comes after srcRange, continue checking // given range: | | // source range: | | diff --git a/ste/sender.go b/ste/sender.go index 152c3c7d2..096876f01 100644 --- a/ste/sender.go +++ b/ste/sender.go @@ -22,11 +22,10 @@ package ste import ( "errors" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" "time" "github.com/Azure/azure-pipeline-go/pipeline" - "github.com/Azure/azure-storage-blob-go/azblob" - "github.com/Azure/azure-storage-azcopy/v10/common" ) @@ -165,7 +164,7 @@ func getNumChunks(fileSize int64, chunkSize int64) uint32 { numChunks := uint32(1) // we always map zero-size source files to ONE (empty) chunk if fileSize > 0 { chunkSizeI := chunkSize - numChunks = common.Iffuint32( + numChunks = common.Iff( fileSize%chunkSizeI == 0, uint32(fileSize/chunkSizeI), uint32(fileSize/chunkSizeI)+1) @@ -213,37 +212,37 @@ func createChunkFunc(setDoneStatusOnExit bool, jptm IJobPartTransferMgr, id comm // newBlobUploader detects blob type and creates a uploader manually func newBlobUploader(jptm IJobPartTransferMgr, destination string, p pipeline.Pipeline, pacer pacer, sip ISourceInfoProvider) (sender, error) { override := jptm.BlobTypeOverride() - intendedType := override.ToAzBlobType() + intendedType := override.ToBlobType() if override == common.EBlobType.Detect() { - intendedType = inferBlobType(jptm.Info().Source, azblob.BlobBlockBlob) + intendedType = inferBlobType(jptm.Info().Source, blob.BlobTypeBlockBlob) // jptm.LogTransferInfo(fmt.Sprintf("Autodetected %s blob type as %s.", jptm.Info().Source , intendedType)) // TODO: Log these? @JohnRusk and @zezha-msft this creates quite a bit of spam in the logs but is important info. // TODO: Perhaps we should log it only if it isn't a block blob? 
} if jptm.Info().IsFolderPropertiesTransfer() { - return newBlobFolderSender(jptm, destination, p, pacer, sip) + return newBlobFolderSender(jptm, destination, sip) } else if jptm.Info().EntityType == common.EEntityType.Symlink() { - return newBlobSymlinkSender(jptm, destination, p, pacer, sip) + return newBlobSymlinkSender(jptm, destination, sip) } switch intendedType { - case azblob.BlobBlockBlob: - return newBlockBlobUploader(jptm, destination, p, pacer, sip) - case azblob.BlobPageBlob: - return newPageBlobUploader(jptm, destination, p, pacer, sip) - case azblob.BlobAppendBlob: - return newAppendBlobUploader(jptm, destination, p, pacer, sip) + case blob.BlobTypeBlockBlob: + return newBlockBlobUploader(jptm, destination, pacer, sip) + case blob.BlobTypePageBlob: + return newPageBlobUploader(jptm, destination, pacer, sip) + case blob.BlobTypeAppendBlob: + return newAppendBlobUploader(jptm, destination, pacer, sip) default: - return newBlockBlobUploader(jptm, destination, p, pacer, sip) // If no blob type was inferred, assume block blob. + return newBlockBlobUploader(jptm, destination, pacer, sip) // If no blob type was inferred, assume block blob. } } const TagsHeaderMaxLength = 2000 // If length of tags <= 2kb, pass it in the header x-ms-tags. Else do a separate SetTags call -func separateSetTagsRequired(tagsMap azblob.BlobTagsMap) bool { +func separateSetTagsRequired(tagsMap common.BlobTags) bool { tagsLength := 0 for k, v := range tagsMap { tagsLength += len(k) + len(v) + 2 diff --git a/ste/sender_pageBlobFromURL_test.go b/ste/sender_pageBlobFromURL_test.go index a0f1c1c91..d22fa7ee7 100644 --- a/ste/sender_pageBlobFromURL_test.go +++ b/ste/sender_pageBlobFromURL_test.go @@ -21,7 +21,8 @@ package ste import ( - "github.com/Azure/azure-storage-blob-go/azblob" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob" "github.com/stretchr/testify/assert" "testing" ) @@ -30,21 +31,21 @@ func TestRangeWorthTransferring(t *testing.T) { a := assert.New(t) // Arrange copier := pageRangeOptimizer{} - copier.srcPageList = &azblob.PageList{ - PageRange: []azblob.PageRange{ - {Start: 512, End: 1023}, - {Start: 2560, End: 4095}, - {Start: 7168, End: 8191}, + copier.srcPageList = &pageblob.PageList{ + PageRange: []*pageblob.PageRange{ + {Start: to.Ptr(int64(512)), End: to.Ptr(int64(1023))}, + {Start: to.Ptr(int64(2560)), End: to.Ptr(int64(4095))}, + {Start: to.Ptr(int64(7168)), End: to.Ptr(int64(8191))}, }, } - testCases := map[azblob.PageRange]bool{ - {Start: 512, End: 1023}: true, // fully included - {Start: 2048, End: 3071}: true, // overlapping - {Start: 3071, End: 4606}: true, // overlapping - {Start: 0, End: 511}: false, // before all ranges - {Start: 1536, End: 2559}: false, // in between ranges - {Start: 15360, End: 15871}: false, // all the way out + testCases := map[pageblob.PageRange]bool{ + {Start: to.Ptr(int64(512)), End: to.Ptr(int64(1023))}: true, // fully included + {Start: to.Ptr(int64(2048)), End: to.Ptr(int64(3071))}: true, // overlapping + {Start: to.Ptr(int64(3071)), End: to.Ptr(int64(4606))}: true, // overlapping + {Start: to.Ptr(int64(0)), End: to.Ptr(int64(511))}: false, // before all ranges + {Start: to.Ptr(int64(1536)), End: to.Ptr(int64(2559))}: false, // in between ranges + {Start: to.Ptr(int64(15360)), End: to.Ptr(int64(15871))}: false, // all the way out } // Action & Assert diff --git a/ste/sourceInfoProvider-Blob.go b/ste/sourceInfoProvider-Blob.go index 12df77173..87fcf05b2 100644 --- 
a/ste/sourceInfoProvider-Blob.go +++ b/ste/sourceInfoProvider-Blob.go @@ -21,6 +21,7 @@ package ste import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" "io" "net/url" "strings" @@ -28,8 +29,6 @@ import ( "github.com/Azure/azure-storage-azcopy/v10/azbfs" "github.com/Azure/azure-storage-azcopy/v10/common" - - "github.com/Azure/azure-storage-blob-go/azblob" ) // Source info provider for Azure blob @@ -41,15 +40,18 @@ func (p *blobSourceInfoProvider) IsDFSSource() bool { return p.jptm.FromTo().From() == common.ELocation.BlobFS() } -func (p *blobSourceInfoProvider) internalPresignedURL(useHNS bool) (*url.URL, error) { +func (p *blobSourceInfoProvider) internalPresignedURL(useHNS bool) (string, error) { uri, err := p.defaultRemoteSourceInfoProvider.PreSignedSourceURL() if err != nil { - return nil, err + return "", err } // This will have no real effect on non-standard endpoints (e.g. emulator, stack), and *may* work, but probably won't. // However, Stack/Emulator don't support HNS, so, this won't get use. - bURLParts := azblob.NewBlobURLParts(*uri) + bURLParts, err := blob.ParseURL(uri) + if err != nil { + return "", err + } if useHNS { bURLParts.Host = strings.Replace(bURLParts.Host, ".blob", ".dfs", 1) @@ -61,39 +63,34 @@ func (p *blobSourceInfoProvider) internalPresignedURL(useHNS bool) (*url.URL, er } else { bURLParts.Host = strings.Replace(bURLParts.Host, ".dfs", ".blob", 1) } - out := bURLParts.URL() - return &out, nil + return bURLParts.String(), nil } -func (p *blobSourceInfoProvider) PreSignedSourceURL() (*url.URL, error) { +func (p *blobSourceInfoProvider) PreSignedSourceURL() (string, error) { return p.internalPresignedURL(false) // prefer to return the blob URL; data can be read from either endpoint. } func (p *blobSourceInfoProvider) ReadLink() (string, error) { - uri, err := p.internalPresignedURL(false) + source, err := p.internalPresignedURL(false) if err != nil { return "", err } + blobClient := common.CreateBlobClient(source, p.jptm.S2SSourceCredentialInfo(), p.jptm.CredentialOpOptions(), p.jptm.S2SSourceClientOptions()) - pl := p.jptm.SourceProviderPipeline() ctx := p.jptm.Context() - blobURL := azblob.NewBlockBlobURL(*uri, pl) - - clientProvidedKey := azblob.ClientProvidedKeyOptions{} - if p.jptm.IsSourceEncrypted() { - clientProvidedKey = common.ToClientProvidedKeyOptions(p.jptm.CpkInfo(), p.jptm.CpkScopeInfo()) - } - - resp, err := blobURL.Download(ctx, 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false, clientProvidedKey) + resp, err := blobClient.DownloadStream(ctx, &blob.DownloadStreamOptions{ + CPKInfo: p.jptm.CpkInfo(), + CPKScopeInfo: p.jptm.CpkScopeInfo(), + }) if err != nil { return "", err } - symlinkBuf, err := io.ReadAll(resp.Body(azblob.RetryReaderOptions{ - MaxRetryRequests: 5, - NotifyFailedRead: common.NewReadLogFunc(p.jptm, uri), + symlinkBuf, err := io.ReadAll(resp.NewRetryReader(ctx, &blob.RetryReaderOptions{ + MaxRetries: 5, + OnFailedRead: common.NewBlobReadLogFunc(p.jptm, source), })) if err != nil { return "", err @@ -108,7 +105,7 @@ func (p *blobSourceInfoProvider) GetUNIXProperties() (common.UnixStatAdapter, er return nil, err } - return common.ReadStatFromMetadata(prop.SrcMetadata.ToAzBlobMetadata(), p.SourceSize()) + return common.ReadStatFromMetadata(prop.SrcMetadata, p.SourceSize()) } func (p *blobSourceInfoProvider) HasUNIXProperties() bool { @@ -142,36 +139,45 @@ func (p *blobSourceInfoProvider) AccessControl() (azbfs.BlobFSAccessControl, err if err != nil { return azbfs.BlobFSAccessControl{}, err } - - fURL := 
azbfs.NewFileURL(*presignedURL, p.jptm.SecondarySourceProviderPipeline()) + parsedURL, err := blob.ParseURL(presignedURL) + if err != nil { + return azbfs.BlobFSAccessControl{}, err + } + parsedURL.Host = strings.ReplaceAll(parsedURL.Host, ".blob", ".dfs") + if parsedURL.BlobName != "" { + parsedURL.BlobName = strings.TrimSuffix(parsedURL.BlobName, "/") // BlobFS doesn't handle folders correctly like this. + } else { + parsedURL.BlobName = "/" // container level perms MUST have a / + } + u, err := url.Parse(parsedURL.String()) + if err != nil { + return azbfs.BlobFSAccessControl{}, err + } + // todo: jank, and violates the principle of interfaces + fURL := azbfs.NewFileURL(*u, p.jptm.(*jobPartTransferMgr).jobPartMgr.(*jobPartMgr).secondarySourceProviderPipeline) return fURL.GetAccessControl(p.jptm.Context()) } -func (p *blobSourceInfoProvider) BlobTier() azblob.AccessTierType { +func (p *blobSourceInfoProvider) BlobTier() blob.AccessTier { return p.transferInfo.S2SSrcBlobTier } -func (p *blobSourceInfoProvider) BlobType() azblob.BlobType { +func (p *blobSourceInfoProvider) BlobType() blob.BlobType { return p.transferInfo.SrcBlobType } func (p *blobSourceInfoProvider) GetFreshFileLastModifiedTime() (time.Time, error) { // We can't set a custom LMT on HNS, so it doesn't make sense to swap here. - presignedURL, err := p.internalPresignedURL(false) + source, err := p.internalPresignedURL(false) if err != nil { return time.Time{}, err } - blobURL := azblob.NewBlobURL(*presignedURL, p.jptm.SourceProviderPipeline()) - clientProvidedKey := azblob.ClientProvidedKeyOptions{} - if p.jptm.IsSourceEncrypted() { - clientProvidedKey = common.ToClientProvidedKeyOptions(p.jptm.CpkInfo(), p.jptm.CpkScopeInfo()) - } + blobClient := common.CreateBlobClient(source, p.jptm.S2SSourceCredentialInfo(), p.jptm.CredentialOpOptions(), p.jptm.S2SSourceClientOptions()) - properties, err := blobURL.GetProperties(p.jptm.Context(), azblob.BlobAccessConditions{}, clientProvidedKey) + properties, err := blobClient.GetProperties(p.jptm.Context(), &blob.GetPropertiesOptions{CPKInfo: p.jptm.CpkInfo()}) if err != nil { return time.Time{}, err } - - return properties.LastModified(), nil + return common.IffNotNil(properties.LastModified, time.Time{}), nil } diff --git a/ste/sourceInfoProvider-File.go b/ste/sourceInfoProvider-File.go index 9010acd18..e57297ab5 100644 --- a/ste/sourceInfoProvider-File.go +++ b/ste/sourceInfoProvider-File.go @@ -22,22 +22,21 @@ package ste import ( "context" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/directory" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" "sync" "time" - "github.com/Azure/azure-storage-file-go/azfile" - "github.com/Azure/azure-storage-azcopy/v10/common" ) -type richSMBPropertyHolder interface { - azfile.SMBPropertyHolder +type shareFilePropertyProvider interface { + FileCreationTime() time.Time + FileLastWriteTime() time.Time + FileAttributes() (*file.NTFSFileAttributes, error) FilePermissionKey() string - NewMetadata() azfile.Metadata + Metadata() map[string]*string LastModified() time.Time -} - -type contentPropsProvider interface { CacheControl() string ContentDisposition() string ContentEncoding() string @@ -46,12 +45,116 @@ type contentPropsProvider interface { ContentMD5() []byte } +type fileGetPropertiesAdapter struct { + GetProperties file.GetPropertiesResponse +} + +func (f fileGetPropertiesAdapter) CacheControl() string { + return common.IffNotNil(f.GetProperties.CacheControl, "") +} + +func (f fileGetPropertiesAdapter) ContentDisposition() 
string { + return common.IffNotNil(f.GetProperties.ContentDisposition, "") +} + +func (f fileGetPropertiesAdapter) ContentEncoding() string { + return common.IffNotNil(f.GetProperties.ContentEncoding, "") +} + +func (f fileGetPropertiesAdapter) ContentLanguage() string { + return common.IffNotNil(f.GetProperties.ContentLanguage, "") +} + +func (f fileGetPropertiesAdapter) ContentType() string { + return common.IffNotNil(f.GetProperties.ContentType, "") +} + +func (f fileGetPropertiesAdapter) ContentMD5() []byte { + return f.GetProperties.ContentMD5 +} + +func (f fileGetPropertiesAdapter) FileCreationTime() time.Time { + return common.IffNotNil(f.GetProperties.FileCreationTime, time.Time{}) +} + +func (f fileGetPropertiesAdapter) FileLastWriteTime() time.Time { + return common.IffNotNil(f.GetProperties.FileLastWriteTime, time.Time{}) +} + +func (f fileGetPropertiesAdapter) FileAttributes() (*file.NTFSFileAttributes, error) { + return file.ParseNTFSFileAttributes(f.GetProperties.FileAttributes) +} + +func (f fileGetPropertiesAdapter) FilePermissionKey() string { + return common.IffNotNil(f.GetProperties.FilePermissionKey, "") +} + +func (f fileGetPropertiesAdapter) Metadata() map[string]*string { + return f.GetProperties.Metadata +} + +func (f fileGetPropertiesAdapter) LastModified() time.Time { + return common.IffNotNil(f.GetProperties.LastModified, time.Time{}) +} + +type directoryGetPropertiesAdapter struct { + GetProperties directory.GetPropertiesResponse +} + +func (d directoryGetPropertiesAdapter) CacheControl() string { + return "" +} + +func (d directoryGetPropertiesAdapter) ContentDisposition() string { + return "" +} + +func (d directoryGetPropertiesAdapter) ContentEncoding() string { + return "" +} + +func (d directoryGetPropertiesAdapter) ContentLanguage() string { + return "" +} + +func (d directoryGetPropertiesAdapter) ContentType() string { + return "" +} + +func (d directoryGetPropertiesAdapter) ContentMD5() []byte { + return make([]byte, 0) +} + +func (d directoryGetPropertiesAdapter) FileCreationTime() time.Time { + return common.IffNotNil(d.GetProperties.FileCreationTime, time.Time{}) +} + +func (d directoryGetPropertiesAdapter) FileLastWriteTime() time.Time { + return common.IffNotNil(d.GetProperties.FileLastWriteTime, time.Time{}) +} + +func (d directoryGetPropertiesAdapter) FileAttributes() (*file.NTFSFileAttributes, error) { + return file.ParseNTFSFileAttributes(d.GetProperties.FileAttributes) +} + +func (d directoryGetPropertiesAdapter) FilePermissionKey() string { + return common.IffNotNil(d.GetProperties.FilePermissionKey, "") +} + +func (d directoryGetPropertiesAdapter) Metadata() map[string]*string { + return d.GetProperties.Metadata +} + +func (d directoryGetPropertiesAdapter) LastModified() time.Time { + return common.IffNotNil(d.GetProperties.LastModified, time.Time{}) +} + // Source info provider for Azure blob type fileSourceInfoProvider struct { ctx context.Context cachedPermissionKey string cacheOnce *sync.Once - cachedProperties richSMBPropertyHolder // use interface because may be file or directory properties + cachedProperties shareFilePropertyProvider // use interface because may be file or directory properties defaultRemoteSourceInfoProvider } @@ -67,19 +170,21 @@ func newFileSourceInfoProvider(jptm IJobPartTransferMgr) (ISourceInfoProvider, e return &fileSourceInfoProvider{defaultRemoteSourceInfoProvider: *base, ctx: jptm.Context(), cacheOnce: &sync.Once{}}, nil } -func (p *fileSourceInfoProvider) getFreshProperties() (richSMBPropertyHolder, error) { 
- presigned, err := p.PreSignedSourceURL() +func (p *fileSourceInfoProvider) getFreshProperties() (shareFilePropertyProvider, error) { + source, err := p.PreSignedSourceURL() if err != nil { return nil, err } switch p.EntityType() { case common.EEntityType.File(): - fileURL := azfile.NewFileURL(*presigned, p.jptm.SourceProviderPipeline()) - return fileURL.GetProperties(p.ctx) + fileClient := common.CreateShareFileClient(source, p.jptm.S2SSourceCredentialInfo(), p.jptm.CredentialOpOptions(), p.jptm.S2SSourceClientOptions()) + props, err := fileClient.GetProperties(p.ctx, nil) + return &fileGetPropertiesAdapter{props}, err case common.EEntityType.Folder(): - dirURL := azfile.NewDirectoryURL(*presigned, p.jptm.SourceProviderPipeline()) - return dirURL.GetProperties(p.ctx) + directoryClient := common.CreateShareDirectoryClient(source, p.jptm.S2SSourceCredentialInfo(), p.jptm.CredentialOpOptions(), p.jptm.S2SSourceClientOptions()) + props, err := directoryClient.GetProperties(p.ctx, nil) + return &directoryGetPropertiesAdapter{props}, err default: panic("unexpected case") } @@ -87,7 +192,7 @@ func (p *fileSourceInfoProvider) getFreshProperties() (richSMBPropertyHolder, er // cached because we use it for both GetSMBProperties and GetSDDL, and in some cases (e.g. small files, // or enough transactions that transaction costs matter) saving IOPS matters -func (p *fileSourceInfoProvider) getCachedProperties() (richSMBPropertyHolder, error) { +func (p *fileSourceInfoProvider) getCachedProperties() (shareFilePropertyProvider, error) { var err error p.cacheOnce.Do(func() { @@ -98,9 +203,7 @@ func (p *fileSourceInfoProvider) getCachedProperties() (richSMBPropertyHolder, e } func (p *fileSourceInfoProvider) GetSMBProperties() (TypedSMBPropertyHolder, error) { - cachedProps, err := p.getCachedProperties() - - return &azfile.SMBPropertyAdapter{PropertySource: cachedProps}, err + return p.getCachedProperties() } func (p *fileSourceInfoProvider) GetSDDL() (string, error) { @@ -116,13 +219,16 @@ func (p *fileSourceInfoProvider) GetSDDL() (string, error) { // Call into SIPM and grab our SDDL string. 
sipm := p.jptm.SecurityInfoPersistenceManager() - presigned, err := p.PreSignedSourceURL() + source, err := p.PreSignedSourceURL() + if err != nil { + return "", err + } + fURLParts, err := file.ParseURL(source) if err != nil { return "", err } - fURLParts := azfile.NewFileURLParts(*presigned) fURLParts.DirectoryOrFilePath = "" - sddlString, err := sipm.GetSDDLFromID(key, fURLParts.URL(), p.jptm.SourceProviderPipeline()) + sddlString, err := sipm.GetSDDLFromID(key, fURLParts.String(), p.jptm.S2SSourceCredentialInfo(), p.jptm.CredentialOpOptions(), p.jptm.S2SSourceClientOptions()) return sddlString, err } @@ -145,22 +251,21 @@ func (p *fileSourceInfoProvider) Properties() (*SrcProperties, error) { switch p.EntityType() { case common.EEntityType.File(): - fileProps := properties.(contentPropsProvider) srcProperties = &SrcProperties{ SrcHTTPHeaders: common.ResourceHTTPHeaders{ - ContentType: fileProps.ContentType(), - ContentEncoding: fileProps.ContentEncoding(), - ContentDisposition: fileProps.ContentDisposition(), - ContentLanguage: fileProps.ContentLanguage(), - CacheControl: fileProps.CacheControl(), - ContentMD5: fileProps.ContentMD5(), + ContentType: properties.ContentType(), + ContentEncoding: properties.ContentEncoding(), + ContentDisposition: properties.ContentDisposition(), + ContentLanguage: properties.ContentLanguage(), + CacheControl: properties.CacheControl(), + ContentMD5: properties.ContentMD5(), }, - SrcMetadata: common.FromAzFileMetadataToCommonMetadata(properties.NewMetadata()), + SrcMetadata: properties.Metadata(), } case common.EEntityType.Folder(): srcProperties = &SrcProperties{ SrcHTTPHeaders: common.ResourceHTTPHeaders{}, // no contentType etc for folders - SrcMetadata: common.FromAzFileMetadataToCommonMetadata(properties.NewMetadata()), + SrcMetadata: properties.Metadata(), } default: panic("unsupported entity type") diff --git a/ste/sourceInfoProvider-GCP.go b/ste/sourceInfoProvider-GCP.go index 73f6ded67..a95c87765 100644 --- a/ste/sourceInfoProvider-GCP.go +++ b/ste/sourceInfoProvider-GCP.go @@ -60,11 +60,10 @@ func newGCPSourceInfoProvider(jptm IJobPartTransferMgr) (ISourceInfoProvider, er return &p, nil } -func (p *gcpSourceInfoProvider) PreSignedSourceURL() (*url.URL, error) { - +func (p *gcpSourceInfoProvider) PreSignedSourceURL() (string, error) { conf, err := google.JWTConfigFromJSON(jsonKey) if err != nil { - return nil, fmt.Errorf("Could not get config from json key. Error: %v", err) + return "", fmt.Errorf("Could not get config from json key. 
Error: %v", err) } opts := &gcpUtils.SignedURLOptions{ Scheme: gcpUtils.SigningSchemeV4, @@ -76,15 +75,10 @@ func (p *gcpSourceInfoProvider) PreSignedSourceURL() (*url.URL, error) { u, err := gcpUtils.SignedURL(p.gcpURLParts.BucketName, p.gcpURLParts.ObjectKey, opts) if err != nil { - return nil, fmt.Errorf("Unable to Generate Signed URL for given GCP Object: %v", err) - } - - parsedURL, err := url.Parse(u) - if err != nil { - return nil, fmt.Errorf("Unable to parse signed URL: %v", err) + return "", fmt.Errorf("Unable to Generate Signed URL for given GCP Object: %v", err) } - return parsedURL, nil + return u, nil } func (p *gcpSourceInfoProvider) Properties() (*SrcProperties, error) { diff --git a/ste/sourceInfoProvider-Local_linux.go b/ste/sourceInfoProvider-Local_linux.go index 55a160d99..605787c82 100644 --- a/ste/sourceInfoProvider-Local_linux.go +++ b/ste/sourceInfoProvider-Local_linux.go @@ -5,9 +5,9 @@ package ste import ( "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" "github.com/Azure/azure-storage-azcopy/v10/common" "github.com/Azure/azure-storage-azcopy/v10/sddl" - "github.com/Azure/azure-storage-file-go/azfile" "golang.org/x/sys/unix" "strings" "time" @@ -227,7 +227,7 @@ func (hi HandleInfo) FileLastWriteTime() time.Time { return time.Unix(0, hi.LastWriteTime.Nanoseconds()) } -func (hi HandleInfo) FileAttributes() azfile.FileAttributeFlags { +func (hi HandleInfo) FileAttributes() (*file.NTFSFileAttributes, error) { // Can't shorthand it because the function name overrides. - return azfile.FileAttributeFlags(hi.ByHandleFileInformation.FileAttributes) + return FileAttributesFromUint32(hi.ByHandleFileInformation.FileAttributes) } diff --git a/ste/sourceInfoProvider-Local_windows.go b/ste/sourceInfoProvider-Local_windows.go index 0439fb5e8..c68160e91 100644 --- a/ste/sourceInfoProvider-Local_windows.go +++ b/ste/sourceInfoProvider-Local_windows.go @@ -4,6 +4,7 @@ package ste import ( "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" "os" "strings" "syscall" @@ -14,7 +15,6 @@ import ( "github.com/Azure/azure-storage-azcopy/v10/common" - "github.com/Azure/azure-storage-file-go/azfile" "golang.org/x/sys/windows" "github.com/Azure/azure-storage-azcopy/v10/sddl" @@ -129,7 +129,7 @@ func (hi HandleInfo) FileLastWriteTime() time.Time { return time.Unix(0, hi.LastWriteTime.Nanoseconds()) } -func (hi HandleInfo) FileAttributes() azfile.FileAttributeFlags { +func (hi HandleInfo) FileAttributes() (*file.NTFSFileAttributes, error) { // Can't shorthand it because the function name overrides. 
- return azfile.FileAttributeFlags(hi.ByHandleFileInformation.FileAttributes) + return FileAttributesFromUint32(hi.ByHandleFileInformation.FileAttributes) } diff --git a/ste/sourceInfoProvider-S3.go b/ste/sourceInfoProvider-S3.go index 39a86105f..04036966d 100644 --- a/ste/sourceInfoProvider-S3.go +++ b/ste/sourceInfoProvider-S3.go @@ -86,11 +86,15 @@ func newS3SourceInfoProvider(jptm IJobPartTransferMgr) (ISourceInfoProvider, err return &p, nil } -func (p *s3SourceInfoProvider) PreSignedSourceURL() (*url.URL, error) { +func (p *s3SourceInfoProvider) PreSignedSourceURL() (string, error) { if p.credType == common.ECredentialType.S3PublicBucket() { - return p.rawSourceURL, nil + return p.rawSourceURL.String(), nil } - return p.s3Client.PresignedGetObject(p.s3URLPart.BucketName, p.s3URLPart.ObjectKey, defaultPresignExpires, url.Values{}) + source, err := p.s3Client.PresignedGetObject(p.s3URLPart.BucketName, p.s3URLPart.ObjectKey, defaultPresignExpires, url.Values{}) + if err != nil { + return "", err + } + return source.String(), nil } func (p *s3SourceInfoProvider) Properties() (*SrcProperties, error) { diff --git a/ste/sourceInfoProvider.go b/ste/sourceInfoProvider.go index e153c48cb..ff833b2d2 100644 --- a/ste/sourceInfoProvider.go +++ b/ste/sourceInfoProvider.go @@ -21,15 +21,12 @@ package ste import ( - "net/url" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" "os" "time" - "github.com/Azure/azure-storage-file-go/azfile" - "github.com/Azure/azure-storage-azcopy/v10/common" - - "github.com/Azure/azure-storage-blob-go/azblob" ) // ISourceInfoProvider is the abstraction of generic source info provider which provides source's properties. @@ -56,7 +53,7 @@ type IRemoteSourceInfoProvider interface { ISourceInfoProvider // SourceURL returns source's URL. - PreSignedSourceURL() (*url.URL, error) + PreSignedSourceURL() (string, error) // SourceSize returns size of source SourceSize() int64 @@ -72,16 +69,16 @@ type IBlobSourceInfoProvider interface { IRemoteSourceInfoProvider // BlobTier returns source's blob tier. - BlobTier() azblob.AccessTierType + BlobTier() blob.AccessTier // BlobType returns source's blob type. 
- BlobType() azblob.BlobType + BlobType() blob.BlobType } type TypedSMBPropertyHolder interface { FileCreationTime() time.Time FileLastWriteTime() time.Time - FileAttributes() azfile.FileAttributeFlags + FileAttributes() (*file.NTFSFileAttributes, error) } type ISMBPropertyBearingSourceInfoProvider interface { @@ -125,13 +122,8 @@ func newDefaultRemoteSourceInfoProvider(jptm IJobPartTransferMgr) (*defaultRemot return &defaultRemoteSourceInfoProvider{jptm: jptm, transferInfo: jptm.Info()}, nil } -func (p *defaultRemoteSourceInfoProvider) PreSignedSourceURL() (*url.URL, error) { - srcURL, err := url.Parse(p.transferInfo.Source) - if err != nil { - return nil, err - } - - return srcURL, nil +func (p *defaultRemoteSourceInfoProvider) PreSignedSourceURL() (string, error) { + return p.transferInfo.Source, nil } func (p *defaultRemoteSourceInfoProvider) Properties() (*SrcProperties, error) { diff --git a/ste/xfer-anyToRemote-file.go b/ste/xfer-anyToRemote-file.go index 7a2e714a3..ba05dfbe3 100644 --- a/ste/xfer-anyToRemote-file.go +++ b/ste/xfer-anyToRemote-file.go @@ -25,7 +25,7 @@ import ( "crypto/md5" "errors" "fmt" - "github.com/Azure/azure-storage-blob-go/azblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" "hash" "net/http" "net/url" @@ -37,6 +37,12 @@ import ( "github.com/Azure/azure-storage-azcopy/v10/common" ) +// IBlobClient is an interface to allow ValidateTier to accept any type of client +type IBlobClient interface { + URL() string + GetAccountInfo(ctx context.Context, o *blob.GetAccountInfoOptions) (blob.GetAccountInfoResponse, error) +} + // This code for blob tier safety is _not_ safe for multiple jobs at once. // That's alright, but it's good to know on the off chance. // This sync.Once is present to ensure we output information about a S2S access tier preservation failure to stdout once @@ -53,9 +59,9 @@ var tierSetPossibleFail bool var getDestAccountInfo sync.Once var getDestAccountInfoError error -func prepareDestAccountInfo(bURL azblob.BlobURL, jptm IJobPartTransferMgr, ctx context.Context, mustGet bool) { +func prepareDestAccountInfo(client IBlobClient, jptm IJobPartTransferMgr, ctx context.Context, mustGet bool) { getDestAccountInfo.Do(func() { - infoResp, err := bURL.GetAccountInfo(ctx) + infoResp, err := client.GetAccountInfo(ctx, nil) if err != nil { // If GetAccountInfo fails, this transfer should fail because we lack at least one available permission // UNLESS the user is using OAuth. In which case, the account owner can still get the info. @@ -71,8 +77,10 @@ func prepareDestAccountInfo(bURL azblob.BlobURL, jptm IJobPartTransferMgr, ctx c destAccountKind = "failget" } } else { - destAccountSKU = string(infoResp.SkuName()) - destAccountKind = string(infoResp.AccountKind()) + sku := infoResp.SKUName + kind := infoResp.AccountKind + destAccountSKU = string(*sku) + destAccountKind = string(*kind) } }) @@ -82,7 +90,7 @@ func prepareDestAccountInfo(bURL azblob.BlobURL, jptm IJobPartTransferMgr, ctx c } // // TODO: Infer availability based upon blob size as well, for premium page blobs. -func BlobTierAllowed(destTier azblob.AccessTierType) bool { +func BlobTierAllowed(destTier blob.AccessTier) bool { // If we failed to get the account info, just return true. // This is because we can't infer whether it's possible or not, and the setTier operation could possibly succeed (or fail) if tierSetPossibleFail { @@ -116,22 +124,25 @@ func BlobTierAllowed(destTier azblob.AccessTierType) bool { // Standard storage account. If it's Hot, Cool, or Archive, we're A-OK. 
// Page blobs, however, don't have an access tier on Standard accounts. // However, this is also OK, because the pageblob sender code prevents us from using a standard access tier type. - return destTier == azblob.AccessTierArchive || destTier == azblob.AccessTierCool || destTier == common.EBlockBlobTier.Cold().ToAccessTierType() || destTier == azblob.AccessTierHot + return destTier == blob.AccessTierArchive || destTier == blob.AccessTierCool || destTier == common.EBlockBlobTier.Cold().ToAccessTierType() || destTier == blob.AccessTierHot } } -func ValidateTier(jptm IJobPartTransferMgr, blobTier azblob.AccessTierType, blobURL azblob.BlobURL, ctx context.Context, performQuietly bool) (isValid bool) { +func ValidateTier(jptm IJobPartTransferMgr, blobTier blob.AccessTier, client IBlobClient, ctx context.Context, performQuietly bool) (isValid bool) { - if jptm.IsLive() && blobTier != azblob.AccessTierNone { + if jptm.IsLive() && blobTier != "" { // Let's check if we can confirm we'll be able to check the destination blob's account info. // A SAS token, even with write-only permissions is enough. OR, OAuth with the account owner. // We can't guess that last information, so we'll take a gamble and try to get account info anyway. // User delegation SAS is the same as OAuth - destParts := azblob.NewBlobURLParts(blobURL.URL()) - mustGet := destParts.SAS.Encode() != "" && destParts.SAS.SignedTid() == "" + destParts, err := blob.ParseURL(client.URL()) + if err != nil { + return false + } + mustGet := destParts.SAS.Encode() != "" && destParts.SAS.SignedTID() == "" - prepareDestAccountInfo(blobURL, jptm, ctx, mustGet) + prepareDestAccountInfo(client, jptm, ctx, mustGet) tierAvailable := BlobTierAllowed(blobTier) if tierAvailable { @@ -558,9 +569,9 @@ func epilogueWithCleanupSendToRemote(jptm IJobPartTransferMgr, s sender, sip ISo if shouldCheckLength { if err != nil { wrapped := fmt.Errorf("Could not read destination length. 
%w", err) - jptm.FailActiveSend(common.IffString(isS2SCopier, "S2S ", "Upload ")+"Length check: Get destination length", wrapped) + jptm.FailActiveSend(common.Iff(isS2SCopier, "S2S ", "Upload ")+"Length check: Get destination length", wrapped) } else if destLength != jptm.Info().SourceSize { - jptm.FailActiveSend(common.IffString(isS2SCopier, "S2S ", "Upload ")+"Length check", errors.New("destination length does not match source length")) + jptm.FailActiveSend(common.Iff(isS2SCopier, "S2S ", "Upload ")+"Length check", errors.New("destination length does not match source length")) } } } diff --git a/ste/xfer-deleteBlob.go b/ste/xfer-deleteBlob.go index 69b165acb..d1f1eaada 100644 --- a/ste/xfer-deleteBlob.go +++ b/ste/xfer-deleteBlob.go @@ -1,15 +1,17 @@ package ste import ( + "errors" "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" "net/http" - "net/url" "strings" "sync" "github.com/Azure/azure-pipeline-go/pipeline" "github.com/Azure/azure-storage-azcopy/v10/common" - "github.com/Azure/azure-storage-blob-go/azblob" ) var explainedSkippedRemoveOnce sync.Once @@ -33,9 +35,7 @@ func doDeleteBlob(jptm IJobPartTransferMgr, p pipeline.Pipeline) { info := jptm.Info() // Get the source blob url of blob to delete - u, _ := url.Parse(info.Source) - - srcBlobURL := azblob.NewBlobURL(*u, p) + blobClient := common.CreateBlobClient(info.Source, jptm.CredentialInfo(), jptm.CredentialOpOptions(), jptm.ClientOptions()) // Internal function which checks the transfer status and logs the msg respectively. // Sets the transfer status and Report Transfer as Done. @@ -60,35 +60,31 @@ func doDeleteBlob(jptm IJobPartTransferMgr, p pipeline.Pipeline) { // note: if deleteSnapshotsOption is 'only', which means deleting all the snapshots but keep the root blob // we still count this delete operation as successful since we accomplished the desired outcome - err := error(nil) - if jptm.PermanentDeleteOption().ToPermanentDeleteOptionType() == azblob.BlobDeletePermanent { - _, err = srcBlobURL.PermanentDelete(jptm.Context(), jptm.DeleteSnapshotsOption().ToDeleteSnapshotsOptionType(), azblob.BlobAccessConditions{}) - } else { - _, err = srcBlobURL.Delete(jptm.Context(), jptm.DeleteSnapshotsOption().ToDeleteSnapshotsOptionType(), azblob.BlobAccessConditions{}) - } + _, err := blobClient.Delete(jptm.Context(), &blob.DeleteOptions{ + DeleteSnapshots: jptm.DeleteSnapshotsOption().ToDeleteSnapshotsOptionType(), + BlobDeleteType: jptm.PermanentDeleteOption().ToPermanentDeleteOptionType(), + }) if err != nil { - if strErr, ok := err.(azblob.StorageError); ok { + var respErr *azcore.ResponseError + if errors.As(err, &respErr) { // if the delete failed with err 404, i.e resource not found, then mark the transfer as success. - if strErr.Response().StatusCode == http.StatusNotFound { + if respErr.StatusCode == http.StatusNotFound { transferDone(common.ETransferStatus.Success(), nil) return } - // if the delete failed because the blob has snapshots, then skip it - if strErr.Response().StatusCode == http.StatusConflict && strErr.ServiceCode() == azblob.ServiceCodeSnapshotsPresent { + if respErr.StatusCode == http.StatusConflict && respErr.ErrorCode == string(bloberror.SnapshotsPresent) { transferDone(common.ETransferStatus.SkippedBlobHasSnapshots(), nil) return } - // If the status code was 403, it means there was an authentication error and we exit. 
// User can resume the job if completely ordered with a new sas. - if strErr.Response().StatusCode == http.StatusForbidden { + if respErr.StatusCode == http.StatusForbidden { errMsg := fmt.Sprintf("Authentication Failed. The SAS is not correct or expired or does not have the correct permission %s", err.Error()) jptm.Log(pipeline.LogError, errMsg) common.GetLifecycleMgr().Error(errMsg) } } - // in all other cases, make the transfer as failed transferDone(common.ETransferStatus.Failed(), err) } else { diff --git a/ste/xfer-deleteFile.go b/ste/xfer-deleteFile.go index ca16c976b..c62187faa 100644 --- a/ste/xfer-deleteFile.go +++ b/ste/xfer-deleteFile.go @@ -2,17 +2,20 @@ package ste import ( "context" + "errors" "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/fileerror" "net/http" "net/url" "strings" "github.com/Azure/azure-pipeline-go/pipeline" "github.com/Azure/azure-storage-azcopy/v10/common" - "github.com/Azure/azure-storage-file-go/azfile" ) -func DeleteFile(jptm IJobPartTransferMgr, p pipeline.Pipeline, pacer pacer) { +func DeleteFile(jptm IJobPartTransferMgr, _ pipeline.Pipeline, _ pacer) { // If the transfer was cancelled, then reporting transfer as done and increasing the bytestransferred by the size of the source. if jptm.WasCanceled() { @@ -21,7 +24,8 @@ func DeleteFile(jptm IJobPartTransferMgr, p pipeline.Pipeline, pacer pacer) { } info := jptm.Info() - srcUrl, _ := url.Parse(info.Source) + source := info.Source + srcURL, _ := url.Parse(source) // Register existence with the deletion manager. Do it now, before we make the chunk funcs, // to maximize the extent to which the manager knows about as many children as possible (i.e. @@ -31,18 +35,17 @@ func DeleteFile(jptm IJobPartTransferMgr, p pipeline.Pipeline, pacer pacer) { // and then we find more children in the plan files. Such failed attempts are harmless, but cause // unnecessary network round trips. // We must do this for all entity types, because even folders are children of their parents - jptm.FolderDeletionManager().RecordChildExists(srcUrl) + jptm.FolderDeletionManager().RecordChildExists(srcURL) if info.EntityType == common.EEntityType.Folder() { - - au := azfile.NewFileURLParts(*srcUrl) - isFileShareRoot := au.DirectoryOrFilePath == "" + fileURLParts, _ := file.ParseURL(source) + isFileShareRoot := fileURLParts.DirectoryOrFilePath == "" if !isFileShareRoot { jptm.LogAtLevelForCurrentTransfer(pipeline.LogInfo, "Queuing folder, to be deleted after it's children are deleted") jptm.FolderDeletionManager().RequestDeletion( - srcUrl, + srcURL, func(ctx context.Context, logger common.ILogger) bool { - return doDeleteFolder(ctx, info.Source, p, jptm, logger) + return doDeleteFolder(ctx, source, jptm, logger) }, ) } @@ -58,26 +61,25 @@ func DeleteFile(jptm IJobPartTransferMgr, p pipeline.Pipeline, pacer pacer) { } else { // schedule the work as a chunk, so it will run on the main goroutine pool, instead of the // smaller "transfer initiation pool", where this code runs. 
- id := common.NewChunkID(info.Source, 0, 0) - cf := createChunkFunc(true, jptm, id, func() { doDeleteFile(jptm, p) }) + id := common.NewChunkID(source, 0, 0) + cf := createChunkFunc(true, jptm, id, func() { doDeleteFile(jptm) }) jptm.ScheduleChunks(cf) } } -func doDeleteFile(jptm IJobPartTransferMgr, p pipeline.Pipeline) { - +func doDeleteFile(jptm IJobPartTransferMgr) { info := jptm.Info() - // Get the source file url of file to delete - srcUrl, _ := url.Parse(info.Source) + source := info.Source + srcURL, _ := url.Parse(source) - srcFileUrl := azfile.NewFileURL(*srcUrl, p) + srcFileClient := common.CreateShareFileClient(source, jptm.CredentialInfo(), jptm.CredentialOpOptions(), jptm.ClientOptions()) // Internal function which checks the transfer status and logs the msg respectively. // Sets the transfer status and Report Transfer as Done. // Internal function is created to avoid redundancy of the above steps from several places in the api. transferDone := func(status common.TransferStatus, err error) { if status == common.ETransferStatus.Success() { - jptm.FolderDeletionManager().RecordChildDeleted(srcUrl) + jptm.FolderDeletionManager().RecordChildDeleted(srcURL) // TODO: doing this only on success raises the possibility of the // FolderDeletionManager's internal map growing rather large if there are lots of failures // on a big folder tree. Is living with that preferable to the "incorrectness" of calling @@ -101,19 +103,20 @@ func doDeleteFile(jptm IJobPartTransferMgr, p pipeline.Pipeline) { // Delete the source file helper := &azureFileSenderBase{} err := helper.DoWithOverrideReadOnly(jptm.Context(), - func() (interface{}, error) { return srcFileUrl.Delete(jptm.Context()) }, - srcFileUrl, + func() (interface{}, error) { return srcFileClient.Delete(jptm.Context(), nil) }, + srcFileClient, jptm.GetForceIfReadOnly()) if err != nil { - // If the delete failed with err 404, i.e resource not found, then mark the transfer as success. - if strErr, ok := err.(azfile.StorageError); ok { - if strErr.Response().StatusCode == http.StatusNotFound { + var respErr *azcore.ResponseError + if errors.As(err, &respErr) { + // If the delete failed with err 404, i.e resource not found, then mark the transfer as success. + if respErr.StatusCode == http.StatusNotFound { transferDone(common.ETransferStatus.Success(), nil) return } // If the status code was 403, it means there was an authentication error and we exit. // User can resume the job if completely ordered with a new sas. - if strErr.Response().StatusCode == http.StatusForbidden { + if respErr.StatusCode == http.StatusForbidden { errMsg := fmt.Sprintf("Authentication Failed. 
The SAS is not correct or expired or does not have the correct permission %s", err.Error()) jptm.Log(pipeline.LogError, errMsg) common.GetLifecycleMgr().Error(errMsg) @@ -125,35 +128,34 @@ func doDeleteFile(jptm IJobPartTransferMgr, p pipeline.Pipeline) { } } -func doDeleteFolder(ctx context.Context, folder string, p pipeline.Pipeline, jptm IJobPartTransferMgr, logger common.ILogger) bool { - - u, err := url.Parse(folder) +func doDeleteFolder(ctx context.Context, folder string, jptm IJobPartTransferMgr, logger common.ILogger) bool { + fileURLParts, err := file.ParseURL(folder) if err != nil { return false } - loggableName := u.Path + loggableName := fileURLParts.DirectoryOrFilePath logger.Log(pipeline.LogDebug, "About to attempt to delete folder "+loggableName) - dirUrl := azfile.NewDirectoryURL(*u, p) + srcDirClient := common.CreateShareDirectoryClient(folder, jptm.CredentialInfo(), jptm.CredentialOpOptions(), jptm.ClientOptions()) helper := &azureFileSenderBase{} err = helper.DoWithOverrideReadOnly(ctx, - func() (interface{}, error) { return dirUrl.Delete(ctx) }, - dirUrl, + func() (interface{}, error) { return srcDirClient.Delete(ctx, nil) }, + srcDirClient, jptm.GetForceIfReadOnly()) if err == nil { logger.Log(pipeline.LogInfo, "Empty folder deleted "+loggableName) // not using capitalized DELETE SUCCESSFUL here because we can't use DELETE ERROR for folder delete failures (since there may be a retry if we delete more files, but we don't know that at time of logging) return true } - - // If the delete failed with err 404, i.e resource not found, then consider the deletion a success. (It's already gone) - if strErr, ok := err.(azfile.StorageError); ok { - if strErr.Response().StatusCode == http.StatusNotFound { + var respErr *azcore.ResponseError + if errors.As(err, &respErr) { + // If the delete failed with err 404, i.e resource not found, then consider the deletion a success. (It's already gone) + if respErr.StatusCode == http.StatusNotFound { logger.Log(pipeline.LogDebug, "Folder already gone before call to delete "+loggableName) return true } - if strErr.ServiceCode() == azfile.ServiceCodeDirectoryNotEmpty { + if fileerror.HasCode(err, fileerror.DirectoryNotEmpty) { logger.Log(pipeline.LogInfo, "Folder not deleted because it's not empty yet. Will retry if this job deletes more files from it. Folder name: "+loggableName) return false } diff --git a/ste/xfer-setProperties.go b/ste/xfer-setProperties.go index a5d22451e..805937c74 100644 --- a/ste/xfer-setProperties.go +++ b/ste/xfer-setProperties.go @@ -1,17 +1,18 @@ package ste import ( + "errors" "fmt" "github.com/Azure/azure-pipeline-go/pipeline" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" "github.com/Azure/azure-storage-azcopy/v10/common" - "github.com/Azure/azure-storage-blob-go/azblob" - "github.com/Azure/azure-storage-file-go/azfile" "net/http" - "net/url" "strings" ) -func SetProperties(jptm IJobPartTransferMgr, p pipeline.Pipeline, pacer pacer) { +func SetProperties(jptm IJobPartTransferMgr, _ pipeline.Pipeline, _ pacer) { // If the transfer was cancelled, then reporting transfer as done and increasing the bytes transferred by the size of the source. 
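As an aside on the delete hunks above: throughout this patch the Track 1 `azblob.StorageError` type assertions are replaced by `errors.As` against `*azcore.ResponseError`, with the SDK's `bloberror`/`fileerror` helpers used for service error codes. Below is a minimal sketch of that classification pattern; the function name and the string outcomes ("success", "skipped", and so on) are illustrative, not names from the patch.

```go
package main

import (
	"errors"
	"fmt"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror"
)

// classifyBlobDeleteError is a hypothetical helper mirroring the pattern in the
// xfer-deleteBlob.go hunk above: unwrap *azcore.ResponseError, then branch on
// the HTTP status code and, where relevant, the service error code.
func classifyBlobDeleteError(err error) string {
	var respErr *azcore.ResponseError
	if !errors.As(err, &respErr) {
		return "failed" // no HTTP response from the service at all
	}
	switch {
	case respErr.StatusCode == http.StatusNotFound:
		return "success" // already gone; the desired outcome was achieved
	case respErr.StatusCode == http.StatusConflict &&
		bloberror.HasCode(err, bloberror.SnapshotsPresent):
		return "skipped" // blob still has snapshots; skip rather than fail
	case respErr.StatusCode == http.StatusForbidden:
		return "authFailed" // SAS expired or lacks the needed permission
	default:
		return "failed"
	}
}

func main() {
	err := &azcore.ResponseError{StatusCode: http.StatusNotFound}
	fmt.Println(classifyBlobDeleteError(err)) // prints "success"
}
```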
if jptm.WasCanceled() { jptm.ReportTransferDone() @@ -25,11 +26,11 @@ func SetProperties(jptm IJobPartTransferMgr, p pipeline.Pipeline, pacer pacer) { to := jptm.FromTo() switch to.From() { case common.ELocation.Blob(): - setPropertiesBlob(jptm, p) + setPropertiesBlob(jptm) case common.ELocation.BlobFS(): - setPropertiesBlobFS(jptm, p) + setPropertiesBlobFS(jptm) case common.ELocation.File(): - setPropertiesFile(jptm, p) + setPropertiesFile(jptm) default: panic("Attempting set-properties on invalid location: " + to.From().String()) } @@ -37,12 +38,8 @@ func SetProperties(jptm IJobPartTransferMgr, p pipeline.Pipeline, pacer pacer) { jptm.ScheduleChunks(cf) } -func setPropertiesBlob(jptm IJobPartTransferMgr, p pipeline.Pipeline) { +func setPropertiesBlob(jptm IJobPartTransferMgr) { info := jptm.Info() - // Get the source blob url of blob to set properties on - u, _ := url.Parse(info.Source) - srcBlobURL := azblob.NewBlobURL(*u, p) - // Internal function which checks the transfer status and logs the msg respectively. // Sets the transfer status and Reports Transfer as Done. // Internal function is created to avoid redundancy of the above steps from several places in the api. @@ -58,6 +55,8 @@ func setPropertiesBlob(jptm IJobPartTransferMgr, p pipeline.Pipeline) { jptm.ReportTransferDone() } + srcBlobClient := common.CreateBlobClient(info.Source, jptm.CredentialInfo(), jptm.CredentialOpOptions(), jptm.ClientOptions()) + PropertiesToTransfer := jptm.PropertiesToTransfer() _, metadata, blobTags, _ := jptm.ResourceDstData(nil) @@ -66,12 +65,14 @@ func setPropertiesBlob(jptm IJobPartTransferMgr, p pipeline.Pipeline) { blockBlobTier, pageBlobTier := jptm.BlobTiers() var err error = nil - if jptm.Info().SrcBlobType == azblob.BlobBlockBlob && blockBlobTier != common.EBlockBlobTier.None() && ValidateTier(jptm, blockBlobTier.ToAccessTierType(), srcBlobURL, jptm.Context(), true) { - _, err = srcBlobURL.SetTier(jptm.Context(), blockBlobTier.ToAccessTierType(), azblob.LeaseAccessConditions{}, rehydratePriority) + if jptm.Info().SrcBlobType == blob.BlobTypeBlockBlob && blockBlobTier != common.EBlockBlobTier.None() && ValidateTier(jptm, blockBlobTier.ToAccessTierType(), srcBlobClient, jptm.Context(), true) { + _, err = srcBlobClient.SetTier(jptm.Context(), blockBlobTier.ToAccessTierType(), + &blob.SetTierOptions{RehydratePriority: &rehydratePriority}) } // cannot return true for >1, therefore only one of these will run - if jptm.Info().SrcBlobType == azblob.BlobPageBlob && pageBlobTier != common.EPageBlobTier.None() && ValidateTier(jptm, pageBlobTier.ToAccessTierType(), srcBlobURL, jptm.Context(), true) { - _, err = srcBlobURL.SetTier(jptm.Context(), pageBlobTier.ToAccessTierType(), azblob.LeaseAccessConditions{}, rehydratePriority) + if jptm.Info().SrcBlobType == blob.BlobTypePageBlob && pageBlobTier != common.EPageBlobTier.None() && ValidateTier(jptm, pageBlobTier.ToAccessTierType(), srcBlobClient, jptm.Context(), true) { + _, err = srcBlobClient.SetTier(jptm.Context(), pageBlobTier.ToAccessTierType(), + &blob.SetTierOptions{RehydratePriority: &rehydratePriority}) } if err != nil { @@ -82,15 +83,15 @@ func setPropertiesBlob(jptm IJobPartTransferMgr, p pipeline.Pipeline) { } if PropertiesToTransfer.ShouldTransferMetaData() { - _, err := srcBlobURL.SetMetadata(jptm.Context(), metadata.ToAzBlobMetadata(), azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{}) - //TODO the canonical thingi in this is changing key value to upper case. How to go around it? 
+ _, err := srcBlobClient.SetMetadata(jptm.Context(), metadata, nil) + //TODO the canonical thing in this is changing key value to upper case. How to go around it? if err != nil { errorHandlerForXferSetProperties(err, jptm, transferDone) return } } if PropertiesToTransfer.ShouldTransferBlobTags() { - _, err := srcBlobURL.SetTags(jptm.Context(), nil, nil, nil, blobTags.ToAzBlobTagsMap()) + _, err := srcBlobClient.SetTags(jptm.Context(), blobTags, nil) if err != nil { errorHandlerForXferSetProperties(err, jptm, transferDone) return @@ -100,12 +101,8 @@ func setPropertiesBlob(jptm IJobPartTransferMgr, p pipeline.Pipeline) { transferDone(common.ETransferStatus.Success(), nil) } -func setPropertiesBlobFS(jptm IJobPartTransferMgr, p pipeline.Pipeline) { +func setPropertiesBlobFS(jptm IJobPartTransferMgr) { info := jptm.Info() - // Get the source blob url of blob to delete - u, _ := url.Parse(info.Source) - srcBlobURL := azblob.NewBlobURL(*u, p) - // Internal function which checks the transfer status and logs the msg respectively. // Sets the transfer status and Report Transfer as Done. // Internal function is created to avoid redundancy of the above steps from several places in the api. @@ -121,6 +118,8 @@ func setPropertiesBlobFS(jptm IJobPartTransferMgr, p pipeline.Pipeline) { jptm.ReportTransferDone() } + srcBlobClient := common.CreateBlobClient(info.Source, jptm.CredentialInfo(), jptm.CredentialOpOptions(), jptm.ClientOptions()) + PropertiesToTransfer := jptm.PropertiesToTransfer() _, metadata, blobTags, _ := jptm.ResourceDstData(nil) @@ -128,8 +127,9 @@ func setPropertiesBlobFS(jptm IJobPartTransferMgr, p pipeline.Pipeline) { rehydratePriority := info.RehydratePriority _, pageBlobTier := jptm.BlobTiers() var err error = nil - if ValidateTier(jptm, pageBlobTier.ToAccessTierType(), srcBlobURL, jptm.Context(), false) { - _, err = srcBlobURL.SetTier(jptm.Context(), pageBlobTier.ToAccessTierType(), azblob.LeaseAccessConditions{}, rehydratePriority) + if ValidateTier(jptm, pageBlobTier.ToAccessTierType(), srcBlobClient, jptm.Context(), false) { + _, err = srcBlobClient.SetTier(jptm.Context(), pageBlobTier.ToAccessTierType(), + &blob.SetTierOptions{RehydratePriority: &rehydratePriority}) } if err != nil { @@ -140,14 +140,14 @@ func setPropertiesBlobFS(jptm IJobPartTransferMgr, p pipeline.Pipeline) { } if PropertiesToTransfer.ShouldTransferMetaData() { - _, err := srcBlobURL.SetMetadata(jptm.Context(), metadata.ToAzBlobMetadata(), azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{}) + _, err := srcBlobClient.SetMetadata(jptm.Context(), metadata, nil) if err != nil { errorHandlerForXferSetProperties(err, jptm, transferDone) return } } if PropertiesToTransfer.ShouldTransferBlobTags() { - _, err := srcBlobURL.SetTags(jptm.Context(), nil, nil, nil, blobTags.ToAzBlobTagsMap()) + _, err := srcBlobClient.SetTags(jptm.Context(), blobTags, nil) if err != nil { errorHandlerForXferSetProperties(err, jptm, transferDone) return @@ -158,11 +158,9 @@ func setPropertiesBlobFS(jptm IJobPartTransferMgr, p pipeline.Pipeline) { transferDone(common.ETransferStatus.Success(), nil) } -func setPropertiesFile(jptm IJobPartTransferMgr, p pipeline.Pipeline) { +func setPropertiesFile(jptm IJobPartTransferMgr) { info := jptm.Info() - u, _ := url.Parse(info.Source) - srcFileURL := azfile.NewFileURL(*u, p) - _ = srcFileURL + srcFileClient := common.CreateShareFileClient(info.Source, jptm.CredentialInfo(), jptm.CredentialOpOptions(), jptm.ClientOptions()) // Internal function which checks the transfer status and logs 
the msg respectively. // Sets the transfer status and Report Transfer as Done. // Internal function is created to avoid redundancy of the above steps from several places in the api. @@ -187,7 +185,7 @@ func setPropertiesFile(jptm IJobPartTransferMgr, p pipeline.Pipeline) { transferDone(common.ETransferStatus.Failed(), err) } if PropertiesToTransfer.ShouldTransferMetaData() { - _, err := srcFileURL.SetMetadata(jptm.Context(), metadata.ToAzFileMetadata()) + _, err := srcFileClient.SetMetadata(jptm.Context(), &file.SetMetadataOptions{Metadata: metadata}) if err != nil { errorHandlerForXferSetProperties(err, jptm, transferDone) return @@ -198,15 +196,14 @@ func setPropertiesFile(jptm IJobPartTransferMgr, p pipeline.Pipeline) { } func errorHandlerForXferSetProperties(err error, jptm IJobPartTransferMgr, transferDone func(status common.TransferStatus, err error)) { - if strErr, ok := err.(azblob.StorageError); ok { - + var respErr *azcore.ResponseError + if errors.As(err, &respErr) && respErr.StatusCode == http.StatusForbidden { // If the status code was 403, it means there was an authentication error, and we exit. // User can resume the job if completely ordered with a new sas. - if strErr.Response().StatusCode == http.StatusForbidden { - errMsg := fmt.Sprintf("Authentication Failed. The SAS is not correct or expired or does not have the correct permission %s", err.Error()) - jptm.Log(pipeline.LogError, errMsg) - common.GetLifecycleMgr().Error(errMsg) - } + errMsg := fmt.Sprintf("Authentication Failed. The SAS is not correct or expired or does not have the correct permission %s", err.Error()) + jptm.Log(pipeline.LogError, errMsg) + common.GetLifecycleMgr().Error(errMsg) + // TODO : Migrate on azfile } // in all other cases, make the transfer as failed diff --git a/ste/xfer.go b/ste/xfer.go index fc8ba4295..0fd94f38f 100644 --- a/ste/xfer.go +++ b/ste/xfer.go @@ -21,14 +21,13 @@ package ste import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" "path/filepath" "strings" "sync" "time" "github.com/Azure/azure-pipeline-go/pipeline" - "github.com/Azure/azure-storage-blob-go/azblob" - "github.com/Azure/azure-storage-azcopy/v10/common" ) @@ -166,13 +165,13 @@ func computeJobXfer(fromTo common.FromTo, blobType common.BlobType) newJobXfer { } } -var inferExtensions = map[string]azblob.BlobType{ - ".vhd": azblob.BlobPageBlob, - ".vhdx": azblob.BlobPageBlob, +var inferExtensions = map[string]blob.BlobType{ + ".vhd": blob.BlobTypePageBlob, + ".vhdx": blob.BlobTypePageBlob, } // infers a blob type from the extension specified. 
-func inferBlobType(filename string, defaultBlobType azblob.BlobType) azblob.BlobType { +func inferBlobType(filename string, defaultBlobType blob.BlobType) blob.BlobType { if b, ok := inferExtensions[strings.ToLower(filepath.Ext(filename))]; ok { return b } diff --git a/ste/xferLogPolicy.go b/ste/xferLogPolicy.go index 3c9efeeba..b6cc5db59 100644 --- a/ste/xferLogPolicy.go +++ b/ste/xferLogPolicy.go @@ -4,11 +4,13 @@ import ( "bytes" "context" "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "net/http" "net/http/httptrace" "net/url" "regexp" "runtime" + "sort" "strings" "time" @@ -292,3 +294,236 @@ func doesHeaderExistCaseInsensitive(header http.Header, key string) (bool, strin } return false, "" } + +// logPolicyOpValues is the struct containing the per-operation values +type logPolicyOpValues struct { + try int32 + start time.Time +} + +type LogOptions struct { + // TODO : Unravel LogOptions and RequestLogOptions + RequestLogOptions RequestLogOptions + LogOptions pipeline.LogOptions +} + +type logPolicy struct { + LogOptions LogOptions + disallowedHeaders map[string]struct{} + sanitizedUrlHeaders map[string]struct{} + disallowedQueryParams map[string]struct{} +} + +func (p logPolicy) Do(req *policy.Request) (*http.Response, error) { + // Get the per-operation values. These are saved in the Message's map so that they persist across each retry calling into this policy object. + var opValues logPolicyOpValues + if req.OperationValue(&opValues); opValues.start.IsZero() { + opValues.start = time.Now() // If this is the 1st try, record this operation's start time + } + opValues.try++ // The first try is #1 (not #0) + req.SetOperationValue(opValues) + + if p.LogOptions.LogOptions.ShouldLog(pipeline.LogDebug) { + b := &bytes.Buffer{} + fmt.Fprintf(b, "==> OUTGOING REQUEST (Try=%d)\n", opValues.try) + p.writeRequestWithResponse(b, req, nil, nil) + p.LogOptions.LogOptions.Log(pipeline.LogInfo, b.String()) + } + + // Set the time for this particular retry operation and then Do the operation. + // The time we gather here is a measure of service responsiveness, and as such it shouldn't + // include the time taken to transfer the body. For downloads, that's easy, + // since Do returns before the body is processed. For uploads, its trickier, because + // the body transferring is inside Do. So we use an http trace, so we can time + // from the time we finished sending the request (including any body). + var endRequestWrite time.Time + haveEndWrite := false + tracedContext := httptrace.WithClientTrace(req.Raw().Context(), &httptrace.ClientTrace{ + WroteRequest: func(w httptrace.WroteRequestInfo) { + endRequestWrite = time.Now() + haveEndWrite = true + }, + }) + tryBeginAwaitResponse := time.Now() + response, err := req.Clone(tracedContext).Next() + tryEnd := time.Now() + if haveEndWrite { + tryBeginAwaitResponse = endRequestWrite // adjust to the time we really started waiting for the response + } + tryDuration := tryEnd.Sub(tryBeginAwaitResponse) + opDuration := tryEnd.Sub(opValues.start) + + logLevel, forceLog, httpError := pipeline.LogInfo, false, false // Default logging information + + // If the response took too long, we'll upgrade to warning. 
+ if p.LogOptions.RequestLogOptions.LogWarningIfTryOverThreshold > 0 && tryDuration > p.LogOptions.RequestLogOptions.LogWarningIfTryOverThreshold { + // Log a warning if the try duration exceeded the specified threshold + logLevel, forceLog = pipeline.LogWarning, !p.LogOptions.RequestLogOptions.SyslogDisabled + } + + if err == nil { // We got a response from the service + sc := response.StatusCode + if ((sc >= 400 && sc <= 499) && sc != http.StatusNotFound && sc != http.StatusConflict && sc != http.StatusPreconditionFailed && sc != http.StatusRequestedRangeNotSatisfiable) || (sc >= 500 && sc <= 599) { + logLevel, forceLog, httpError = pipeline.LogError, !p.LogOptions.RequestLogOptions.SyslogDisabled, true // Promote to Error any 4xx (except those listed is an error) or any 5xx + } else if sc == http.StatusNotFound || sc == http.StatusConflict || sc == http.StatusPreconditionFailed || sc == http.StatusRequestedRangeNotSatisfiable { + httpError = true + } + } else if isContextCancelledError(err) { + // No point force-logging these, and probably, for clarity of the log, no point in even logging unless at debug level + // Otherwise, when lots of go-routines are running, and one fails with a real error, the rest obscure the log with their + // context canceled logging. If there's no real error, just user-requested cancellation, + // that's is visible by cancelled status shown in end-of-log summary. + logLevel, forceLog = pipeline.LogDebug, false + } else { + // This error did not get an HTTP response from the service; upgrade the severity to Error + logLevel, forceLog = pipeline.LogError, !p.LogOptions.RequestLogOptions.SyslogDisabled + } + + logBody := false + if shouldLog := p.LogOptions.LogOptions.ShouldLog(logLevel); forceLog || shouldLog { + // We're going to log this; build the string to log + b := &bytes.Buffer{} + slow := "" + if p.LogOptions.RequestLogOptions.LogWarningIfTryOverThreshold > 0 && tryDuration > p.LogOptions.RequestLogOptions.LogWarningIfTryOverThreshold { + slow = fmt.Sprintf("[SLOW >%v]", p.LogOptions.RequestLogOptions.LogWarningIfTryOverThreshold) + } + fmt.Fprintf(b, "==> REQUEST/RESPONSE (Try=%d/%v%s, OpTime=%v) -- ", opValues.try, tryDuration, slow, opDuration) + if err != nil { // This HTTP request did not get a response from the service + fmt.Fprint(b, "REQUEST ERROR\n") + } else { + if logLevel == pipeline.LogError { + fmt.Fprint(b, "RESPONSE STATUS CODE ERROR\n") + logBody = true + } else { + fmt.Fprint(b, "RESPONSE SUCCESSFULLY RECEIVED\n") + } + } + if forceLog || err != nil || p.LogOptions.LogOptions.ShouldLog(pipeline.LogDebug) { + p.writeRequestWithResponse(b, req, response, err) + } else { + p.writeRequestAsOneLine(b, req) + writeActivityId(b, response) + } + + if logBody { + body := transparentlyReadBody(response) + fmt.Fprint(b, "Response Details: ", formatBody(body), "\n") // simple logging of response body, as raw XML (better than not logging it at all!) + } + + //Dropping HTTP errors as grabbing the stack is an expensive operation & fills the log too much + //for a set of harmless errors. HTTP requests ultimately will be retried. 
+ if logLevel <= pipeline.LogError && !httpError { + b.Write(stack()) + } + msg := b.String() + + if forceLog { + pipeline.ForceLog(logLevel, msg) + } + if shouldLog { + p.LogOptions.LogOptions.Log(logLevel, msg) + } + + } + + return response, err +} + +func newLogPolicy(options LogOptions) policy.Policy { + options.RequestLogOptions = options.RequestLogOptions.defaults() + if options.LogOptions.ShouldLog == nil { + options.LogOptions.ShouldLog = func(pipeline.LogLevel) bool { return false } // No-op logger + } + if options.LogOptions.Log == nil { + options.LogOptions.Log = func(pipeline.LogLevel, string) {} // No-op logger + } + disallowedHeaders := map[string]struct{}{ + "authorization": {}, + "x-ms-encryption-key": {}, + "x-ms-copy-source-authorization": {}, + } + sanitizedUrlHeaders := map[string]struct{}{ + "x-ms-copy-source": {}, + } + + // now do the same thing for query params + disallowedQP := map[string]struct{}{ + "sig": {}, + } + return logPolicy{LogOptions: options, disallowedHeaders: disallowedHeaders, disallowedQueryParams: disallowedQP, sanitizedUrlHeaders: sanitizedUrlHeaders} +} + +const redactedValue = "REDACTED" + +func (p *logPolicy) writeRequestAsOneLine(b *bytes.Buffer, req *policy.Request) { + cpURL := *req.Raw().URL + qp := cpURL.Query() + for k := range qp { + if _, ok := p.disallowedQueryParams[strings.ToLower(k)]; ok { + qp.Set(k, redactedValue) + } + } + cpURL.RawQuery = qp.Encode() + fmt.Fprint(b, " "+req.Raw().Method+" "+cpURL.String()+"\n") +} + +// writeRequestWithResponse appends a formatted HTTP request into a Buffer. If request and/or err are +// not nil, then these are also written into the Buffer. +func (p *logPolicy) writeRequestWithResponse(b *bytes.Buffer, req *policy.Request, resp *http.Response, err error) { + // redact applicable query params + cpURL := *req.Raw().URL + qp := cpURL.Query() + for k := range qp { + if _, ok := p.disallowedQueryParams[strings.ToLower(k)]; ok { + qp.Set(k, redactedValue) + } + } + cpURL.RawQuery = qp.Encode() + // Write the request into the buffer. + fmt.Fprint(b, " "+req.Raw().Method+" "+cpURL.String()+"\n") + p.writeHeader(b, req.Raw().Header) + if resp != nil { + fmt.Fprintln(b, " --------------------------------------------------------------------------------") + fmt.Fprint(b, " RESPONSE Status: "+resp.Status+"\n") + p.writeHeader(b, resp.Header) + } + if err != nil { + fmt.Fprintln(b, " --------------------------------------------------------------------------------") + fmt.Fprint(b, " ERROR:\n"+err.Error()+"\n") + } +} + +// formatHeaders appends an HTTP request's or response's header into a Buffer. 
+func (p *logPolicy) writeHeader(b *bytes.Buffer, header http.Header) { + if len(header) == 0 { + b.WriteString(" (no headers)\n") + return + } + keys := make([]string, 0, len(header)) + // Alphabetize the headers + for k := range header { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + value := header.Get(k) + // sanitize or redact certain headers + // redact all header values in the disallow-list + if _, ok := p.disallowedHeaders[strings.ToLower(k)]; ok { + value = redactedValue + } else if _, ok := p.sanitizedUrlHeaders[strings.ToLower(k)]; ok { + u, err := url.Parse(value) + if err == nil { + rawQuery := u.RawQuery + sigRedacted, rawQuery := common.RedactSecretQueryParam(rawQuery, common.SigAzure) + xAmzSignatureRedacted, rawQuery := common.RedactSecretQueryParam(rawQuery, common.SigXAmzForAws) + + if sigRedacted || xAmzSignatureRedacted { + u.RawQuery = rawQuery + } + value = u.String() + } + } + fmt.Fprintf(b, " %s: %+v\n", k, value) + } +} diff --git a/ste/xferRetryNotificationPolicy.go b/ste/xferRetryNotificationPolicy.go index dbd030325..56e5ae2fc 100644 --- a/ste/xferRetryNotificationPolicy.go +++ b/ste/xferRetryNotificationPolicy.go @@ -23,17 +23,18 @@ package ste import ( "context" "github.com/Azure/azure-pipeline-go/pipeline" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "net/http" ) // retryNotificationReceiver should be implemented by code that wishes to be notified when a retry // happens. Such code must register itself into the context, using withRetryNotification, -// so that the retryNotificationPolicy can invoke the callback when necessary. +// so that the v1RetryNotificationPolicy can invoke the callback when necessary. type retryNotificationReceiver interface { RetryCallback() } -// withRetryNotifier returns a context that contains a retry notifier. The retryNotificationPolicy +// withRetryNotifier returns a context that contains a retry notifier. The v1RetryNotificationPolicy // will then invoke the callback when a retry happens func withRetryNotification(ctx context.Context, r retryNotificationReceiver) context.Context { return context.WithValue(ctx, retryNotifyContextKey, r) @@ -45,7 +46,7 @@ type contextKey struct { var retryNotifyContextKey = contextKey{"retryNotify"} -type retryNotificationPolicy struct { +type v1RetryNotificationPolicy struct { next pipeline.Policy } @@ -54,7 +55,7 @@ type retryNotificationPolicy struct { // (We can't just let the top-level caller look at the status of the HTTP response, because by that // time our RetryPolicy will have actually DONE the retry, so the status will be successful. That's why, if the // top level caller wants to be informed, they have to get informed by this callback mechanism.) 
-func (r *retryNotificationPolicy) Do(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { +func (r *v1RetryNotificationPolicy) Do(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { resp, err := r.next.Do(ctx, request) @@ -71,9 +72,30 @@ func (r *retryNotificationPolicy) Do(ctx context.Context, request pipeline.Reque return resp, err } -func newRetryNotificationPolicyFactory() pipeline.Factory { +func newV1RetryNotificationPolicyFactory() pipeline.Factory { return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc { - r := retryNotificationPolicy{next: next} + r := v1RetryNotificationPolicy{next: next} return r.Do }) } + +type retryNotificationPolicy struct { +} + +func newRetryNotificationPolicy() policy.Policy { + return &retryNotificationPolicy{} +} + +func (r *retryNotificationPolicy) Do(req *policy.Request) (*http.Response, error) { + response, err := req.Next() // Make the request + + if response != nil && response.StatusCode == http.StatusServiceUnavailable { + // Grab the notification callback out of the context and, if its there, call it + notifier, ok := req.Raw().Context().Value(retryNotifyContextKey).(retryNotificationReceiver) + if ok { + notifier.RetryCallback() + } + } + + return response, err +} diff --git a/ste/xferRetrypolicy.go b/ste/xferRetrypolicy.go index b715ef01e..a953ffe2d 100644 --- a/ste/xferRetrypolicy.go +++ b/ste/xferRetrypolicy.go @@ -2,6 +2,8 @@ package ste import ( "context" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "io" "math/rand" "net" @@ -12,11 +14,10 @@ import ( "github.com/Azure/azure-pipeline-go/pipeline" "github.com/Azure/azure-storage-azcopy/v10/azbfs" "github.com/Azure/azure-storage-azcopy/v10/common" - "github.com/Azure/azure-storage-blob-go/azblob" ) // XferRetryPolicy tells the pipeline what kind of retry policy to use. See the XferRetryPolicy* constants. -// Added a new retry policy and not using the existing policy azblob.zc_retry_policy.go since there are some changes +// Added a new retry policy and not using the existing policy zc_retry_policy.go since there are some changes // in the retry policy. // Retry on all the type of network errors instead of retrying only in case of temporary or timeout errors. type XferRetryPolicy int32 @@ -293,176 +294,10 @@ func NewBFSXferRetryPolicyFactory(o XferRetryOptions) pipeline.Factory { }) } -var retrySuppressionContextKey = contextKey{"retrySuppression"} - // withNoRetryForBlob returns a context that contains a marker to say we don't want any retries to happen // Is only implemented for blob pipelines at present func withNoRetryForBlob(ctx context.Context) context.Context { - return context.WithValue(ctx, retrySuppressionContextKey, struct{}{}) - // TODO: this is fragile, in the sense that we have no way to check, here, that we are running in a pipeline that - // actually knows how to check the context for the value. Maybe add a check here, if/when we rationalize - // all our retry policies into one -} - -// TODO: Fix the separate retry policies, use Azure blob's retry policy after blob SDK with retry optimization get released. -// NewBlobXferRetryPolicyFactory creates a RetryPolicyFactory object configured using the specified options. 
-func NewBlobXferRetryPolicyFactory(o XferRetryOptions) pipeline.Factory { - o = o.defaults() // Force defaults to be calculated - return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc { - return func(ctx context.Context, request pipeline.Request) (response pipeline.Response, err error) { - // Before each try, we'll select either the primary or secondary URL. - primaryTry := int32(0) // This indicates how many tries we've attempted against the primary DC - - // We only consider retrying against a secondary if we have a read request (GET/HEAD) AND this policy has a Secondary URL it can use - considerSecondary := (request.Method == http.MethodGet || request.Method == http.MethodHead) && o.retryReadsFromSecondaryHost() != "" - - // Exponential retry algorithm: ((2 ^ attempt) - 1) * delay * random(0.8, 1.2) - // When to retry: connection failure or temporary/timeout. NOTE: StorageError considers HTTP 500/503 as temporary & is therefore retryable - // If using a secondary: - // Even tries go against primary; odd tries go against the secondary - // For a primary wait ((2 ^ primaryTries - 1) * delay * random(0.8, 1.2) - // If secondary gets a 404, don't fail, retry but future retries are only against the primary - // When retrying against a secondary, ignore the retry count and wait (.1 second * random(0.8, 1.2)) - maxTries := o.MaxTries - if _, ok := ctx.Value(retrySuppressionContextKey).(struct{}); ok { - maxTries = 1 // retries are suppressed by the context - } - for try := int32(1); try <= maxTries; try++ { - logf("\n=====> Try=%d\n", try) - - // Determine which endpoint to try. It's primary if there is no secondary or if it is an add # attempt. - tryingPrimary := !considerSecondary || (try%2 == 1) - // Select the correct host and delay - if tryingPrimary { - primaryTry++ - delay := o.calcDelay(primaryTry) - logf("Primary try=%d, Delay=%f s\n", primaryTry, delay.Seconds()) - time.Sleep(delay) // The 1st try returns 0 delay - } else { - // For casts and rounding - be careful, as per https://github.com/golang/go/issues/20757 - delay := time.Duration(float32(time.Second) * (rand.Float32()/2 + 0.8)) - logf("Secondary try=%d, Delay=%f s\n", try-primaryTry, delay.Seconds()) - time.Sleep(delay) // Delay with some jitter before trying secondary - } - - // Clone the original request to ensure that each try starts with the original (unmutated) request. - requestCopy := request.Copy() - - // For each try, seek to the beginning of the body stream. We do this even for the 1st try because - // the stream may not be at offset 0 when we first get it and we want the same behavior for the - // 1st try as for additional tries. 
- err = requestCopy.RewindBody() - common.PanicIfErr(err) - - if !tryingPrimary { - requestCopy.URL.Host = o.retryReadsFromSecondaryHost() - requestCopy.Host = o.retryReadsFromSecondaryHost() - } - - // Set the server-side timeout query parameter "timeout=[seconds]" - timeout := int32(o.TryTimeout.Seconds()) // Max seconds per try - if deadline, ok := ctx.Deadline(); ok { // If user's ctx has a deadline, make the timeout the smaller of the two - t := int32(time.Until(deadline).Seconds()) // Duration from now until user's ctx reaches its deadline - logf("MaxTryTimeout=%d secs, TimeTilDeadline=%d sec\n", timeout, t) - if t < timeout { - timeout = t - } - if timeout < 0 { - timeout = 0 // If timeout ever goes negative, set it to zero; this happen while debugging - } - logf("TryTimeout adjusted to=%d sec\n", timeout) - } - q := requestCopy.Request.URL.Query() - q.Set("timeout", strconv.Itoa(int(timeout+1))) // Add 1 to "round up" - requestCopy.Request.URL.RawQuery = q.Encode() - logf("Url=%s\n", requestCopy.Request.URL.String()) - - // Set the time for this particular retry operation and then Do the operation. - tryCtx, tryCancel := context.WithTimeout(ctx, time.Second*time.Duration(timeout)) - //requestCopy.body = &deadlineExceededReadCloser{r: requestCopy.Request.body} - response, err = next.Do(tryCtx, requestCopy) // Make the request - /*err = improveDeadlineExceeded(err) - if err == nil { - response.Response().body = &deadlineExceededReadCloser{r: response.Response().body} - }*/ - logf("Err=%v, response=%v\n", err, response) - - action := "" // This MUST get changed within the switch code below - switch { - case err == nil: - action = "NoRetry: successful HTTP request" // no error - - case !tryingPrimary && response != nil && response.Response() != nil && response.Response().StatusCode == http.StatusNotFound: - // If attempt was against the secondary & it returned a StatusNotFound (404), then - // the resource was not found. This may be due to replication delay. So, in this - // case, we'll never try the secondary again for this operation. - considerSecondary = false - action = "Retry: Secondary URL returned 404" - - case ctx.Err() != nil: - action = "NoRetry: Op timeout" - - case err != nil: - // NOTE: Protocol Responder returns non-nil if REST API returns invalid status code for the invoked operation - // retry on all the network errors. - // zc_policy_retry perform the retries on Temporary and Timeout Errors only. - // some errors like 'connection reset by peer' or 'transport connection broken' does not implement the Temporary interface - // but they should be retried. So redefined the retry policy for azcopy to retry for such errors as well. - - // TODO make sure Storage error can be cast to different package's error object - // TODO: Discuss the error handling of Go Blob SDK. - if stErr, ok := err.(azblob.StorageError); ok { - // retry only in case of temporary storage errors. - if stErr.Temporary() { - action = "Retry: StorageError with error service code and Temporary()" - } else if stErr.Response() != nil && isSuccessStatusCode(stErr.Response()) { // This is a temporarily work around. 
- action = "Retry: StorageError with success status code" - } else { - action = "NoRetry: StorageError not Temporary() and without retriable status code" - } - } else if _, ok := err.(net.Error); ok { - action = "Retry: net.Error" - } else if err == io.ErrUnexpectedEOF { - action = "Retry: io.UnexpectedEOF" - } else { - action = "NoRetry: unrecognized error" - } - - default: - action = "NoRetry: successful HTTP request" // no error - } - - logf("Action=%s\n", action) - if action[0] != 'R' { // Retry only if action starts with 'R' - if err != nil { - tryCancel() // If we're returning an error, cancel this current/last per-retry timeout context - } else { - // We wrap the last per-try context in a body and overwrite the Response's Body field with our wrapper. - // So, when the user closes the Body, the our per-try context gets closed too. - // Another option, is that the Last Policy do this wrapping for a per-retry context (not for the user's context) - if response == nil || response.Response() == nil { - // We do panic in the case response or response.Response() is nil, - // as for client, the response should not be nil if request is sent and the operations is executed successfully. - // Another option, is that execute the cancel function when response or response.Response() is nil, - // as in this case, current per-try has nothing to do in future. - panic("invalid state, response should not be nil when the operation is executed successfully") - } - - response.Response().Body = &contextCancelReadCloser{cf: tryCancel, body: response.Response().Body} - } - break // Don't retry - } - if response.Response() != nil { - // If we're going to retry and we got a previous response, then flush its body to avoid leaking its TCP connection - _, _ = io.Copy(io.Discard, response.Response().Body) - response.Response().Body.Close() - } - // If retrying, cancel the current per-try timeout context - tryCancel() - } - return response, err // Not retryable or too many retries; return the last response/error - } - }) + return runtime.WithRetryOptions(ctx, policy.RetryOptions{MaxRetries: 1}) } var successStatusCodes = []int{http.StatusOK, http.StatusCreated, http.StatusAccepted, http.StatusNoContent, http.StatusPartialContent} diff --git a/ste/xferStatsPolicy.go b/ste/xferStatsPolicy.go index e40792817..c4ccbf2f9 100644 --- a/ste/xferStatsPolicy.go +++ b/ste/xferStatsPolicy.go @@ -24,6 +24,7 @@ import ( "bytes" "context" "github.com/Azure/azure-pipeline-go/pipeline" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-storage-azcopy/v10/common" "io" "net/http" @@ -219,3 +220,42 @@ func newXferStatsPolicyFactory(accumulator *PipelineNetworkStats) pipeline.Facto return r.Do }) } + +type statsPolicy struct { + stats *PipelineNetworkStats +} + +func (s statsPolicy) Do(req *policy.Request) (*http.Response, error) { + start := time.Now() + + response, err := req.Next() + if s.stats != nil { + if s.stats.IsStarted() { + atomic.AddInt64(&s.stats.atomicOperationCount, 1) + atomic.AddInt64(&s.stats.atomicE2ETotalMilliseconds, int64(time.Since(start).Seconds()*1000)) + + if err != nil && !isContextCancelledError(err) { + // no response from server + atomic.AddInt64(&s.stats.atomicNetworkErrorCount, 1) + } + } + + // always look at retries, even if not started, because concurrency tuner needs to know about them + // TODO should we also count status 500? 
It is mentioned here as timeout:https://docs.microsoft.com/en-us/azure/storage/common/storage-scalability-targets + if response != nil && response.StatusCode == http.StatusServiceUnavailable { + s.stats.tunerInterface.recordRetry() // always tell the tuner + if s.stats.IsStarted() { // but only count it here, if we have started + // To find out why the server was busy we need to look at the response + responseBodyText := transparentlyReadBody(response) + s.stats.recordRetry(responseBodyText) + } + + } + } + + return response, err +} + +func newStatsPolicy(accumulator *PipelineNetworkStats) policy.Policy { + return statsPolicy{stats: accumulator} +} diff --git a/ste/xferVersionPolicy.go b/ste/xferVersionPolicy.go new file mode 100644 index 000000000..b3afdbe2f --- /dev/null +++ b/ste/xferVersionPolicy.go @@ -0,0 +1,120 @@ +// Copyright © Microsoft +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package ste + +import ( + "context" + "github.com/Azure/azure-pipeline-go/pipeline" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-storage-azcopy/v10/common" + "net/http" +) + +type serviceAPIVersionOverride struct{} + +// ServiceAPIVersionOverride is a global variable in package ste which is a key to Service Api Version Value set in the every Job's context. +var ServiceAPIVersionOverride = serviceAPIVersionOverride{} + +// DefaultServiceApiVersion is the default value of service api version that is set as value to the ServiceAPIVersionOverride in every Job's context. +var DefaultServiceApiVersion = common.GetLifecycleMgr().GetEnvironmentVariable(common.EEnvironmentVariable.DefaultServiceApiVersion()) + +// NewVersionPolicyFactory creates a factory that can override the service version +// set in the request header. +// If the context has key overwrite-current-version set to false, then x-ms-version in +// request is not overwritten else it will set x-ms-version to 207-04-17 +func NewVersionPolicyFactory() pipeline.Factory { + return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc { + return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { + // get the service api version value using the ServiceAPIVersionOverride set in the context. 
+ if value := ctx.Value(ServiceAPIVersionOverride); value != nil { + request.Header.Set("x-ms-version", value.(string)) + } + resp, err := next.Do(ctx, request) + return resp, err + } + }) +} + +type versionPolicy struct { +} + +func newVersionPolicy() policy.Policy { + return &versionPolicy{} +} + +func (r *versionPolicy) Do(req *policy.Request) (*http.Response, error) { + // get the service api version value using the ServiceAPIVersionOverride set in the context. + if value := req.Raw().Context().Value(ServiceAPIVersionOverride); value != nil { + req.Raw().Header["x-ms-version"] = []string{value.(string)} + } + return req.Next() +} + +// TODO: Delete me when bumping the service version is no longer relevant. +type coldTierPolicy struct { +} + +func newColdTierPolicy() policy.Policy { + return &coldTierPolicy{} +} + +func (r *coldTierPolicy) Do(req *policy.Request) (*http.Response, error) { + if req.Raw().Header.Get("x-ms-access-tier") == common.EBlockBlobTier.Cold().String() { + req.Raw().Header["x-ms-version"] = []string{"2021-12-02"} + } + return req.Next() +} + +func NewTrailingDotPolicyFactory(trailingDot common.TrailingDotOption, from common.Location) pipeline.Factory { + return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc { + return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { + if trailingDot == common.ETrailingDotOption.Enable() { + request.Header.Set("x-ms-allow-trailing-dot", "true") + if from == common.ELocation.File() { + request.Header.Set("x-ms-source-allow-trailing-dot", "true") + } + request.Header.Set("x-ms-version", "2022-11-02") + } + return next.Do(ctx, request) + } + }) +} + +// TODO: Delete me when bumping the service version is no longer relevant. 
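// Editor's note: an illustrative sketch, not part of this patch, of how the Track 2
// versionPolicy and coldTierPolicy above are driven. The job context carries
// ServiceAPIVersionOverride (exactly as the testSuite code later in this patch does),
// and the policies rewrite x-ms-version on each request. Registering them as
// PerRetryPolicies here is an assumption of the sketch; the policy ordering AzCopy
// really uses is defined outside this hunk.
package ste

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
)

func exampleVersionOverride(blobURL string) (*blockblob.Client, context.Context, error) {
	// The job context carries the override; versionPolicy rewrites x-ms-version on
	// every request issued with this context.
	ctx := context.WithValue(context.Background(), ServiceAPIVersionOverride, DefaultServiceApiVersion)

	opts := &blockblob.ClientOptions{ClientOptions: azcore.ClientOptions{
		PerRetryPolicies: []policy.Policy{newVersionPolicy(), newColdTierPolicy()},
	}}
	client, err := blockblob.NewClientWithNoCredential(blobURL, opts)
	return client, ctx, err
}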
+type trailingDotPolicy struct { + trailingDot *common.TrailingDotOption + from *common.Location +} + +func NewTrailingDotPolicy(trailingDot *common.TrailingDotOption, from *common.Location) policy.Policy { + return &trailingDotPolicy{trailingDot: trailingDot, from: from} +} + +func (r *trailingDotPolicy) Do(req *policy.Request) (*http.Response, error) { + if r.trailingDot != nil && *r.trailingDot == common.ETrailingDotOption.Enable() { + req.Raw().Header.Set("x-ms-allow-trailing-dot", "true") + if r.from != nil && *r.from == common.ELocation.File() { + req.Raw().Header.Set("x-ms-source-allow-trailing-dot", "true") + } + req.Raw().Header["x-ms-version"] = []string{"2022-11-02"} + } + return req.Next() +} \ No newline at end of file diff --git a/testSuite/cmd/clean.go b/testSuite/cmd/clean.go index 03ed0e769..c3c9dc95e 100644 --- a/testSuite/cmd/clean.go +++ b/testSuite/cmd/clean.go @@ -3,7 +3,16 @@ package cmd import ( gcpUtils "cloud.google.com/go/storage" "context" + "errors" "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" + blobservice "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service" + sharefile "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" + fileservice "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/service" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/share" "github.com/Azure/azure-storage-azcopy/v10/ste" "google.golang.org/api/iterator" "net/http" @@ -16,8 +25,6 @@ import ( "github.com/Azure/azure-pipeline-go/pipeline" "github.com/Azure/azure-storage-azcopy/v10/azbfs" "github.com/Azure/azure-storage-azcopy/v10/common" - "github.com/Azure/azure-storage-blob-go/azblob" - "github.com/Azure/azure-storage-file-go/azfile" "github.com/JeffreyRichter/enum/enum" "github.com/spf13/cobra" ) @@ -152,24 +159,18 @@ func init() { cleanCmd.PersistentFlags().StringVar(&serviceTypeStr, "serviceType", "Blob", "Account type, could be blob, file or blobFS currently.") } -func cleanContainer(container string) { - containerURLBase, err := url.Parse(container) +func cleanContainer(resourceURL string) { + containerClient := createContainerClient(resourceURL) - if err != nil { - fmt.Println("error parsing the container sas, ", err) - os.Exit(1) - } - - p := createBlobPipeline(*containerURLBase) - containerUrl := azblob.NewContainerURL(*containerURLBase, p) ctx := context.WithValue(context.Background(), ste.ServiceAPIVersionOverride, ste.DefaultServiceApiVersion) // Create the container. This will fail if it's already present but this saves us the pain of a container being missing for one reason or another. - _, _ = containerUrl.Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone) + _, _ = containerClient.Create(ctx, nil) // perform a list blob - for marker := (azblob.Marker{}); marker.NotDone(); { + pager := containerClient.NewListBlobsFlatPager(nil) + for pager.More() { // look for all blobs that start with the prefix, so that if a blob is under the virtual directory, it will show up - listBlob, err := containerUrl.ListBlobsFlatSegment(ctx, marker, azblob.ListBlobsSegmentOptions{}) + listBlob, err := pager.NextPage(ctx) if err != nil { fmt.Println("error listing blobs inside the container. 
Please check the container sas", err) os.Exit(1) @@ -177,53 +178,36 @@ func cleanContainer(container string) { // Process the blobs returned in this result segment (if the segment is empty, the loop body won't execute) for _, blobInfo := range listBlob.Segment.BlobItems { - _, err := containerUrl.NewBlobURL(blobInfo.Name).Delete(ctx, "include", azblob.BlobAccessConditions{}) + _, err := containerClient.NewBlobClient(*blobInfo.Name).Delete(ctx, &blob.DeleteOptions{DeleteSnapshots: to.Ptr(blob.DeleteSnapshotsOptionTypeInclude)}) if err != nil { fmt.Println("error deleting the blob from container ", blobInfo.Name) os.Exit(1) } } - marker = listBlob.NextMarker } } -func cleanBlob(blob string) { - blobURLBase, err := url.Parse(blob) - - if err != nil { - fmt.Println("error parsing the container sas ", err) - os.Exit(1) - } - - p := createBlobPipeline(*blobURLBase) - blobUrl := azblob.NewBlobURL(*blobURLBase, p) +func cleanBlob(resourceURL string) { + blobClient := createBlobClient(resourceURL) ctx := context.WithValue(context.Background(), ste.ServiceAPIVersionOverride, ste.DefaultServiceApiVersion) - _, err = blobUrl.Delete(ctx, "include", azblob.BlobAccessConditions{}) + _, err := blobClient.Delete(ctx, &blob.DeleteOptions{DeleteSnapshots: to.Ptr(blob.DeleteSnapshotsOptionTypeInclude)}) if err != nil { fmt.Println("error deleting the blob ", err) os.Exit(1) } } -func cleanShare(shareURLStr string) { - u, err := url.Parse(shareURLStr) - - if err != nil { - fmt.Println("error parsing the share URL with SAS ", err) - os.Exit(1) - } - - p := createFilePipeline(*u) - shareURL := azfile.NewShareURL(*u, p) +func cleanShare(resourceURL string) { + shareClient := createShareClient(resourceURL) ctx := context.WithValue(context.Background(), ste.ServiceAPIVersionOverride, ste.DefaultServiceApiVersion) // Create the share. This will fail if it's already present but this saves us the pain of a container being missing for one reason or another. 
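// Editor's note: a minimal sketch, not part of this patch, of the Track 2 listing
// shape that replaces the old Marker loop in cleanContainer above
// (NewListBlobsFlatPager / More / NextPage, with pointer-valued item fields).
package cmd

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
)

func listBlobNames(ctx context.Context, containerClient *container.Client) ([]string, error) {
	var names []string
	pager := containerClient.NewListBlobsFlatPager(nil)
	for pager.More() {
		page, err := pager.NextPage(ctx)
		if err != nil {
			return nil, err
		}
		for _, item := range page.Segment.BlobItems {
			names = append(names, *item.Name) // list item fields are pointers in Track 2
		}
	}
	return names, nil
}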
- _, _ = shareURL.Create(ctx, azfile.Metadata{}, 0) + _, _ = shareClient.Create(ctx, nil) - _, err = shareURL.Delete(ctx, azfile.DeleteSnapshotsOptionInclude) + _, err := shareClient.Delete(ctx, &share.DeleteOptions{DeleteSnapshots: to.Ptr(share.DeleteSnapshotsOptionTypeInclude)}) if err != nil { - sErr, sErrOk := err.(azfile.StorageError) - if sErrOk && sErr.Response().StatusCode != http.StatusNotFound { + var respErr *azcore.ResponseError + if errors.As(err, &respErr) && respErr.StatusCode != http.StatusNotFound { fmt.Fprintf(os.Stdout, "error deleting the share for clean share, error '%v'\n", err) os.Exit(1) } @@ -232,76 +216,182 @@ func cleanShare(shareURLStr string) { // Sleep seconds to wait the share deletion got succeeded time.Sleep(45 * time.Second) - _, err = shareURL.Create(ctx, azfile.Metadata{}, 0) + _, err = shareClient.Create(ctx, nil) if err != nil { fmt.Fprintf(os.Stdout, "error creating the share for clean share, error '%v'\n", err) os.Exit(1) } } -func cleanFile(fileURLStr string) { - u, err := url.Parse(fileURLStr) +func cleanFile(resourceURL string) { + fileClient := createShareFileClient(resourceURL) + ctx := context.WithValue(context.Background(), ste.ServiceAPIVersionOverride, ste.DefaultServiceApiVersion) + _, err := fileClient.Delete(ctx, nil) if err != nil { - fmt.Println("error parsing the file URL with SAS", err) + fmt.Println("error deleting the file ", err) os.Exit(1) } +} - p := createFilePipeline(*u) - fileURL := azfile.NewFileURL(*u, p) - ctx := context.WithValue(context.Background(), ste.ServiceAPIVersionOverride, ste.DefaultServiceApiVersion) +func createBlobClient(resourceURL string) *blob.Client { + blobURLParts, err := blob.ParseURL(resourceURL) + if err != nil { + fmt.Println("Failed to parse url") + os.Exit(1) + } + containerClient := createContainerClient(resourceURL) + blobClient := containerClient.NewBlobClient(blobURLParts.BlobName) + if blobURLParts.Snapshot != "" { + blobClient, err = blobClient.WithSnapshot(blobURLParts.Snapshot) + if err != nil { + fmt.Println("Failed to create snapshot client") + os.Exit(1) + } + } + if blobURLParts.VersionID != "" { + blobClient, err = blobClient.WithVersionID(blobURLParts.VersionID) + if err != nil { + fmt.Println("Failed to create version id client") + os.Exit(1) + } + } - _, err = fileURL.Delete(ctx) + return blobClient +} + +func createContainerClient(resourceURL string) *container.Client { + blobURLParts, err := blob.ParseURL(resourceURL) if err != nil { - fmt.Println("error deleting the file ", err) + fmt.Println("Failed to parse url") os.Exit(1) } + return createBlobServiceClient(resourceURL).NewContainerClient(blobURLParts.ContainerName) } -func createBlobPipeline(u url.URL) pipeline.Pipeline { +func createBlobServiceClient(resourceURL string) *blobservice.Client { + blobURLParts, err := blob.ParseURL(resourceURL) + if err != nil { + fmt.Println("Failed to parse url") + os.Exit(1) + } + blobURLParts.ContainerName = "" + blobURLParts.BlobName = "" + blobURLParts.VersionID = "" + blobURLParts.Snapshot = "" + + // create the pipeline, preferring SAS over account name/key + if blobURLParts.SAS.Encode() != "" { + bsc, err := blobservice.NewClientWithNoCredential(blobURLParts.String(), nil) + if err != nil { + fmt.Println("Failed to create blob service client") + os.Exit(1) + } + return bsc + } + // Get name and key variables from environment. 
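// Editor's note: createBlobServiceClient above trims a resource URL down to the
// service root by blanking the parsed parts and re-encoding them; below is a minimal
// sketch of that round trip (any SAS in the URL survives String()). Not part of the
// patch; the helper name is illustrative.
package cmd

import "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"

func serviceURLOf(resourceURL string) (string, error) {
	parts, err := blob.ParseURL(resourceURL)
	if err != nil {
		return "", err
	}
	// Drop the container/blob-specific pieces so only the account endpoint (plus SAS) remains.
	parts.ContainerName = ""
	parts.BlobName = ""
	parts.Snapshot = ""
	parts.VersionID = ""
	return parts.String(), nil
}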
name := os.Getenv("ACCOUNT_NAME") key := os.Getenv("ACCOUNT_KEY") - blobURLParts := azblob.NewBlobURLParts(u) // If the ACCOUNT_NAME and ACCOUNT_KEY are not set in the environment, and there is no SAS token present if (name == "" && key == "") && blobURLParts.SAS.Encode() == "" { fmt.Println("ACCOUNT_NAME and ACCOUNT_KEY should be set, or a SAS token should be supplied before cleaning the file system") os.Exit(1) } - // create the pipeline, preferring SAS over account name/key - if blobURLParts.SAS.Encode() != "" { - return azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{}) + c, err := blob.NewSharedKeyCredential(name, key) + if err != nil { + fmt.Println("Failed to create shared key credential!") + os.Exit(1) + } + bsc, err := blobservice.NewClientWithSharedKeyCredential(blobURLParts.String(), c, nil) + if err != nil { + fmt.Println("Failed to create blob service client") + os.Exit(1) } + return bsc +} - c, err := azblob.NewSharedKeyCredential(name, key) +func createShareFileClient(resourceURL string) *sharefile.Client { + fileURLParts, err := sharefile.ParseURL(resourceURL) if err != nil { - fmt.Println("Failed to create shared key credential!") + fmt.Println("Failed to parse url") os.Exit(1) } - return azblob.NewPipeline(c, azblob.PipelineOptions{}) + shareClient := createShareClient(resourceURL) + fileClient := shareClient.NewRootDirectoryClient().NewFileClient(fileURLParts.DirectoryOrFilePath) + return fileClient } -func createFilePipeline(u url.URL) pipeline.Pipeline { - name := os.Getenv("ACCOUNT_NAME") - key := os.Getenv("ACCOUNT_KEY") - fileURLParts := azfile.NewFileURLParts(u) - // If the ACCOUNT_NAME and ACCOUNT_KEY are not set in the environment, and there is no SAS token present - if (name == "" && key == "") && fileURLParts.SAS.Encode() == "" { - fmt.Println("ACCOUNT_NAME and ACCOUNT_KEY should be set, or a SAS token should be supplied before cleaning the file system") +//func createShareDirectoryClient(resourceURL string) *sharedirectory.Client { +// fileURLParts, err := sharefile.ParseURL(resourceURL) +// if err != nil { +// fmt.Println("Failed to parse url") +// os.Exit(1) +// } +// shareClient := createShareClient(resourceURL) +// if fileURLParts.DirectoryOrFilePath == "" { +// return shareClient.NewRootDirectoryClient() +// } else { +// return shareClient.NewDirectoryClient(fileURLParts.DirectoryOrFilePath) +// } +//} + +func createShareClient(resourceURL string) *share.Client { + fileURLParts, err := sharefile.ParseURL(resourceURL) + if err != nil { + fmt.Println("Failed to parse url") + os.Exit(1) + } + sc := createFileServiceClient(resourceURL).NewShareClient(fileURLParts.ShareName) + if fileURLParts.ShareSnapshot != "" { + sc, err = sc.WithSnapshot(fileURLParts.ShareSnapshot) + if err != nil { + fmt.Println("Failed to parse snapshot") + os.Exit(1) + } + } + return sc +} + +func createFileServiceClient(resourceURL string) *fileservice.Client { + fileURLParts, err := sharefile.ParseURL(resourceURL) + if err != nil { + fmt.Println("Failed to parse url") os.Exit(1) } + fileURLParts.ShareName = "" + fileURLParts.ShareSnapshot = "" + fileURLParts.DirectoryOrFilePath = "" // create the pipeline, preferring SAS over account name/key if fileURLParts.SAS.Encode() != "" { - return azfile.NewPipeline(azfile.NewAnonymousCredential(), azfile.PipelineOptions{}) + fsc, err := fileservice.NewClientWithNoCredential(fileURLParts.String(), nil) + if err != nil { + fmt.Println("Failed to create blob service client") + os.Exit(1) + } + return fsc } - c, err := 
azfile.NewSharedKeyCredential(name, key) + // Get name and key variables from environment. + name := os.Getenv("ACCOUNT_NAME") + key := os.Getenv("ACCOUNT_KEY") + // If the ACCOUNT_NAME and ACCOUNT_KEY are not set in the environment, and there is no SAS token present + if (name == "" && key == "") && fileURLParts.SAS.Encode() == "" { + fmt.Println("ACCOUNT_NAME and ACCOUNT_KEY should be set, or a SAS token should be supplied before cleaning the file system") + os.Exit(1) + } + c, err := sharefile.NewSharedKeyCredential(name, key) if err != nil { fmt.Println("Failed to create shared key credential!") os.Exit(1) } - return azfile.NewPipeline(c, azfile.PipelineOptions{}) + fsc, err := fileservice.NewClientWithSharedKeyCredential(fileURLParts.String(), c, nil) + if err != nil { + fmt.Println("Failed to create blob service client") + os.Exit(1) + } + return fsc } func createBlobFSPipeline(u url.URL) pipeline.Pipeline { @@ -369,64 +459,49 @@ func cleanBfsFile(fileURLStr string) { } func cleanBlobAccount(resourceURL string) { - accountURLBase, err := url.Parse(resourceURL) - - if err != nil { - fmt.Println("error parsing the account sas ", err) - os.Exit(1) - } - - p := createBlobPipeline(*accountURLBase) - accountURL := azblob.NewServiceURL(*accountURLBase, p) + serviceClient := createBlobServiceClient(resourceURL) ctx := context.WithValue(context.Background(), ste.ServiceAPIVersionOverride, ste.DefaultServiceApiVersion) // perform a list account - for marker := (azblob.Marker{}); marker.NotDone(); { + pager := serviceClient.NewListContainersPager(nil) + + for pager.More() { // look for all blobs that start with the prefix, so that if a blob is under the virtual directory, it will show up - lResp, err := accountURL.ListContainersSegment(ctx, marker, azblob.ListContainersSegmentOptions{}) + lResp, err := pager.NextPage(ctx) if err != nil { fmt.Println("error listing containers, please check the container sas, ", err) os.Exit(1) } for _, containerItem := range lResp.ContainerItems { - _, err := accountURL.NewContainerURL(containerItem.Name).Delete(ctx, azblob.ContainerAccessConditions{}) + _, err := serviceClient.NewContainerClient(*containerItem.Name).Delete(ctx, nil) if err != nil { fmt.Println("error deleting the container from account, ", err) os.Exit(1) } } - marker = lResp.NextMarker } } func cleanFileAccount(resourceURL string) { - accountURLBase, err := url.Parse(resourceURL) - - if err != nil { - fmt.Println("error parsing the account sas ", err) - os.Exit(1) - } - - p := createFilePipeline(*accountURLBase) - accountURL := azfile.NewServiceURL(*accountURLBase, p) + serviceClient := createFileServiceClient(resourceURL) ctx := context.WithValue(context.Background(), ste.ServiceAPIVersionOverride, ste.DefaultServiceApiVersion) // perform a list account - for marker := (azfile.Marker{}); marker.NotDone(); { + pager := serviceClient.NewListSharesPager(nil) + for pager.More() { // look for all blobs that start with the prefix, so that if a blob is under the virtual directory, it will show up - lResp, err := accountURL.ListSharesSegment(ctx, marker, azfile.ListSharesOptions{}) + lResp, err := pager.NextPage(ctx) if err != nil { fmt.Println("error listing shares, please check the share sas, ", err) os.Exit(1) } - for _, shareItem := range lResp.ShareItems { - _, err := accountURL.NewShareURL(shareItem.Name).Delete(ctx, azfile.DeleteSnapshotsOptionInclude) + for _, shareItem := range lResp.Shares { + _, err := serviceClient.NewShareClient(*shareItem.Name).Delete(ctx, 
&share.DeleteOptions{DeleteSnapshots: to.Ptr(share.DeleteSnapshotsOptionTypeInclude)}) if err != nil { fmt.Println("error deleting the share from account, ", err) os.Exit(1) } } - marker = lResp.NextMarker } } diff --git a/testSuite/cmd/common.go b/testSuite/cmd/common.go index 7024a3cc8..f08c1b2bc 100644 --- a/testSuite/cmd/common.go +++ b/testSuite/cmd/common.go @@ -3,12 +3,14 @@ package cmd import ( gcpUtils "cloud.google.com/go/storage" "context" + "errors" "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-storage-azcopy/v10/azbfs" "net/http" "os" "strings" - "github.com/Azure/azure-storage-blob-go/azblob" minio "github.com/minio/minio-go" "github.com/Azure/azure-storage-azcopy/v10/common" @@ -63,9 +65,16 @@ func createGCPClientWithGCSSDK() (*gcpUtils.Client, error) { func ignoreStorageConflictStatus(err error) error { if err != nil { + if stgErr, ok := err.(azbfs.StorageError); ok && (stgErr.Response().StatusCode != http.StatusConflict) { + return err + } // Skip the error, when resource already exists. - if stgErr, ok := err.(azblob.StorageError); !ok || - (stgErr.Response().StatusCode != http.StatusConflict) { + var respErr *azcore.ResponseError + if errors.As(err, &respErr) { + if respErr.StatusCode != http.StatusConflict { + return err + } + } else { return err } } diff --git a/testSuite/cmd/create.go b/testSuite/cmd/create.go index 73ac0bb7e..5aafd9596 100644 --- a/testSuite/cmd/create.go +++ b/testSuite/cmd/create.go @@ -6,6 +6,14 @@ import ( "context" "crypto/md5" "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" + sharedirectory "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/directory" + sharefile "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/share" "net/url" "os" "time" @@ -16,8 +24,6 @@ import ( "strings" "github.com/Azure/azure-storage-azcopy/v10/common" - "github.com/Azure/azure-storage-blob-go/azblob" - "github.com/Azure/azure-storage-file-go/azfile" minio "github.com/minio/minio-go" "github.com/spf13/cobra" ) @@ -45,7 +51,7 @@ func init() { resourceTypeStr := "" blobSize := uint32(0) - metaData := "" + metadata := "" contentType := "" contentEncoding := "" contentDisposition := "" @@ -53,7 +59,7 @@ func init() { cacheControl := "" contentMD5 := "" location := "" - tier := azblob.DefaultAccessTier + var tier *blob.AccessTier = nil createCmd := &cobra.Command{ Use: "create", @@ -91,14 +97,14 @@ func init() { createBlob( resourceURL, blobSize, - getBlobMetadata(metaData), - azblob.BlobHTTPHeaders{ - ContentType: contentType, - ContentDisposition: contentDisposition, - ContentEncoding: contentEncoding, - ContentLanguage: contentLanguage, - ContentMD5: md5, - CacheControl: cacheControl, + getMetadata(metadata), + &blob.HTTPHeaders{ + BlobContentType: &contentType, + BlobContentDisposition: &contentDisposition, + BlobContentEncoding: &contentEncoding, + BlobContentLanguage: &contentLanguage, + BlobContentMD5: md5, + BlobCacheControl: &cacheControl, }, tier) default: panic(fmt.Errorf("not implemented %v", resourceType)) @@ -111,14 +117,14 @@ func init() { createFile( resourceURL, blobSize, - getFileMetadata(metaData), - azfile.FileHTTPHeaders{ - ContentType: contentType, - ContentDisposition: contentDisposition, - 
ContentEncoding: contentEncoding, - ContentLanguage: contentLanguage, + getMetadata(metadata), + &sharefile.HTTPHeaders{ + ContentType: &contentType, + ContentDisposition: &contentDisposition, + ContentEncoding: &contentEncoding, + ContentLanguage: &contentLanguage, ContentMD5: md5, - CacheControl: cacheControl, + CacheControl: &cacheControl, }) default: panic(fmt.Errorf("not implemented %v", resourceType)) @@ -140,7 +146,7 @@ func init() { ContentEncoding: contentEncoding, ContentLanguage: contentLanguage, CacheControl: cacheControl, - UserMetadata: getS3Metadata(metaData), + UserMetadata: getS3Metadata(metadata), }) default: panic(fmt.Errorf("not implemented %v", resourceType)) @@ -156,7 +162,7 @@ func init() { ContentEncoding: contentEncoding, ContentLanguage: contentLanguage, CacheControl: cacheControl, - Metadata: getS3Metadata(metaData), + Metadata: getS3Metadata(metadata), }) } case EServiceType.BlobFS(): @@ -171,7 +177,7 @@ func init() { createCmd.PersistentFlags().StringVar(&serviceTypeStr, "serviceType", "Blob", "Service type, could be blob, file or blobFS currently.") createCmd.PersistentFlags().StringVar(&resourceTypeStr, "resourceType", "SingleFile", "Resource type, could be a single file, bucket.") createCmd.PersistentFlags().Uint32Var(&blobSize, "blob-size", 0, "") - createCmd.PersistentFlags().StringVar(&metaData, "metadata", "", "metadata for blob.") + createCmd.PersistentFlags().StringVar(&metadata, "metadata", "", "metadata for blob.") createCmd.PersistentFlags().StringVar(&contentType, "content-type", "", "content type for blob.") createCmd.PersistentFlags().StringVar(&contentEncoding, "content-encoding", "", "content encoding for blob.") createCmd.PersistentFlags().StringVar(&contentDisposition, "content-disposition", "", "content disposition for blob.") @@ -183,28 +189,14 @@ func init() { } -func getBlobMetadata(metadataString string) azblob.Metadata { - var metadata azblob.Metadata - - if len(metadataString) > 0 { - metadata = azblob.Metadata{} - for _, keyAndValue := range strings.Split(metadataString, ";") { // key/value pairs are separated by ';' - kv := strings.Split(keyAndValue, "=") // key/value are separated by '=' - metadata[kv[0]] = kv[1] - } - } - - return metadata -} - -func getFileMetadata(metadataString string) azfile.Metadata { - var metadata azfile.Metadata +func getMetadata(metadataString string) map[string]*string { + var metadata map[string]*string if len(metadataString) > 0 { - metadata = azfile.Metadata{} + metadata = map[string]*string{} for _, keyAndValue := range strings.Split(metadataString, ";") { // key/value pairs are separated by ';' kv := strings.Split(keyAndValue, "=") // key/value are separated by '=' - metadata[kv[0]] = kv[1] + metadata[kv[0]] = to.Ptr(kv[1]) } } @@ -225,18 +217,9 @@ func getS3Metadata(metadataString string) map[string]string { } // Can be used for overwrite scenarios. 
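// Editor's note: Track 2 metadata is map[string]*string rather than the old
// map[string]string, which is why getBlobMetadata and getFileMetadata collapse into
// the single getMetadata above. A small sketch of the conversion it performs; the
// key/value pairs here are made up. Not part of the patch.
package cmd

import "fmt"

func exampleMetadata() {
	md := getMetadata("author=azcopy;env=test")
	// md is equivalent to map[string]*string{"author": to.Ptr("azcopy"), "env": to.Ptr("test")}
	for key, value := range md {
		fmt.Printf("%s=%s\n", key, *value)
	}
}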
-func createContainer(container string) { - u, err := url.Parse(container) - - if err != nil { - fmt.Println("error parsing the container URL with SAS ", err) - os.Exit(1) - } - - p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{}) - - containerURL := azblob.NewContainerURL(*u, p) - _, err = containerURL.Create(context.Background(), azblob.Metadata{}, azblob.PublicAccessNone) +func createContainer(containerURL string) { + containerClient, _ := container.NewClientWithNoCredential(containerURL, nil) + _, err := containerClient.Create(context.Background(), nil) if ignoreStorageConflictStatus(err) != nil { fmt.Println("fail to create container, ", err) @@ -244,74 +227,55 @@ func createContainer(container string) { } } -func createBlob(blobURL string, blobSize uint32, metadata azblob.Metadata, blobHTTPHeaders azblob.BlobHTTPHeaders, tier azblob.AccessTierType) { - url, err := url.Parse(blobURL) - if err != nil { - fmt.Println("error parsing the blob sas ", err) - os.Exit(1) - } - p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{}) - blobUrl := azblob.NewBlockBlobURL(*url, p) +func createBlob(blobURL string, blobSize uint32, metadata map[string]*string, blobHTTPHeaders *blob.HTTPHeaders, tier *blob.AccessTier) { + blobClient, _ := blockblob.NewClientWithNoCredential(blobURL, nil) randomString := createStringWithRandomChars(int(blobSize)) - if blobHTTPHeaders.ContentType == "" { - blobHTTPHeaders.ContentType = strings.Split(http.DetectContentType([]byte(randomString)), ";")[0] + if blobHTTPHeaders.BlobContentType == nil { + blobHTTPHeaders.BlobContentType = to.Ptr(strings.Split(http.DetectContentType([]byte(randomString)), ";")[0]) } // Generate a content MD5 for the new blob if requested if genMD5 { md5hasher := md5.New() md5hasher.Write([]byte(randomString)) - blobHTTPHeaders.ContentMD5 = md5hasher.Sum(nil) + blobHTTPHeaders.BlobContentMD5 = md5hasher.Sum(nil) } - putBlobResp, err := blobUrl.Upload( - context.Background(), - strings.NewReader(randomString), - blobHTTPHeaders, - metadata, - azblob.BlobAccessConditions{}, - tier, - nil, - azblob.ClientProvidedKeyOptions{}, - azblob.ImmutabilityPolicyOptions{}) + _, err := blobClient.Upload(context.Background(), streaming.NopCloser(strings.NewReader(randomString)), + &blockblob.UploadOptions{ + HTTPHeaders: blobHTTPHeaders, + Metadata: metadata, + Tier: tier, + }) if err != nil { fmt.Printf("error uploading the blob %v\n", err) os.Exit(1) } - if putBlobResp.Response() != nil { - _, _ = io.Copy(io.Discard, putBlobResp.Response().Body) - putBlobResp.Response().Body.Close() - } } func createShareOrDirectory(shareOrDirectoryURLStr string) { - u, err := url.Parse(shareOrDirectoryURLStr) - + fileURLParts, err := sharefile.ParseURL(shareOrDirectoryURLStr) if err != nil { - fmt.Println("error parsing the share or directory URL with SAS ", err) + fmt.Println("error createShareOrDirectory with URL, ", err) os.Exit(1) } - p := azfile.NewPipeline(azfile.NewAnonymousCredential(), azfile.PipelineOptions{}) - - fileURLPart := azfile.NewFileURLParts(*u) - isShare := false - if fileURLPart.ShareName != "" && fileURLPart.DirectoryOrFilePath == "" { + if fileURLParts.ShareName != "" && fileURLParts.DirectoryOrFilePath == "" { isShare = true // This is a share - shareURL := azfile.NewShareURL(*u, p) - _, err := shareURL.Create(context.Background(), azfile.Metadata{}, 0) + shareClient, _ := share.NewClientWithNoCredential(shareOrDirectoryURLStr, nil) + _, err := shareClient.Create(context.Background(), nil) 
if ignoreStorageConflictStatus(err) != nil { fmt.Println("fail to create share, ", err) os.Exit(1) } } - dirURL := azfile.NewDirectoryURL(*u, p) // i.e. root directory, in share's case + directoryClient, _ := sharedirectory.NewClientWithNoCredential(shareOrDirectoryURLStr, nil) // i.e. root directory, in share's case if !isShare { - _, err := dirURL.Create(context.Background(), azfile.Metadata{}, azfile.SMBProperties{}) + _, err := directoryClient.Create(context.Background(), nil) if ignoreStorageConflictStatus(err) != nil { fmt.Println("fail to create directory, ", err) os.Exit(1) @@ -321,25 +285,19 @@ func createShareOrDirectory(shareOrDirectoryURLStr string) { // Finally valdiate if directory with specified URL exists, if doesn't exist, then report create failure. time.Sleep(1 * time.Second) - _, err = dirURL.GetProperties(context.Background()) + _, err = directoryClient.GetProperties(context.Background(), nil) if err != nil { fmt.Println("error createShareOrDirectory with URL, ", err) os.Exit(1) } } -func createFile(fileURLStr string, fileSize uint32, metadata azfile.Metadata, fileHTTPHeaders azfile.FileHTTPHeaders) { - url, err := url.Parse(fileURLStr) - if err != nil { - fmt.Println("error parsing the blob sas ", err) - os.Exit(1) - } - p := azfile.NewPipeline(azfile.NewAnonymousCredential(), azfile.PipelineOptions{}) - fileURL := azfile.NewFileURL(*url, p) +func createFile(fileURLStr string, fileSize uint32, metadata map[string]*string, fileHTTPHeaders *sharefile.HTTPHeaders) { + fileClient, _ := sharefile.NewClientWithNoCredential(fileURLStr, nil) randomString := createStringWithRandomChars(int(fileSize)) - if fileHTTPHeaders.ContentType == "" { - fileHTTPHeaders.ContentType = strings.Split(http.DetectContentType([]byte(randomString)), ";")[0] + if fileHTTPHeaders.ContentType == nil { + fileHTTPHeaders.ContentType = to.Ptr(strings.Split(http.DetectContentType([]byte(randomString)), ";")[0]) } // Generate a content MD5 for the new blob if requested @@ -349,14 +307,21 @@ func createFile(fileURLStr string, fileSize uint32, metadata azfile.Metadata, fi fileHTTPHeaders.ContentMD5 = md5hasher.Sum(nil) } - err = azfile.UploadBufferToAzureFile(context.Background(), []byte(randomString), fileURL, azfile.UploadToAzureFileOptions{ - FileHTTPHeaders: fileHTTPHeaders, - Metadata: metadata, - }) + _, err := fileClient.Create(context.Background(), int64(fileSize), &sharefile.CreateOptions{HTTPHeaders: fileHTTPHeaders, Metadata: metadata}) + if err != nil { - fmt.Printf("error uploading the file %v\n", err) + fmt.Printf("error creating the file %v\n", err) os.Exit(1) } + + if fileSize > 0 { + err = fileClient.UploadBuffer(context.Background(), []byte(randomString), nil) + + if err != nil { + fmt.Printf("error uploading the file %v\n", err) + os.Exit(1) + } + } } func createBucket(bucketURLStr string) { diff --git a/testSuite/cmd/list.go b/testSuite/cmd/list.go index b1f27c7c8..ad4a9ce0c 100644 --- a/testSuite/cmd/list.go +++ b/testSuite/cmd/list.go @@ -3,12 +3,14 @@ package cmd import ( "context" "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" "net/url" "os" "strings" "github.com/Azure/azure-storage-azcopy/v10/ste" - "github.com/Azure/azure-storage-blob-go/azblob" "github.com/spf13/cobra" ) @@ -38,10 +40,10 @@ func init() { cleanCmd.PersistentFlags().Int64Var(&numberOfResource, "resource-num", 0, "number of resource inside the container") } -func 
getContainerURLFromString(url url.URL) url.URL { +func getContainerURLFromString(url url.URL) string { containerName := strings.SplitAfterN(url.Path[1:], "/", 2)[0] url.Path = "/" + containerName - return url + return url.String() } // checks if a given url points to a container, as opposed to a blob or prefix match @@ -62,20 +64,7 @@ func getBlobNameFromURL(path string) string { return strings.SplitAfterN(path[1:], "/", 2)[1] } -func listContainer(resourceUrl string, numberOfresource int64) { - - p := azblob.NewPipeline( - azblob.NewAnonymousCredential(), - azblob.PipelineOptions{ - Retry: azblob.RetryOptions{ - Policy: azblob.RetryPolicyExponential, - MaxTries: ste.UploadMaxTries, - TryTimeout: ste.UploadTryTimeout, - RetryDelay: ste.UploadRetryDelay, - MaxRetryDelay: ste.UploadMaxRetryDelay, - }, - }) - +func listContainer(resourceUrl string, numberOfResources int64) { // attempt to parse the source url sourceUrl, err := url.Parse(resourceUrl) if err != nil { @@ -85,7 +74,18 @@ func listContainer(resourceUrl string, numberOfresource int64) { // get the container url to be used for listing literalContainerUrl := getContainerURLFromString(*sourceUrl) - containerUrl := azblob.NewContainerURL(literalContainerUrl, p) + cc, err := container.NewClientWithNoCredential(literalContainerUrl, &container.ClientOptions{ClientOptions: azcore.ClientOptions{ + Retry: policy.RetryOptions{ + MaxRetries: ste.UploadMaxTries, + TryTimeout: ste.UploadTryTimeout, + RetryDelay: ste.UploadRetryDelay, + MaxRetryDelay: ste.UploadMaxRetryDelay, + }, + }}) + if err != nil { + fmt.Printf("cannot create container client. Failed with error %s\n", err.Error()) + os.Exit(1) + } // get the search prefix to query the service searchPrefix := "" @@ -99,13 +99,14 @@ func listContainer(resourceUrl string, numberOfresource int64) { searchPrefix += "/" } } - numberOfblobs := int64(0) + numberOfBlobs := int64(0) // perform a list blob - for marker := (azblob.Marker{}); marker.NotDone(); { - // look for all blobs that start with the prefix - listBlob, err := containerUrl.ListBlobsFlatSegment(context.TODO(), marker, - azblob.ListBlobsSegmentOptions{Prefix: searchPrefix}) + // look for all blobs that start with the prefix + pager := cc.NewListBlobsFlatPager(&container.ListBlobsFlatOptions{Prefix: &searchPrefix}) + + for pager.More() { + listBlob, err := pager.NextPage(context.TODO()) if err != nil { fmt.Printf("cannot list blobs for download. Failed with error %s\n", err.Error()) os.Exit(1) @@ -113,18 +114,17 @@ func listContainer(resourceUrl string, numberOfresource int64) { // Process the blobs returned in this result segment (if the segment is empty, the loop body won't execute) for _, blobInfo := range listBlob.Segment.BlobItems { - blobName := blobInfo.Name + blobName := *blobInfo.Name if len(searchPrefix) > 0 { // strip away search prefix from the blob name. 
blobName = strings.Replace(blobName, searchPrefix, "", 1) //nolint:ineffassign,staticcheck } - numberOfblobs++ + numberOfBlobs++ } - marker = listBlob.NextMarker } - if numberOfblobs != numberOfresource { - fmt.Printf("expected number of blobs / file %d inside the resource does not match the actual %d\n", numberOfresource, numberOfblobs) + if numberOfBlobs != numberOfResources { + fmt.Printf("expected number of blobs / file %d inside the resource does not match the actual %d\n", numberOfResources, numberOfBlobs) os.Exit(1) } } diff --git a/testSuite/cmd/testblob.go b/testSuite/cmd/testblob.go index 0f6090d42..2a30ce3e9 100644 --- a/testSuite/cmd/testblob.go +++ b/testSuite/cmd/testblob.go @@ -4,6 +4,14 @@ import ( "context" "crypto/md5" "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob" "io" "net/http" "net/url" @@ -11,12 +19,9 @@ import ( "strings" "time" - "github.com/Azure/azure-pipeline-go/pipeline" - "github.com/Azure/azure-storage-azcopy/v10/common" "github.com/Azure/azure-storage-azcopy/v10/ste" - "github.com/Azure/azure-storage-blob-go/azblob" "github.com/spf13/cobra" ) @@ -37,7 +42,7 @@ type TestBlobCommand struct { // If the object is directory, then validation goes through another path. IsObjectDirectory bool // Metadata of the blob to be validated. - MetaData string + Metadata string // NoGuessMimeType represent the azcopy NoGuessMimeType flag set while uploading the blob. NoGuessMimeType bool // Represents the flag to determine whether number of blocks or pages needs @@ -92,7 +97,7 @@ func init() { } rootCmd.AddCommand(testBlobCmd) // add flags. 
- testBlobCmd.PersistentFlags().StringVar(&cmdInput.MetaData, "metadata", "", "metadata expected from the blob in the container") + testBlobCmd.PersistentFlags().StringVar(&cmdInput.Metadata, "metadata", "", "metadata expected from the blob in the container") testBlobCmd.PersistentFlags().StringVar(&cmdInput.ContentType, "content-type", "", "content type expected from the blob in the container") testBlobCmd.PersistentFlags().StringVar(&cmdInput.ContentEncoding, "content-encoding", "", "Validate content encoding.") testBlobCmd.PersistentFlags().StringVar(&cmdInput.ContentDisposition, "content-disposition", "", "Validate content disposition.") @@ -104,21 +109,24 @@ func init() { testBlobCmd.PersistentFlags().BoolVar(&cmdInput.VerifyBlockOrPageSize, "verify-block-size", false, "this flag verify the block size by determining the number of blocks") testBlobCmd.PersistentFlags().BoolVar(&cmdInput.NoGuessMimeType, "no-guess-mime-type", false, "This sets the content-type based on the extension of the file.") testBlobCmd.PersistentFlags().StringVar(&cmdInput.BlobType, "blob-type", "BlockBlob", "Upload to Azure Storage using this blob type.") - testBlobCmd.PersistentFlags().StringVar(&cmdInput.BlobTier, "blob-tier", string(azblob.AccessTierNone), "access tier type for the block blob") + testBlobCmd.PersistentFlags().StringVar(&cmdInput.BlobTier, "blob-tier", "", "access tier type for the block blob") testBlobCmd.PersistentFlags().BoolVar(&cmdInput.PreserveLastModifiedTime, "preserve-last-modified-time", false, "Only available when destination is file system.") testBlobCmd.PersistentFlags().BoolVar(&cmdInput.CheckContentType, "check-content-type", false, "Validate content type.") } -func verifyBlobType(url url.URL, ctx context.Context, p pipeline.Pipeline, intendedBlobType string) (bool, error) { - bURL := azblob.NewBlobURL(url, p) - pResp, err := bURL.GetProperties(ctx, azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{}) +func verifyBlobType(resourceURL string, ctx context.Context, intendedBlobType string) (bool, error) { + blobClient, err := blob.NewClientWithNoCredential(resourceURL, nil) + if err != nil { + return false, err + } + pResp, err := blobClient.GetProperties(ctx, nil) if err != nil { return false, err } - if string(pResp.BlobType()) != intendedBlobType { - return false, fmt.Errorf("blob URL is not intended blob type %s, but instead %s", intendedBlobType, pResp.BlobType()) + if string(common.IffNotNil(pResp.BlobType, "")) != intendedBlobType { + return false, fmt.Errorf("blob URL is not intended blob type %s, but instead %s", intendedBlobType, common.IffNotNil(pResp.BlobType, "")) } return true, nil @@ -152,29 +160,30 @@ func verifyBlockBlobDirUpload(testBlobCmd TestBlobCommand) { sasUrl.Path = "/" + containerName // Create Pipeline to Get the Blob Properties or List Blob Segment - p := ste.NewBlobPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{ - Telemetry: azblob.TelemetryOptions{ - Value: common.UserAgent, - }, - }, - ste.XferRetryOptions{ - Policy: 0, - MaxTries: ste.UploadMaxTries, - TryTimeout: 10 * time.Minute, - RetryDelay: ste.UploadRetryDelay, - MaxRetryDelay: ste.UploadMaxRetryDelay}, - nil, - ste.NewAzcopyHTTPClient(0), - nil) - containerUrl := azblob.NewContainerURL(*sasUrl, p) + containerClient, err := container.NewClientWithNoCredential(sasUrl.String(), &container.ClientOptions{ + ClientOptions: azcore.ClientOptions{ + Telemetry: policy.TelemetryOptions{ApplicationID: common.UserAgent}, + Retry: policy.RetryOptions{ + MaxRetries: 
ste.UploadMaxTries, + TryTimeout: 10*time.Minute, + RetryDelay: ste.UploadRetryDelay, + MaxRetryDelay: ste.UploadMaxRetryDelay, + }, + Transport: ste.NewAzcopyHTTPClient(0), + }}) + if err != nil { + fmt.Printf("error creating container client. failed with error %s\n", err.Error()) + os.Exit(1) + } testCtx := context.WithValue(context.Background(), ste.ServiceAPIVersionOverride, defaultServiceApiVersion) // perform a list blob with search prefix "dirname/" dirName := strings.Split(testBlobCmd.Object, "/") searchPrefix := dirName[len(dirName)-1] + "/" - for marker := (azblob.Marker{}); marker.NotDone(); { + pager := containerClient.NewListBlobsFlatPager(&container.ListBlobsFlatOptions{Prefix: &searchPrefix}) + for pager.More() { // look for all blobs that start with the prefix, so that if a blob is under the virtual directory, it will show up - listBlob, err := containerUrl.ListBlobsFlatSegment(testCtx, marker, azblob.ListBlobsSegmentOptions{Prefix: searchPrefix}) + listBlob, err := pager.NextPage(testCtx) if err != nil { fmt.Println("error listing blobs inside the container. Please check the container sas") os.Exit(1) @@ -183,23 +192,21 @@ func verifyBlockBlobDirUpload(testBlobCmd TestBlobCommand) { // Process the blobs returned in this result segment (if the segment is empty, the loop body won't execute) for _, blobInfo := range listBlob.Segment.BlobItems { // get the blob - size := blobInfo.Properties.ContentLength - get, err := containerUrl.NewBlobURL(blobInfo.Name).Download(testCtx, - 0, *size, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{}) + get, err := containerClient.NewBlobClient(*blobInfo.Name).DownloadStream(testCtx, nil) if err != nil { - fmt.Printf("error downloading the blob %s\n", blobInfo.Name) + fmt.Printf("error downloading the blob %s\n", *blobInfo.Name) os.Exit(1) } // read all bytes. - blobBytesDownloaded, err := io.ReadAll(get.Body(azblob.RetryReaderOptions{})) + blobBytesDownloaded, err := io.ReadAll(get.Body) if err != nil { - fmt.Printf("error reading the body of blob %s downloaded and failed with error %s\n", blobInfo.Name, err.Error()) + fmt.Printf("error reading the body of blob %s downloaded and failed with error %s\n", *blobInfo.Name, err.Error()) os.Exit(1) } // remove the search prefix from the blob name - blobName := strings.Replace(blobInfo.Name, searchPrefix, "", 1) + blobName := strings.Replace(*blobInfo.Name, searchPrefix, "", 1) // blob path on local disk. objectLocalPath := testBlobCmd.Object + string(os.PathSeparator) + blobName // opening the file locally and memory mapping it. @@ -229,40 +236,39 @@ func verifyBlockBlobDirUpload(testBlobCmd TestBlobCommand) { os.Exit(1) } } - marker = listBlob.NextMarker } } -// validateMetadata compares the meta data provided while +// validateMetadata compares the metadata provided while // uploading and metadata with blob in the container. 
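// Editor's note: the Track 1 Download(...).Body(azblob.RetryReaderOptions{}) read path
// becomes DownloadStream plus a plain io.ReadAll throughout this file, as in
// verifyBlockBlobDirUpload above. Below is a minimal sketch of the new shape; not
// part of the patch, and the function name is illustrative.
package cmd

import (
	"context"
	"io"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

func readWholeBlob(ctx context.Context, blobClient *blob.Client) ([]byte, error) {
	resp, err := blobClient.DownloadStream(ctx, nil)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close() // Body is the download stream; no RetryReaderOptions wrapper is needed
	return io.ReadAll(resp.Body)
}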
-func validateMetadata(expectedMetaDataString string, actualMetaData azblob.Metadata) bool { - if len(expectedMetaDataString) > 0 { - // split the meta data string to get the map of key value pair +func validateMetadata(expectedMetadataString string, actualMetadata map[string]*string) bool { + if len(expectedMetadataString) > 0 { + // split the metadata string to get the map of key value pair // metadata string is in format key1=value1;key2=value2;key3=value3 - expectedMetaData := azblob.Metadata{} + expectedMetadata := map[string]*string{} // split the metadata to get individual keyvalue pair in format key1=value1 - keyValuePair := strings.Split(expectedMetaDataString, ";") + keyValuePair := strings.Split(expectedMetadataString, ";") for index := 0; index < len(keyValuePair); index++ { // split the individual key value pair to get key and value keyValue := strings.Split(keyValuePair[index], "=") - expectedMetaData[keyValue[0]] = keyValue[1] + expectedMetadata[keyValue[0]] = to.Ptr(keyValue[1]) } // if number of metadata provided while uploading // doesn't match the metadata with blob on the container - if len(expectedMetaData) != len(actualMetaData) { + if len(expectedMetadata) != len(actualMetadata) { fmt.Println("number of user given key value pair of the actual metadata differs from key value pair of expected metaData") return false } // iterating through each key value pair of actual metaData and comparing the key value pair in expected metadata - for key, value := range actualMetaData { - if expectedMetaData[key] != value { - fmt.Printf("value of user given key %s is %s in actual data while it is %s in expected metadata\n", key, value, expectedMetaData[key]) + for key, value := range actualMetadata { + if *expectedMetadata[strings.ToLower(key)] != *value { + fmt.Printf("value of user given key %s is %s in actual data while it is %s in expected metadata\n", key, *value, *expectedMetadata[key]) return false } } } else { - if len(actualMetaData) > 0 { + if len(actualMetadata) > 0 { return false } } @@ -283,69 +289,54 @@ func verifySinglePageBlobUpload(testBlobCmd TestBlobCommand) { fmt.Println("error opening the file ", testBlobCmd.Object) } - // getting the shared access signature of the resource. - sourceURL, err := url.Parse(testBlobCmd.Subject) - if err != nil { - fmt.Println("Error parsing the blob url source") - os.Exit(1) - } - - // creating the page blob url of the resource on container. 
- // Create Pipeline to Get the Blob Properties or List Blob Segment - p := ste.NewBlobPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{ - Telemetry: azblob.TelemetryOptions{ - Value: common.UserAgent, - }, - }, - ste.XferRetryOptions{ - Policy: 0, - MaxTries: ste.UploadMaxTries, - TryTimeout: 10 * time.Minute, - RetryDelay: ste.UploadRetryDelay, - MaxRetryDelay: ste.UploadMaxRetryDelay}, - nil, - ste.NewAzcopyHTTPClient(0), - nil) - testCtx := context.WithValue(context.Background(), ste.ServiceAPIVersionOverride, defaultServiceApiVersion) - isPage, err := verifyBlobType(*sourceURL, testCtx, p, "PageBlob") + isPage, err := verifyBlobType(testBlobCmd.Subject, testCtx, "PageBlob") if !isPage || err != nil { fmt.Println(err) os.Exit(1) } - pageBlobUrl := azblob.NewPageBlobURL(*sourceURL, p) + pageBlobClient, err := pageblob.NewClientWithNoCredential(testBlobCmd.Subject, &pageblob.ClientOptions{ + ClientOptions: azcore.ClientOptions{ + Telemetry: policy.TelemetryOptions{ApplicationID: common.UserAgent}, + Retry: policy.RetryOptions{ + MaxRetries: ste.UploadMaxTries, + TryTimeout: 10*time.Minute, + RetryDelay: ste.UploadRetryDelay, + MaxRetryDelay: ste.UploadMaxRetryDelay, + }, + Transport: ste.NewAzcopyHTTPClient(0), + }}) + if err != nil { + fmt.Printf("error creating page blob client. failed with error %s\n", err.Error()) + os.Exit(1) + } // get the blob properties and check the blob tier. - if azblob.AccessTierType(testBlobCmd.BlobTier) != azblob.AccessTierNone { - blobProperties, err := pageBlobUrl.GetProperties(testCtx, azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{}) + if testBlobCmd.BlobTier != "" { + blobProperties, err := pageBlobClient.GetProperties(testCtx, nil) if err != nil { fmt.Printf("error getting the properties of the blob. failed with error %s\n", err.Error()) os.Exit(1) } // If the blob tier does not match the expected blob tier. - if !strings.EqualFold(blobProperties.AccessTier(), testBlobCmd.BlobTier) { - fmt.Printf("Access blob tier type %s does not match the expected %s tier type\n", blobProperties.AccessTier(), testBlobCmd.BlobTier) + if !strings.EqualFold(common.IffNotNil(blobProperties.AccessTier, ""), testBlobCmd.BlobTier) { + fmt.Printf("Access blob tier type %s does not match the expected %s tier type\n", common.IffNotNil(blobProperties.AccessTier, ""), testBlobCmd.BlobTier) os.Exit(1) } - // Closing the blobProperties response body. - if blobProperties.Response() != nil { - _, _ = io.Copy(io.Discard, blobProperties.Response().Body) - blobProperties.Response().Body.Close() - } } - get, err := pageBlobUrl.Download(testCtx, 0, fileInfo.Size(), azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{}) + get, err := pageBlobClient.DownloadStream(testCtx, nil) if err != nil { fmt.Println("unable to get blob properties ", err.Error()) os.Exit(1) } // reading all the bytes downloaded. 
- blobBytesDownloaded, err := io.ReadAll(get.Body(azblob.RetryReaderOptions{})) - if get.Response().Body != nil { - get.Response().Body.Close() + blobBytesDownloaded, err := io.ReadAll(get.Body) + if get.Body != nil { + get.Body.Close() } if err != nil { fmt.Println("error reading the byes from response and failed with error ", err.Error()) @@ -383,42 +374,42 @@ func verifySinglePageBlobUpload(testBlobCmd TestBlobCommand) { } // verify the content-type - if testBlobCmd.CheckContentType && !validateString(expectedContentType, get.ContentType()) { + if testBlobCmd.CheckContentType && !validateString(expectedContentType, common.IffNotNil(get.ContentType, "")) { fmt.Printf( "mismatch content type between actual and user given blob content type, expected %q, actually %q\n", expectedContentType, - get.ContentType()) + common.IffNotNil(get.ContentType, "")) os.Exit(1) } // verify the user given metadata supplied while uploading the blob against the metadata actually present in the blob - if !validateMetadata(testBlobCmd.MetaData, get.NewMetadata()) { + if !validateMetadata(testBlobCmd.Metadata, get.Metadata) { fmt.Println("meta data does not match between the actual and uploaded blob.") os.Exit(1) } //verify the content-encoding - if !validateString(testBlobCmd.ContentEncoding, get.ContentEncoding()) { + if !validateString(testBlobCmd.ContentEncoding, common.IffNotNil(get.ContentEncoding, "")) { fmt.Println("mismatch ContentEncoding between actual and user given blob") os.Exit(1) } - if !validateString(testBlobCmd.CacheControl, get.CacheControl()) { + if !validateString(testBlobCmd.CacheControl, common.IffNotNil(get.CacheControl, "")) { fmt.Println("mismatch CacheControl between actual and user given blob") os.Exit(1) } - if !validateString(testBlobCmd.ContentDisposition, get.ContentDisposition()) { + if !validateString(testBlobCmd.ContentDisposition, common.IffNotNil(get.ContentDisposition, "")) { fmt.Println("mismatch ContentDisposition between actual and user given blob") os.Exit(1) } - if !validateString(testBlobCmd.ContentLanguage, get.ContentLanguage()) { + if !validateString(testBlobCmd.ContentLanguage, common.IffNotNil(get.ContentLanguage, "")) { fmt.Println("mismatch ContentLanguage between actual and user given blob") os.Exit(1) } - if testBlobCmd.CheckContentMD5 && (get.ContentMD5() == nil || len(get.ContentMD5()) == 0) { + if testBlobCmd.CheckContentMD5 && (get.ContentMD5 == nil || len(get.ContentMD5) == 0) { fmt.Println("ContentMD5 should not be empty") os.Exit(1) } @@ -428,12 +419,18 @@ func verifySinglePageBlobUpload(testBlobCmd TestBlobCommand) { // this verifies the page-size and azcopy pageblob implementation. if testBlobCmd.VerifyBlockOrPageSize { numberOfPages := int(testBlobCmd.NumberOfBlocksOrPages) - resp, err := pageBlobUrl.GetPageRanges(testCtx, 0, 0, azblob.BlobAccessConditions{}) - if err != nil { - fmt.Println("error getting the block blob list ", err.Error()) - os.Exit(1) + pager := pageBlobClient.NewGetPageRangesPager(nil) + pageRanges := []*pageblob.PageRange{} + for pager.More() { + resp, err := pager.NextPage(testCtx) + if err != nil { + fmt.Println("error getting the block blob list ", err.Error()) + os.Exit(1) + } + pageRanges = append(pageRanges, resp.PageRange...) 
+ } - if numberOfPages != (len(resp.PageRange)) { + if numberOfPages != (len(pageRanges)) { fmt.Println("number of blocks to be uploaded is different from the number of expected to be uploaded") os.Exit(1) } @@ -457,76 +454,60 @@ func verifySingleBlockBlob(testBlobCmd TestBlobCommand) { fmt.Println("error opening the file ", objectLocalPath) } - // getting the shared access signature of the resource. - sourceSas := testBlobCmd.Subject - sourceURL, err := url.Parse(sourceSas) - if err != nil { - fmt.Printf("Error parsing the blob url source %s\n", testBlobCmd.Object) - os.Exit(1) - } - - // creating the blockblob url of the resource on container. - // Create Pipeline to Get the Blob Properties or List Blob Segment - p := ste.NewBlobPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{ - Telemetry: azblob.TelemetryOptions{ - Value: common.UserAgent, - }, - }, - ste.XferRetryOptions{ - Policy: 0, - MaxTries: ste.UploadMaxTries, - TryTimeout: 10 * time.Minute, - RetryDelay: ste.UploadRetryDelay, - MaxRetryDelay: ste.UploadMaxRetryDelay}, - nil, - ste.NewAzcopyHTTPClient(0), - nil) - testCtx := context.WithValue(context.Background(), ste.ServiceAPIVersionOverride, defaultServiceApiVersion) - isBlock, err := verifyBlobType(*sourceURL, testCtx, p, "BlockBlob") + isBlock, err := verifyBlobType(testBlobCmd.Subject, testCtx, "BlockBlob") if !isBlock || err != nil { fmt.Println(err) os.Exit(1) } - blobUrl := azblob.NewBlobURL(*sourceURL, p) + blockBlobClient, err := blockblob.NewClientWithNoCredential(testBlobCmd.Subject, &blockblob.ClientOptions{ + ClientOptions: azcore.ClientOptions{ + Telemetry: policy.TelemetryOptions{ApplicationID: common.UserAgent}, + Retry: policy.RetryOptions{ + MaxRetries: ste.UploadMaxTries, + TryTimeout: 10*time.Minute, + RetryDelay: ste.UploadRetryDelay, + MaxRetryDelay: ste.UploadMaxRetryDelay, + }, + Transport: ste.NewAzcopyHTTPClient(0), + }}) + if err != nil { + fmt.Printf("error creating block blob client. failed with error %s\n", err.Error()) + os.Exit(1) + } // check for access tier type // get the blob properties and get the Access Tier Type. - if azblob.AccessTierType(testBlobCmd.BlobTier) != azblob.AccessTierNone { - blobProperties, err := blobUrl.GetProperties(testCtx, azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{}) + if testBlobCmd.BlobTier != "" { + blobProperties, err := blockBlobClient.GetProperties(testCtx, nil) if err != nil { fmt.Printf("error getting the blob properties. Failed with error %s\n", err.Error()) os.Exit(1) } // Match the Access Tier Type with Expected Tier Type. - if !strings.EqualFold(blobProperties.AccessTier(), testBlobCmd.BlobTier) { - fmt.Printf("block blob access tier %s does not matches the expected tier %s\n", blobProperties.AccessTier(), testBlobCmd.BlobTier) + if !strings.EqualFold(common.IffNotNil(blobProperties.AccessTier, ""), testBlobCmd.BlobTier) { + fmt.Printf("block blob access tier %s does not matches the expected tier %s\n", common.IffNotNil(blobProperties.AccessTier, ""), testBlobCmd.BlobTier) os.Exit(1) } - // Closing the blobProperties response. - if blobProperties.Response() != nil { - _, _ = io.Copy(io.Discard, blobProperties.Response().Body) - blobProperties.Response().Body.Close() - } // If the access tier type of blob is set to Archive, then the blob is offline and reading the blob is not allowed, // so exit the test. 
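The page-range check above switches from a single GetPageRanges call to a pager. A short sketch of draining that pager, under the assumption of a placeholder SAS URL; the helper name countPageRanges is introduced here for illustration only.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob"
)

// countPageRanges drains the Track 2 pager that replaces the old single-shot
// GetPageRanges call, accumulating every range before the count is compared.
func countPageRanges(ctx context.Context, client *pageblob.Client) (int, error) {
	total := 0
	pager := client.NewGetPageRangesPager(nil)
	for pager.More() {
		page, err := pager.NextPage(ctx)
		if err != nil {
			return 0, err
		}
		total += len(page.PageRange)
	}
	return total, nil
}

func main() {
	// Hypothetical SAS URL, standing in for testBlobCmd.Subject.
	client, err := pageblob.NewClientWithNoCredential(
		"https://myaccount.blob.core.windows.net/container/page.vhd?<sas>", nil)
	if err != nil {
		log.Fatal(err)
	}
	n, err := countPageRanges(context.Background(), client)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("page ranges:", n)
}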
- if azblob.AccessTierType(testBlobCmd.BlobTier) == azblob.AccessTierArchive { + if blob.AccessTier(testBlobCmd.BlobTier) == blob.AccessTierArchive { os.Exit(0) } } - get, err := blobUrl.Download(testCtx, 0, fileInfo.Size(), azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{}) + get, err := blockBlobClient.DownloadStream(testCtx, nil) if err != nil { fmt.Println("unable to get blob properties ", err.Error()) os.Exit(1) } // reading all the blob bytes. - blobBytesDownloaded, err := io.ReadAll(get.Body(azblob.RetryReaderOptions{})) - if get.Response().Body != nil { - get.Response().Body.Close() + blobBytesDownloaded, err := io.ReadAll(get.Body) + if get.Body != nil { + get.Body.Close() } if err != nil { fmt.Println("error reading the byes from response and failed with error ", err.Error()) @@ -560,7 +541,7 @@ func verifySingleBlockBlob(testBlobCmd TestBlobCommand) { os.Exit(1) } // verify the user given metadata supplied while uploading the blob against the metadata actually present in the blob - if !validateMetadata(testBlobCmd.MetaData, get.NewMetadata()) { + if !validateMetadata(testBlobCmd.Metadata, get.Metadata) { fmt.Println("meta data does not match between the actual and uploaded blob.") os.Exit(1) } @@ -572,22 +553,22 @@ func verifySingleBlockBlob(testBlobCmd TestBlobCommand) { } else { expectedContentType = strings.Split(http.DetectContentType(mmap), ";")[0] } - if testBlobCmd.CheckContentType && !validateString(expectedContentType, get.ContentType()) { + if testBlobCmd.CheckContentType && !validateString(expectedContentType, common.IffNotNil(get.ContentType, "")) { fmt.Printf( "mismatch content type between actual and user given blob content type, expected %q, actually %q\n", expectedContentType, - get.ContentType()) + common.IffNotNil(get.ContentType, "")) os.Exit(1) } //verify the content-encoding - if !validateString(testBlobCmd.ContentEncoding, get.ContentEncoding()) { + if !validateString(testBlobCmd.ContentEncoding, common.IffNotNil(get.ContentEncoding, "")) { fmt.Println("mismatch content encoding between actual and user given blob content encoding") os.Exit(1) } if testBlobCmd.PreserveLastModifiedTime { - if fileInfo.ModTime().Unix() != get.LastModified().Unix() { + if fileInfo.ModTime().Unix() != (common.IffNotNil(get.LastModified, time.Time{})).Unix() { fmt.Println("modified time of downloaded and actual blob does not match") os.Exit(1) } @@ -603,9 +584,8 @@ func verifySingleBlockBlob(testBlobCmd TestBlobCommand) { // verify the block size if testBlobCmd.VerifyBlockOrPageSize { - blockBlobUrl := azblob.NewBlockBlobURL(*sourceURL, p) numberOfBlocks := int(testBlobCmd.NumberOfBlocksOrPages) - resp, err := blockBlobUrl.GetBlockList(testCtx, azblob.BlockListNone, azblob.LeaseAccessConditions{}) + resp, err := blockBlobClient.GetBlockList(testCtx, blockblob.BlockListTypeCommitted, nil) if err != nil { fmt.Println("error getting the block blob list") os.Exit(1) @@ -630,67 +610,54 @@ func verifySingleAppendBlob(testBlobCmd TestBlobCommand) { fmt.Println("error opening the file ", testBlobCmd.Object) } - // getting the shared access signature of the resource. 
- sourceURL, err := url.Parse(testBlobCmd.Subject) - if err != nil { - fmt.Printf("Error parsing the blob url source %s\n", testBlobCmd.Object) - os.Exit(1) - } - - p := ste.NewBlobPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{ - Telemetry: azblob.TelemetryOptions{ - Value: common.UserAgent, - }, - }, - ste.XferRetryOptions{ - Policy: 0, - MaxTries: ste.UploadMaxTries, - TryTimeout: 10 * time.Minute, - RetryDelay: ste.UploadRetryDelay, - MaxRetryDelay: ste.UploadMaxRetryDelay}, - nil, - ste.NewAzcopyHTTPClient(0), - nil) - testCtx := context.WithValue(context.Background(), ste.ServiceAPIVersionOverride, defaultServiceApiVersion) - isAppend, err := verifyBlobType(*sourceURL, testCtx, p, "AppendBlob") + isAppend, err := verifyBlobType(testBlobCmd.Subject, testCtx, "AppendBlob") if !isAppend || err != nil { fmt.Println(err) os.Exit(1) } - appendBlobURL := azblob.NewAppendBlobURL(*sourceURL, p) + appendBlobClient, err := appendblob.NewClientWithNoCredential(testBlobCmd.Subject, &appendblob.ClientOptions{ + ClientOptions: azcore.ClientOptions{ + Telemetry: policy.TelemetryOptions{ApplicationID: common.UserAgent}, + Retry: policy.RetryOptions{ + MaxRetries: ste.UploadMaxTries, + TryTimeout: 10*time.Minute, + RetryDelay: ste.UploadRetryDelay, + MaxRetryDelay: ste.UploadMaxRetryDelay, + }, + Transport: ste.NewAzcopyHTTPClient(0), + }}) + if err != nil { + fmt.Printf("error creating append blob client. failed with error %s\n", err.Error()) + os.Exit(1) + } // get the blob properties and check the blob tier. - if azblob.AccessTierType(testBlobCmd.BlobTier) != azblob.AccessTierNone { - blobProperties, err := appendBlobURL.GetProperties(testCtx, azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{}) + if testBlobCmd.BlobTier != "" { + blobProperties, err := appendBlobClient.GetProperties(testCtx, nil) if err != nil { fmt.Printf("error getting the properties of the blob. failed with error %s\n", err.Error()) os.Exit(1) } // If the blob tier does not match the expected blob tier. - if !strings.EqualFold(blobProperties.AccessTier(), testBlobCmd.BlobTier) { - fmt.Printf("Access blob tier type %s does not match the expected %s tier type\n", blobProperties.AccessTier(), testBlobCmd.BlobTier) + if !strings.EqualFold(common.IffNotNil(blobProperties.AccessTier, ""), testBlobCmd.BlobTier) { + fmt.Printf("Access blob tier type %s does not match the expected %s tier type\n", common.IffNotNil(blobProperties.AccessTier, ""), testBlobCmd.BlobTier) os.Exit(1) } - // Closing the blobProperties response body. - if blobProperties.Response() != nil { - _, _ = io.Copy(io.Discard, blobProperties.Response().Body) - blobProperties.Response().Body.Close() - } } - get, err := appendBlobURL.Download(testCtx, 0, fileInfo.Size(), azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{}) + get, err := appendBlobClient.DownloadStream(testCtx, nil) if err != nil { fmt.Println("unable to get blob properties ", err.Error()) os.Exit(1) } // reading all the bytes downloaded. 
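The download path above moves from Download(ctx, 0, size, ...) plus a retry-reader body to DownloadStream with a plain Body. A small sketch of that read pattern on an append blob, assuming a placeholder SAS URL.

package main

import (
	"context"
	"fmt"
	"io"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob"
)

func main() {
	// Hypothetical SAS URL for an append blob.
	client, err := appendblob.NewClientWithNoCredential(
		"https://myaccount.blob.core.windows.net/container/log.txt?<sas>", nil)
	if err != nil {
		log.Fatal(err)
	}

	// DownloadStream with nil options reads the whole blob from offset 0,
	// replacing the old Download(ctx, 0, size, ...) call.
	get, err := client.DownloadStream(context.Background(), nil)
	if err != nil {
		log.Fatal(err)
	}
	defer get.Body.Close()

	data, err := io.ReadAll(get.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("downloaded", len(data), "bytes")
}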
- blobBytesDownloaded, err := io.ReadAll(get.Body(azblob.RetryReaderOptions{})) - if get.Response().Body != nil { - get.Response().Body.Close() + blobBytesDownloaded, err := io.ReadAll(get.Body) + if get.Body != nil { + get.Body.Close() } if err != nil { fmt.Println("error reading the byes from response and failed with error ", err.Error()) @@ -729,41 +696,41 @@ func verifySingleAppendBlob(testBlobCmd TestBlobCommand) { } // verify the user given metadata supplied while uploading the blob against the metadata actually present in the blob - if !validateMetadata(testBlobCmd.MetaData, get.NewMetadata()) { + if !validateMetadata(testBlobCmd.Metadata, get.Metadata) { fmt.Println("meta data does not match between the actual and uploaded blob.") os.Exit(1) } - if testBlobCmd.CheckContentType && !validateString(expectedContentType, get.ContentType()) { + if testBlobCmd.CheckContentType && !validateString(expectedContentType, common.IffNotNil(get.ContentType, "")) { fmt.Printf( "mismatch content type between actual and user given blob content type, expected %q, actually %q\n", expectedContentType, - get.ContentType()) + common.IffNotNil(get.ContentType, "")) os.Exit(1) } //verify the content-encoding - if !validateString(testBlobCmd.ContentEncoding, get.ContentEncoding()) { + if !validateString(testBlobCmd.ContentEncoding, common.IffNotNil(get.ContentEncoding, "")) { fmt.Println("mismatch ContentEncoding between actual and user given blob") os.Exit(1) } - if !validateString(testBlobCmd.CacheControl, get.CacheControl()) { + if !validateString(testBlobCmd.CacheControl, common.IffNotNil(get.CacheControl, "")) { fmt.Println("mismatch CacheControl between actual and user given blob") os.Exit(1) } - if !validateString(testBlobCmd.ContentDisposition, get.ContentDisposition()) { + if !validateString(testBlobCmd.ContentDisposition, common.IffNotNil(get.ContentDisposition, "")) { fmt.Println("mismatch ContentDisposition between actual and user given blob") os.Exit(1) } - if !validateString(testBlobCmd.ContentLanguage, get.ContentLanguage()) { + if !validateString(testBlobCmd.ContentLanguage, common.IffNotNil(get.ContentLanguage, "")) { fmt.Println("mismatch ContentLanguage between actual and user given blob") os.Exit(1) } - if testBlobCmd.CheckContentMD5 && (get.ContentMD5() == nil || len(get.ContentMD5()) == 0) { + if testBlobCmd.CheckContentMD5 && (get.ContentMD5 == nil || len(get.ContentMD5) == 0) { fmt.Println("ContentMD5 should not be empty") os.Exit(1) } diff --git a/testSuite/cmd/testfile.go b/testSuite/cmd/testfile.go index 735f2b005..58c38201f 100644 --- a/testSuite/cmd/testfile.go +++ b/testSuite/cmd/testfile.go @@ -4,15 +4,15 @@ import ( "context" "crypto/md5" "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/directory" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" + "github.com/Azure/azure-storage-azcopy/v10/common" + "github.com/spf13/cobra" "io" "net/http" "net/url" "os" "strings" - "time" - - "github.com/Azure/azure-storage-file-go/azfile" - "github.com/spf13/cobra" ) // TestFileCommand represents the struct to get command @@ -108,64 +108,65 @@ func verifyFile(testFileCmd TestFileCommand) { // verifyFileDirUpload verifies the directory recursively uploaded to the share or directory. func verifyFileDirUpload(testFileCmd TestFileCommand) { - // parse the subject url. 
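The header checks above lean on common.IffNotNil because Track 2 response headers are pointer-typed. A self-contained sketch of that dereference pattern; derefOr is a local stand-in for the repo's common.IffNotNil, introduced here only so the example compiles on its own.

package main

import (
	"fmt"
	"time"
)

// derefOr returns *v when v is non-nil, otherwise def. It stands in for the
// repo's common.IffNotNil helper used throughout the hunks above.
func derefOr[T any](v *T, def T) T {
	if v == nil {
		return def
	}
	return *v
}

func main() {
	// Track 2 download responses expose headers as pointers; these stand in
	// for fields like resp.ContentType and resp.LastModified.
	var contentType *string
	lastModified := time.Date(2023, 8, 15, 0, 0, 0, 0, time.UTC)

	fmt.Println(derefOr(contentType, ""))                   // "" when the header is absent
	fmt.Println(derefOr(&lastModified, time.Time{}).Unix()) // dereferenced when present
}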
- sasURL, err := url.Parse(testFileCmd.Subject) + directoryClient, _ := directory.NewClientWithNoCredential(testFileCmd.Subject, nil) + + // get the original dir path, which can be used to get file relative path during enumerating and comparing + fileURLParts, err := file.ParseURL(testFileCmd.Subject) if err != nil { - // fmt.Println("fail to parse the container sas ", testFileCmd.Subject) os.Exit(1) } - - // as it's a directory validation, regard the sasURL as a directory - p := azfile.NewPipeline(azfile.NewAnonymousCredential(), azfile.PipelineOptions{}) - directoryURL := azfile.NewDirectoryURL(*sasURL, p) - - // get the original dir path, which can be used to get file relative path during enumerating and comparing - baseAzureDirPath := azfile.NewFileURLParts(*sasURL).DirectoryOrFilePath + baseAzureDirPath := fileURLParts.DirectoryOrFilePath // validate azure directory - validateAzureDirWithLocalFile(directoryURL, baseAzureDirPath, testFileCmd.Object, testFileCmd.IsRecursive) + validateAzureDirWithLocalFile(directoryClient, baseAzureDirPath, testFileCmd.Object, testFileCmd.IsRecursive) } // recursively validate files in azure directories and sub-directories -func validateAzureDirWithLocalFile(curAzureDirURL azfile.DirectoryURL, baseAzureDirPath string, localBaseDir string, isRecursive bool) { - for marker := (azfile.Marker{}); marker.NotDone(); { +func validateAzureDirWithLocalFile(curAzureDirURL *directory.Client, baseAzureDirPath string, localBaseDir string, isRecursive bool) { + pager := curAzureDirURL.NewListFilesAndDirectoriesPager(nil) + for pager.More() { // look for all files that in current directory - listFile, err := curAzureDirURL.ListFilesAndDirectoriesSegment(context.Background(), marker, azfile.ListFilesAndDirectoriesOptions{}) + listFile, err := pager.NextPage(context.Background()) if err != nil { // fmt.Printf("fail to list files and directories inside the directory. Please check the directory sas, %v\n", err) os.Exit(1) } if isRecursive { - for _, dirInfo := range listFile.DirectoryItems { - newDirURL := curAzureDirURL.NewDirectoryURL(dirInfo.Name) + for _, dirInfo := range listFile.Segment.Directories { + newDirURL := curAzureDirURL.NewSubdirectoryClient(*dirInfo.Name) validateAzureDirWithLocalFile(newDirURL, baseAzureDirPath, localBaseDir, isRecursive) } } // Process the files returned in this result segment (if the segment is empty, the loop body won't execute) - for _, fileInfo := range listFile.FileItems { - curFileURL := curAzureDirURL.NewFileURL(fileInfo.Name) - get, err := curFileURL.Download(context.Background(), 0, azfile.CountToEnd, false) + for _, fileInfo := range listFile.Segment.Files { + curFileURL := curAzureDirURL.NewFileClient(*fileInfo.Name) + get, err := curFileURL.DownloadStream(context.Background(), nil) if err != nil { - fmt.Printf("fail to download the file %s\n", fileInfo.Name) + fmt.Printf("fail to download the file %s\n", *fileInfo.Name) os.Exit(1) } - retryReader := get.Body(azfile.RetryReaderOptions{MaxRetryRequests: 3}) + retryReader := get.NewRetryReader(context.Background(), &file.RetryReaderOptions{MaxRetries: 3}) // read all bytes. 
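The directory traversal above replaces marker-based ListFilesAndDirectoriesSegment with the azfile pager and typed sub-clients. A condensed sketch of that recursion, assuming a placeholder directory SAS URL; walkShareDirectory is an illustrative helper name, not part of the test suite.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/directory"
)

// walkShareDirectory lists a directory with the Track 2 pager and recurses
// into sub-directories, mirroring the traversal rewritten above. Segment
// item names are pointers in Track 2, hence the dereferences.
func walkShareDirectory(ctx context.Context, dir *directory.Client, prefix string) error {
	pager := dir.NewListFilesAndDirectoriesPager(nil)
	for pager.More() {
		page, err := pager.NextPage(ctx)
		if err != nil {
			return err
		}
		for _, d := range page.Segment.Directories {
			sub := dir.NewSubdirectoryClient(*d.Name)
			if err := walkShareDirectory(ctx, sub, prefix+*d.Name+"/"); err != nil {
				return err
			}
		}
		for _, f := range page.Segment.Files {
			fmt.Println(prefix + *f.Name)
		}
	}
	return nil
}

func main() {
	// Hypothetical directory SAS URL, standing in for testFileCmd.Subject.
	dirClient, err := directory.NewClientWithNoCredential(
		"https://myaccount.file.core.windows.net/share/dir?<sas>", nil)
	if err != nil {
		log.Fatal(err)
	}
	if err := walkShareDirectory(context.Background(), dirClient, ""); err != nil {
		log.Fatal(err)
	}
}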
fileBytesDownloaded, err := io.ReadAll(retryReader) if err != nil { - fmt.Printf("fail to read the body of file %s downloaded and failed with error %s\n", fileInfo.Name, err.Error()) + fmt.Printf("fail to read the body of file %s downloaded and failed with error %s\n", *fileInfo.Name, err.Error()) os.Exit(1) } retryReader.Close() - tokens := strings.SplitAfterN(curFileURL.URL().Path, baseAzureDirPath, 2) + url, err := url.Parse(curFileURL.URL()) + if err != nil { + fmt.Printf("fail to parse the file URL %s\n", curFileURL.URL()) + os.Exit(1) + } + tokens := strings.SplitAfterN(url.Path, baseAzureDirPath, 2) if len(tokens) < 2 { - fmt.Printf("fail to get sub directory and file name, file URL '%s', original dir path '%s'\n", curFileURL.String(), baseAzureDirPath) + fmt.Printf("fail to get sub directory and file name, file URL '%s', original dir path '%s'\n", curFileURL.URL(), baseAzureDirPath) os.Exit(1) } @@ -205,43 +206,7 @@ func validateAzureDirWithLocalFile(curAzureDirURL azfile.DirectoryURL, baseAzure } } - marker = listFile.NextMarker - } -} - -// validateMetadataForFile compares the meta data provided while -// uploading and metadata with file in the container. -func validateMetadataForFile(expectedMetaDataString string, actualMetaData azfile.Metadata) bool { - if len(expectedMetaDataString) > 0 { - // split the meta data string to get the map of key value pair - // metadata string is in format key1=value1;key2=value2;key3=value3 - expectedMetaData := azfile.Metadata{} - // split the metadata to get individual keyvalue pair in format key1=value1 - keyValuePair := strings.Split(expectedMetaDataString, ";") - for index := 0; index < len(keyValuePair); index++ { - // split the individual key value pair to get key and value - keyValue := strings.Split(keyValuePair[index], "=") - expectedMetaData[keyValue[0]] = keyValue[1] - } - // if number of metadata provided while uploading - // doesn't match the metadata with file on the container - if len(expectedMetaData) != len(actualMetaData) { - fmt.Println("number of user given key value pair of the actual metadata differs from key value pair of expected metaData") - return false - } - // iterating through each key value pair of actual metaData and comparing the key value pair in expected metadata - for key, value := range actualMetaData { - if expectedMetaData[key] != value { - fmt.Printf("value of user given key %s is %s in actual data while it is %s in expected metadata\n", key, value, expectedMetaData[key]) - return false - } - } - } else { - if len(actualMetaData) > 0 { - return false - } } - return true } // verifySingleFileUpload verifies the pagefile uploaded or downloaded @@ -250,32 +215,23 @@ func verifySingleFileUpload(testFileCmd TestFileCommand) { fileInfo, err := os.Stat(testFileCmd.Object) if err != nil { - fmt.Println("error opening the destination file on local disk ") + fmt.Println("error opening the destination localFile on local disk ") os.Exit(1) } - file, err := os.Open(testFileCmd.Object) - if err != nil { - fmt.Println("error opening the file ", testFileCmd.Object) - } - - // getting the shared access signature of the resource. - sourceURL, err := url.Parse(testFileCmd.Subject) + localFile, err := os.Open(testFileCmd.Object) if err != nil { - // fmt.Printf("Error parsing the file url source %s\n", testFileCmd.Object) - os.Exit(1) + fmt.Println("error opening the localFile ", testFileCmd.Object) } - // creating the page file url of the resource on container. 
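For the file download above, the retry behavior moves from get.Body(RetryReaderOptions{MaxRetryRequests: 3}) to wrapping the stream with NewRetryReader. A minimal sketch of that pattern, assuming a placeholder file SAS URL.

package main

import (
	"context"
	"fmt"
	"io"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file"
)

func main() {
	// Hypothetical file SAS URL, standing in for testFileCmd.Subject.
	fileClient, err := file.NewClientWithNoCredential(
		"https://myaccount.file.core.windows.net/share/dir/data.bin?<sas>", nil)
	if err != nil {
		log.Fatal(err)
	}

	get, err := fileClient.DownloadStream(context.Background(), nil)
	if err != nil {
		log.Fatal(err)
	}

	// NewRetryReader wraps the body so transient failures mid-download are
	// retried, replacing the old RetryReaderOptions{MaxRetryRequests: 3}.
	reader := get.NewRetryReader(context.Background(), &file.RetryReaderOptions{MaxRetries: 3})
	defer reader.Close()

	data, err := io.ReadAll(reader)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("downloaded", len(data), "bytes")
}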
- p := azfile.NewPipeline(azfile.NewAnonymousCredential(), azfile.PipelineOptions{Retry: azfile.RetryOptions{TryTimeout: time.Minute * 10}}) - fileURL := azfile.NewFileURL(*sourceURL, p) - get, err := fileURL.Download(context.Background(), 0, azfile.CountToEnd, false) + fileClient, _ := file.NewClientWithNoCredential(testFileCmd.Subject, nil) + get, err := fileClient.DownloadStream(context.Background(), nil) if err != nil { - fmt.Println("unable to get file properties ", err.Error()) + fmt.Println("unable to get localFile properties ", err.Error()) os.Exit(1) } // reading all the bytes downloaded. - retryReader := get.Body(azfile.RetryReaderOptions{MaxRetryRequests: 3}) + retryReader := get.NewRetryReader(context.Background(), &file.RetryReaderOptions{MaxRetries: 3}) defer retryReader.Close() fileBytesDownloaded, err := io.ReadAll(retryReader) if err != nil { @@ -287,18 +243,18 @@ func verifySingleFileUpload(testFileCmd TestFileCommand) { // If the fileSize is 0 and the len of downloaded bytes is not 0 // validation fails if len(fileBytesDownloaded) != 0 { - fmt.Printf("validation failed since the actual file size %d differs from the downloaded file size %d\n", fileInfo.Size(), len(fileBytesDownloaded)) + fmt.Printf("validation failed since the actual localFile size %d differs from the downloaded localFile size %d\n", fileInfo.Size(), len(fileBytesDownloaded)) os.Exit(1) } - // If both the actual and downloaded file size is 0, + // If both the actual and downloaded localFile size is 0, // validation is successful, no need to match the md5 os.Exit(0) } // memory mapping the resource on local path. - mmap, err := NewMMF(file, false, 0, fileInfo.Size()) + mmap, err := NewMMF(localFile, false, 0, fileInfo.Size()) if err != nil { - fmt.Println("error mapping the destination file: ", file, " file size: ", fileInfo.Size(), " Error: ", err.Error()) + fmt.Println("error mapping the destination localFile: ", localFile, " localFile size: ", fileInfo.Size(), " Error: ", err.Error()) os.Exit(1) } @@ -307,18 +263,18 @@ func verifySingleFileUpload(testFileCmd TestFileCommand) { actualMd5 := md5.Sum(mmap) expectedMd5 := md5.Sum(fileBytesDownloaded) if actualMd5 != expectedMd5 { - fmt.Println("the uploaded file's md5 doesn't matches the actual file's md5 for file ", testFileCmd.Object) + fmt.Println("the uploaded localFile's md5 doesn't matches the actual localFile's md5 for localFile ", testFileCmd.Object) os.Exit(1) } - if testFileCmd.CheckContentMD5 && (get.ContentMD5() == nil || len(get.ContentMD5()) == 0) { + if testFileCmd.CheckContentMD5 && (get.ContentMD5 == nil || len(get.ContentMD5) == 0) { fmt.Println("ContentMD5 should not be empty") os.Exit(1) } - // verify the user given metadata supplied while uploading the file against the metadata actually present in the file - if !validateMetadataForFile(testFileCmd.MetaData, get.NewMetadata()) { - fmt.Println("meta data does not match between the actual and uploaded file.") + // verify the user given metadata supplied while uploading the localFile against the metadata actually present in the localFile + if !validateMetadata(testFileCmd.MetaData, get.Metadata) { + fmt.Println("meta data does not match between the actual and uploaded localFile.") os.Exit(1) } @@ -330,41 +286,41 @@ func verifySingleFileUpload(testFileCmd TestFileCommand) { expectedContentType = http.DetectContentType(mmap) } expectedContentType = strings.Split(expectedContentType, ";")[0] - if !validateString(expectedContentType, get.ContentType()) { - str1 := fmt.Sprintf(" %s %s", 
expectedContentType, get.ContentType()) - fmt.Println(str1 + "mismatch content type between actual and user given file content type") + if !validateString(expectedContentType, common.IffNotNil(get.ContentType, "")) { + str1 := fmt.Sprintf(" %s %s", expectedContentType, common.IffNotNil(get.ContentDisposition, "")) + fmt.Println(str1 + "mismatch content type between actual and user given localFile content type") os.Exit(1) } //verify the content-encoding - if !validateString(testFileCmd.ContentEncoding, get.ContentEncoding()) { - fmt.Println("mismatch content encoding between actual and user given file content encoding") + if !validateString(testFileCmd.ContentEncoding, common.IffNotNil(get.ContentEncoding, "")) { + fmt.Println("mismatch content encoding between actual and user given localFile content encoding") os.Exit(1) } - if !validateString(testFileCmd.ContentDisposition, get.ContentDisposition()) { + if !validateString(testFileCmd.ContentDisposition, common.IffNotNil(get.ContentDisposition, "")) { fmt.Println("mismatch content disposition between actual and user given value") os.Exit(1) } - if !validateString(testFileCmd.ContentLanguage, get.ContentLanguage()) { + if !validateString(testFileCmd.ContentLanguage, common.IffNotNil(get.ContentLanguage, "")) { fmt.Println("mismatch content encoding between actual and user given value") os.Exit(1) } - if !validateString(testFileCmd.CacheControl, get.CacheControl()) { + if !validateString(testFileCmd.CacheControl, common.IffNotNil(get.CacheControl, "")) { fmt.Println("mismatch cache control between actual and user given value") os.Exit(1) } mmap.Unmap() - file.Close() + localFile.Close() // verify the number of pageranges. // this verifies the page-size and azcopy pagefile implementation. if testFileCmd.VerifyBlockOrPageSize { numberOfPages := int(testFileCmd.NumberOfBlocksOrPages) - resp, err := fileURL.GetRangeList(context.Background(), 0, azfile.CountToEnd) + resp, err := fileClient.GetRangeList(context.Background(), nil) if err != nil { fmt.Println("error getting the range list ", err.Error()) os.Exit(1) diff --git a/testSuite/scripts/test_autodetect_blob_type.py b/testSuite/scripts/test_autodetect_blob_type.py index 4dfae50da..afb6e1338 100644 --- a/testSuite/scripts/test_autodetect_blob_type.py +++ b/testSuite/scripts/test_autodetect_blob_type.py @@ -5,7 +5,7 @@ class Autodetect_Blob_Type_Scenario(unittest.TestCase): def setUp(self): - cmd = util.Command("login").add_arguments("--service-principal").add_flags("application-id", os.environ['ACTIVE_DIRECTORY_APPLICATION_ID']) + cmd = util.Command("login").add_arguments("--service-principal").add_flags("application-id", os.environ['ACTIVE_DIRECTORY_APPLICATION_ID']).add_flags("tenant-id", os.environ['OAUTH_TENANT_ID']) cmd.execute_azcopy_copy_command() def tearDown(self): diff --git a/testSuite/scripts/test_azcopy_operations.py b/testSuite/scripts/test_azcopy_operations.py index 9a627542d..e78e0c655 100644 --- a/testSuite/scripts/test_azcopy_operations.py +++ b/testSuite/scripts/test_azcopy_operations.py @@ -7,7 +7,7 @@ class Azcopy_Operation_User_Scenario(unittest.TestCase): def setUp(self): - cmd = util.Command("login").add_arguments("--service-principal").add_flags("application-id", os.environ['ACTIVE_DIRECTORY_APPLICATION_ID']) + cmd = util.Command("login").add_arguments("--service-principal").add_flags("application-id", os.environ['ACTIVE_DIRECTORY_APPLICATION_ID']).add_flags("tenant-id", os.environ['OAUTH_TENANT_ID']) cmd.execute_azcopy_copy_command() def tearDown(self): diff --git 
a/testSuite/scripts/test_blob_download.py b/testSuite/scripts/test_blob_download.py index 24642c1af..d33d2f514 100644 --- a/testSuite/scripts/test_blob_download.py +++ b/testSuite/scripts/test_blob_download.py @@ -10,7 +10,7 @@ class Blob_Download_User_Scenario(unittest.TestCase): def setUp(self): - cmd = util.Command("login").add_arguments("--service-principal").add_flags("application-id", os.environ['ACTIVE_DIRECTORY_APPLICATION_ID']) + cmd = util.Command("login").add_arguments("--service-principal").add_flags("application-id", os.environ['ACTIVE_DIRECTORY_APPLICATION_ID']).add_flags("tenant-id", os.environ['OAUTH_TENANT_ID']) cmd.execute_azcopy_copy_command() def tearDown(self): diff --git a/testSuite/scripts/test_blobfs_download_SAS.py b/testSuite/scripts/test_blobfs_download_SAS.py index de3f8b32d..1ab4cc360 100644 --- a/testSuite/scripts/test_blobfs_download_SAS.py +++ b/testSuite/scripts/test_blobfs_download_SAS.py @@ -5,7 +5,7 @@ class BlobFs_Download_SAS_User_Scenarios(unittest.TestCase): def setUp(self): - cmd = util.Command("login").add_arguments("--service-principal").add_flags("application-id", os.environ['ACTIVE_DIRECTORY_APPLICATION_ID']) + cmd = util.Command("login").add_arguments("--service-principal").add_flags("application-id", os.environ['ACTIVE_DIRECTORY_APPLICATION_ID']).add_flags("tenant-id", os.environ['OAUTH_TENANT_ID']) cmd.execute_azcopy_copy_command() self.cachedAzCopyAccountKey = os.environ['ACCOUNT_KEY'] os.environ['ACCOUNT_KEY'] = '' diff --git a/testSuite/scripts/test_blobfs_download_sharedkey.py b/testSuite/scripts/test_blobfs_download_sharedkey.py index a422794ee..35929e504 100644 --- a/testSuite/scripts/test_blobfs_download_sharedkey.py +++ b/testSuite/scripts/test_blobfs_download_sharedkey.py @@ -7,7 +7,7 @@ class BlobFs_Download_SharedKey_User_Scenarios(unittest.TestCase): def setUp(self): - cmd = util.Command("login").add_arguments("--service-principal").add_flags("application-id", os.environ['ACTIVE_DIRECTORY_APPLICATION_ID']) + cmd = util.Command("login").add_arguments("--service-principal").add_flags("application-id", os.environ['ACTIVE_DIRECTORY_APPLICATION_ID']).add_flags("tenant-id", os.environ['OAUTH_TENANT_ID']) cmd.execute_azcopy_copy_command() def tearDown(self): diff --git a/testSuite/scripts/test_blobfs_upload_SAS.py b/testSuite/scripts/test_blobfs_upload_SAS.py index 55cb5f051..4c9fdd1fe 100644 --- a/testSuite/scripts/test_blobfs_upload_SAS.py +++ b/testSuite/scripts/test_blobfs_upload_SAS.py @@ -4,7 +4,7 @@ class BlobFs_Upload_SAS_User_Scenarios(unittest.TestCase): def setUp(self): - cmd = util.Command("login").add_arguments("--service-principal").add_flags("application-id", os.environ['ACTIVE_DIRECTORY_APPLICATION_ID']) + cmd = util.Command("login").add_arguments("--service-principal").add_flags("application-id", os.environ['ACTIVE_DIRECTORY_APPLICATION_ID']).add_flags("tenant-id", os.environ['OAUTH_TENANT_ID']) cmd.execute_azcopy_copy_command() self.cachedAzCopyAccountKey = os.environ['ACCOUNT_KEY'] os.environ['ACCOUNT_KEY'] = '' diff --git a/testSuite/scripts/test_blobfs_upload_sharedkey.py b/testSuite/scripts/test_blobfs_upload_sharedkey.py index 42df8274d..836e63912 100644 --- a/testSuite/scripts/test_blobfs_upload_sharedkey.py +++ b/testSuite/scripts/test_blobfs_upload_sharedkey.py @@ -10,7 +10,7 @@ class BlobFs_Upload_ShareKey_User_Scenarios(unittest.TestCase): def setUp(self): - cmd = util.Command("login").add_arguments("--service-principal").add_flags("application-id", os.environ['ACTIVE_DIRECTORY_APPLICATION_ID']) + cmd = 
util.Command("login").add_arguments("--service-principal").add_flags("application-id", os.environ['ACTIVE_DIRECTORY_APPLICATION_ID']).add_flags("tenant-id", os.environ['OAUTH_TENANT_ID']) cmd.execute_azcopy_copy_command() def tearDown(self): diff --git a/testSuite/scripts/test_service_to_service_copy.py b/testSuite/scripts/test_service_to_service_copy.py index a8994c4f5..1f4ca218a 100644 --- a/testSuite/scripts/test_service_to_service_copy.py +++ b/testSuite/scripts/test_service_to_service_copy.py @@ -12,7 +12,7 @@ class Service_2_Service_Copy_User_Scenario(unittest.TestCase): def setUp(self): - cmd = util.Command("login").add_arguments("--service-principal").add_flags("application-id", os.environ['ACTIVE_DIRECTORY_APPLICATION_ID']) + cmd = util.Command("login").add_arguments("--service-principal").add_flags("application-id", os.environ['ACTIVE_DIRECTORY_APPLICATION_ID']).add_flags("tenant-id", os.environ['OAUTH_TENANT_ID']) cmd.execute_azcopy_copy_command() # init bucket_name common_prefix = 's2scopybucket' diff --git a/testSuite/scripts/test_upload_block_blob.py b/testSuite/scripts/test_upload_block_blob.py index 372786bf1..c1dedadd3 100644 --- a/testSuite/scripts/test_upload_block_blob.py +++ b/testSuite/scripts/test_upload_block_blob.py @@ -9,7 +9,7 @@ class Block_Upload_User_Scenarios(unittest.TestCase): def setUp(self): - cmd = util.Command("login").add_arguments("--service-principal").add_flags("application-id", os.environ['ACTIVE_DIRECTORY_APPLICATION_ID']) + cmd = util.Command("login").add_arguments("--service-principal").add_flags("application-id", os.environ['ACTIVE_DIRECTORY_APPLICATION_ID']).add_flags("tenant-id", os.environ['OAUTH_TENANT_ID']) cmd.execute_azcopy_copy_command() def tearDown(self): @@ -101,7 +101,7 @@ def test_10_1kb_blob_upload_with_oauth(self): # test_metaData_content_encoding_content_type verifies the meta data, content type, # content encoding of 2kb upload to container through azcopy. 
- def test_blob_metaData_content_encoding_content_type(self): + def test_blob_metadata_content_encoding_content_type(self): # create 2kb file test_mcect.txt filename = "test_mcect.txt" file_path = util.create_test_file(filename, 2048) diff --git a/testSuite/scripts/test_upload_page_blob.py b/testSuite/scripts/test_upload_page_blob.py index c14e527e9..b1efd47e4 100644 --- a/testSuite/scripts/test_upload_page_blob.py +++ b/testSuite/scripts/test_upload_page_blob.py @@ -5,7 +5,7 @@ class PageBlob_Upload_User_Scenarios(unittest.TestCase): def setUp(self): - cmd = util.Command("login").add_arguments("--service-principal").add_flags("application-id", os.environ['ACTIVE_DIRECTORY_APPLICATION_ID']) + cmd = util.Command("login").add_arguments("--service-principal").add_flags("application-id", os.environ['ACTIVE_DIRECTORY_APPLICATION_ID']).add_flags("tenant-id", os.environ['OAUTH_TENANT_ID']) cmd.execute_azcopy_copy_command() def tearDown(self): diff --git a/testSuite/scripts/utility.py b/testSuite/scripts/utility.py index 13eb28894..f708ac290 100644 --- a/testSuite/scripts/utility.py +++ b/testSuite/scripts/utility.py @@ -259,10 +259,8 @@ def initialize_test_suite(test_dir_path, container_sas, container_oauth, contain print("failed to clean test filesystem.") if not clean_test_container(test_container_url): print("failed to clean test blob container.") - if not clean_test_container(test_oauth_container_url): + if not clean_test_container(test_oauth_container_validate_sas_url): print("failed to clean OAuth test blob container.") - if not clean_test_container(test_oauth_container_url): - print("failed to clean OAuth container.") if not clean_test_container(test_premium_account_contaier_url): print("failed to clean premium container.") if not clean_test_blob_account(test_s2s_src_blob_account_url): @@ -592,7 +590,7 @@ def verify_operation(command): command, stderr=subprocess.STDOUT, shell=True, timeout=360, universal_newlines=True) except subprocess.CalledProcessError as exec: - # print("command failed with error code ", exec.returncode, " and message " + exec.output) + print("command failed with error code ", exec.returncode, " and message " + exec.output) return False else: return True